From b283acb80eccfa31563f2b017e2b8badecc03f53 Mon Sep 17 00:00:00 2001
From: Maxim Krasilnikov
Date: Tue, 20 Aug 2024 20:06:32 +0300
Subject: [PATCH] Up goose version to 3.21.1

---
 .github/workflows/ci.yml | 2 +- .github/workflows/dist.yml | 2 +- Dockerfile | 2 +- README.md | 2 +- go.mod | 53 +- go.sum | 237 +- vendor/filippo.io/edwards25519/LICENSE | 27 + vendor/filippo.io/edwards25519/README.md | 14 + vendor/filippo.io/edwards25519/doc.go | 20 + .../filippo.io/edwards25519/edwards25519.go | 427 + vendor/filippo.io/edwards25519/extra.go | 349 + vendor/filippo.io/edwards25519/field/fe.go | 420 + .../filippo.io/edwards25519/field/fe_amd64.go | 16 + .../filippo.io/edwards25519/field/fe_amd64.s | 379 + .../edwards25519/field/fe_amd64_noasm.go | 12 + .../filippo.io/edwards25519/field/fe_arm64.go | 16 + .../filippo.io/edwards25519/field/fe_arm64.s | 42 + .../edwards25519/field/fe_arm64_noasm.go | 12 + .../filippo.io/edwards25519/field/fe_extra.go | 50 + .../edwards25519/field/fe_generic.go | 266 + vendor/filippo.io/edwards25519/scalar.go | 343 + vendor/filippo.io/edwards25519/scalar_fiat.go | 1147 + vendor/filippo.io/edwards25519/scalarmult.go | 214 + vendor/filippo.io/edwards25519/tables.go | 129 + vendor/github.com/ClickHouse/ch-go/AUTHORS | 2 + vendor/github.com/ClickHouse/ch-go/LICENSE | 202 + .../ClickHouse/ch-go/compress/compress.go | 51 + .../ClickHouse/ch-go/compress/method_enum.go | 97 + .../ClickHouse/ch-go/compress/reader.go | 135 + .../ClickHouse/ch-go/compress/writer.go | 67 + .../ClickHouse/ch-go/proto/block.go | 286 + .../github.com/ClickHouse/ch-go/proto/bool.go | 6 + .../ClickHouse/ch-go/proto/buffer.go | 158 + .../ClickHouse/ch-go/proto/client_code.go | 19 + .../ch-go/proto/client_code_enum.go | 94 + .../ClickHouse/ch-go/proto/client_data.go | 24 + .../ClickHouse/ch-go/proto/client_hello.go | 86 + .../ClickHouse/ch-go/proto/client_info.go | 319 + .../ch-go/proto/client_info_interface_enum.go | 79 + .../ch-go/proto/client_info_query_enum.go | 82 + .../ClickHouse/ch-go/proto/col_arr.go | 155 + .../ClickHouse/ch-go/proto/col_auto.go | 124 + .../ClickHouse/ch-go/proto/col_auto_gen.go | 160 + .../ClickHouse/ch-go/proto/col_bool.go | 53 + .../ClickHouse/ch-go/proto/col_bool_safe.go | 44 + .../ClickHouse/ch-go/proto/col_bool_unsafe.go | 36 + .../ClickHouse/ch-go/proto/col_date.go | 49 + .../ClickHouse/ch-go/proto/col_date32.go | 49 + .../ClickHouse/ch-go/proto/col_date32_gen.go | 28 + .../ch-go/proto/col_date32_safe_gen.go | 55 + .../ch-go/proto/col_date32_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_date_gen.go | 28 + .../ch-go/proto/col_date_safe_gen.go | 55 + .../ch-go/proto/col_date_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_datetime.go | 104 + .../ClickHouse/ch-go/proto/col_datetime64.go | 150 + .../ch-go/proto/col_datetime64_safe_gen.go | 55 + .../ch-go/proto/col_datetime64_unsafe_gen.go | 45 + .../ch-go/proto/col_datetime_safe_gen.go | 55 + .../ch-go/proto/col_datetime_unsafe_gen.go | 45 + .../ch-go/proto/col_decimal128_gen.go | 71 + .../ch-go/proto/col_decimal128_safe_gen.go | 55 + .../ch-go/proto/col_decimal128_unsafe_gen.go | 45 + .../ch-go/proto/col_decimal256_gen.go | 71 + .../ch-go/proto/col_decimal256_safe_gen.go | 55 + .../ch-go/proto/col_decimal256_unsafe_gen.go | 45 + .../ch-go/proto/col_decimal32_gen.go | 71 + .../ch-go/proto/col_decimal32_safe_gen.go | 55 + .../ch-go/proto/col_decimal32_unsafe_gen.go | 45 + .../ch-go/proto/col_decimal64_gen.go | 71 + .../ch-go/proto/col_decimal64_safe_gen.go | 55 + .../ch-go/proto/col_decimal64_unsafe_gen.go | 45 + 
.../ClickHouse/ch-go/proto/col_enum.go | 172 + .../ClickHouse/ch-go/proto/col_enum16_gen.go | 71 + .../ch-go/proto/col_enum16_safe_gen.go | 55 + .../ch-go/proto/col_enum16_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_enum8_gen.go | 71 + .../ch-go/proto/col_enum8_safe_gen.go | 44 + .../ch-go/proto/col_enum8_unsafe_gen.go | 39 + .../ClickHouse/ch-go/proto/col_fixed_str.go | 94 + .../ch-go/proto/col_fixedstr128_gen.go | 71 + .../ch-go/proto/col_fixedstr128_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr128_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr16_gen.go | 71 + .../ch-go/proto/col_fixedstr16_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr16_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr256_gen.go | 71 + .../ch-go/proto/col_fixedstr256_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr256_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr32_gen.go | 71 + .../ch-go/proto/col_fixedstr32_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr32_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr512_gen.go | 71 + .../ch-go/proto/col_fixedstr512_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr512_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr64_gen.go | 71 + .../ch-go/proto/col_fixedstr64_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr64_unsafe_gen.go | 45 + .../ch-go/proto/col_fixedstr8_gen.go | 71 + .../ch-go/proto/col_fixedstr8_safe_gen.go | 55 + .../ch-go/proto/col_fixedstr8_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_float32_gen.go | 71 + .../ch-go/proto/col_float32_safe_gen.go | 56 + .../ch-go/proto/col_float32_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_float64_gen.go | 71 + .../ch-go/proto/col_float64_safe_gen.go | 56 + .../ch-go/proto/col_float64_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int128_gen.go | 71 + .../ch-go/proto/col_int128_safe_gen.go | 55 + .../ch-go/proto/col_int128_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int16_gen.go | 71 + .../ch-go/proto/col_int16_safe_gen.go | 55 + .../ch-go/proto/col_int16_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int256_gen.go | 71 + .../ch-go/proto/col_int256_safe_gen.go | 55 + .../ch-go/proto/col_int256_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int32_gen.go | 71 + .../ch-go/proto/col_int32_safe_gen.go | 55 + .../ch-go/proto/col_int32_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int64_gen.go | 71 + .../ch-go/proto/col_int64_safe_gen.go | 55 + .../ch-go/proto/col_int64_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_int8_gen.go | 71 + .../ch-go/proto/col_int8_safe_gen.go | 44 + .../ch-go/proto/col_int8_unsafe_gen.go | 39 + .../ClickHouse/ch-go/proto/col_interval.go | 112 + .../ClickHouse/ch-go/proto/col_ipv4_gen.go | 71 + .../ch-go/proto/col_ipv4_safe_gen.go | 55 + .../ch-go/proto/col_ipv4_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_ipv6_gen.go | 71 + .../ch-go/proto/col_ipv6_safe_gen.go | 55 + .../ch-go/proto/col_ipv6_unsafe_gen.go | 45 + .../ch-go/proto/col_low_cardinality.go | 347 + .../ch-go/proto/col_low_cardinality_enum.go | 86 + .../ch-go/proto/col_low_cardinality_raw.go | 157 + .../ClickHouse/ch-go/proto/col_map.go | 201 + .../ClickHouse/ch-go/proto/col_nothing.go | 73 + .../ClickHouse/ch-go/proto/col_nullable.go | 135 + .../ClickHouse/ch-go/proto/col_point.go | 63 + .../ClickHouse/ch-go/proto/col_raw.go | 35 + .../ClickHouse/ch-go/proto/col_raw_of.go | 84 + .../ClickHouse/ch-go/proto/col_str.go | 205 + .../ClickHouse/ch-go/proto/col_tuple.go | 169 + .../ClickHouse/ch-go/proto/col_uint128_gen.go | 71 + .../ch-go/proto/col_uint128_safe_gen.go | 55 + 
.../ch-go/proto/col_uint128_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_uint16_gen.go | 71 + .../ch-go/proto/col_uint16_safe_gen.go | 55 + .../ch-go/proto/col_uint16_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_uint256_gen.go | 71 + .../ch-go/proto/col_uint256_safe_gen.go | 55 + .../ch-go/proto/col_uint256_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_uint32_gen.go | 71 + .../ch-go/proto/col_uint32_safe_gen.go | 55 + .../ch-go/proto/col_uint32_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_uint64_gen.go | 71 + .../ch-go/proto/col_uint64_safe_gen.go | 55 + .../ch-go/proto/col_uint64_unsafe_gen.go | 45 + .../ClickHouse/ch-go/proto/col_uint8_gen.go | 71 + .../ch-go/proto/col_uint8_safe_gen.go | 33 + .../ClickHouse/ch-go/proto/col_uuid.go | 33 + .../ClickHouse/ch-go/proto/col_uuid_safe.go | 36 + .../ClickHouse/ch-go/proto/col_uuid_unsafe.go | 49 + .../ClickHouse/ch-go/proto/column.go | 290 + .../ClickHouse/ch-go/proto/compression.go | 17 + .../ch-go/proto/compression_enum.go | 78 + .../github.com/ClickHouse/ch-go/proto/date.go | 46 + .../ClickHouse/ch-go/proto/date32.go | 38 + .../ClickHouse/ch-go/proto/datetime.go | 21 + .../ClickHouse/ch-go/proto/datetime64.go | 62 + .../ClickHouse/ch-go/proto/decimal.go | 13 + .../ClickHouse/ch-go/proto/enum16.go | 6 + .../ClickHouse/ch-go/proto/enum8.go | 6 + .../ClickHouse/ch-go/proto/error.go | 15 + .../ClickHouse/ch-go/proto/error_codes.go | 376 + .../ClickHouse/ch-go/proto/error_enum.go | 1916 + .../ClickHouse/ch-go/proto/exception.go | 59 + .../ClickHouse/ch-go/proto/feature.go | 45 + .../ClickHouse/ch-go/proto/feature_enum.go | 176 + .../github.com/ClickHouse/ch-go/proto/gen.go | 3 + .../ClickHouse/ch-go/proto/int128.go | 93 + .../ClickHouse/ch-go/proto/int256.go | 68 + .../ClickHouse/ch-go/proto/interval_enum.go | 102 + .../github.com/ClickHouse/ch-go/proto/ipv4.go | 31 + .../github.com/ClickHouse/ch-go/proto/ipv6.go | 25 + .../ClickHouse/ch-go/proto/profile.go | 69 + .../ClickHouse/ch-go/proto/profile_enum.go | 109 + .../ClickHouse/ch-go/proto/profile_events.go | 70 + .../ClickHouse/ch-go/proto/progress.go | 78 + .../ClickHouse/ch-go/proto/proto.go | 8 + .../ClickHouse/ch-go/proto/query.go | 213 + .../ClickHouse/ch-go/proto/reader.go | 290 + .../ClickHouse/ch-go/proto/reset.go | 12 + .../ClickHouse/ch-go/proto/results.go | 153 + .../ClickHouse/ch-go/proto/server_code.go | 38 + .../ch-go/proto/server_code_enum.go | 130 + .../ClickHouse/ch-go/proto/server_hello.go | 120 + .../ClickHouse/ch-go/proto/server_log.go | 55 + .../ClickHouse/ch-go/proto/slice_unsafe.go | 17 + .../ClickHouse/ch-go/proto/stage.go | 16 + .../ClickHouse/ch-go/proto/stage_enum.go | 82 + .../ClickHouse/ch-go/proto/table_columns.go | 32 + .../ClickHouse/clickhouse-go/v2/.gitignore | 13 +- .../clickhouse-go/v2/.golangci.yaml | 71 + .../ClickHouse/clickhouse-go/v2/CHANGELOG.md | 673 + .../ClickHouse/clickhouse-go/v2/LICENSE | 5 +- .../ClickHouse/clickhouse-go/v2/Makefile | 14 +- .../ClickHouse/clickhouse-go/v2/README.md | 278 +- .../ClickHouse/clickhouse-go/v2/TYPES.md | 82 + .../ClickHouse/clickhouse-go/v2/batch.go | 58 + .../ClickHouse/clickhouse-go/v2/bind.go | 252 +- .../ClickHouse/clickhouse-go/v2/clickhouse.go | 136 +- .../clickhouse-go/v2/clickhouse_options.go | 254 +- .../clickhouse-go/v2/clickhouse_rows.go | 49 +- .../clickhouse-go/v2/clickhouse_std.go | 372 +- .../clickhouse-go/v2/client_info.go | 86 + .../ClickHouse/clickhouse-go/v2/conn.go | 235 +- .../clickhouse-go/v2/conn_async_insert.go | 13 +- .../ClickHouse/clickhouse-go/v2/conn_batch.go | 
256 +- .../ClickHouse/clickhouse-go/v2/conn_check.go | 8 +- .../ClickHouse/clickhouse-go/v2/conn_exec.go | 12 +- .../clickhouse-go/v2/conn_handshake.go | 42 +- .../ClickHouse/clickhouse-go/v2/conn_http.go | 535 +- ...e_helpers.go => conn_http_async_insert.go} | 42 +- .../clickhouse-go/v2/conn_http_batch.go | 232 + .../compress.go => conn_http_exec.go} | 35 +- .../clickhouse-go/v2/conn_http_query.go | 125 + .../ClickHouse/clickhouse-go/v2/conn_logs.go | 5 +- .../ClickHouse/clickhouse-go/v2/conn_ping.go | 13 +- .../clickhouse-go/v2/conn_process.go | 37 +- .../clickhouse-go/v2/conn_profile_events.go | 5 +- .../ClickHouse/clickhouse-go/v2/conn_query.go | 28 +- .../clickhouse-go/v2/conn_send_query.go | 39 +- .../ClickHouse/clickhouse-go/v2/context.go | 50 +- .../city64.go => context_watchdog.go} | 73 +- .../v2/contributors/contributors.go | 1 + .../clickhouse-go/v2/contributors/list | 87 +- .../ClickHouse/clickhouse-go/v2/ext/ext.go | 12 +- .../clickhouse-go/v2/lib/binary/decoder.go | 175 - .../clickhouse-go/v2/lib/binary/encoder.go | 146 - .../v2/lib/binary/string_safe.go | 11 +- .../v2/lib/binary/string_unsafe.go | 12 +- .../v2/lib/cityhash102/cityhash.go | 400 - .../clickhouse-go/v2/lib/cityhash102/doc.go | 22 - .../clickhouse-go/v2/lib/column/array.go | 163 +- .../clickhouse-go/v2/lib/column/array_gen.go | 175 + .../clickhouse-go/v2/lib/column/bigint.go | 135 +- .../clickhouse-go/v2/lib/column/bool.go | 106 +- .../clickhouse-go/v2/lib/column/column.go | 28 +- .../clickhouse-go/v2/lib/column/column_gen.go | 1225 +- .../v2/lib/column/column_gen_option.go | 46 + .../v2/lib/column/column_safe_gen.go | 228 - .../v2/lib/column/column_unsafe_gen.go | 410 - .../clickhouse-go/v2/lib/column/date.go | 195 +- .../clickhouse-go/v2/lib/column/date32.go | 176 +- .../clickhouse-go/v2/lib/column/datetime.go | 223 +- .../clickhouse-go/v2/lib/column/datetime64.go | 228 +- .../clickhouse-go/v2/lib/column/decimal.go | 235 +- .../clickhouse-go/v2/lib/column/enum.go | 18 +- .../clickhouse-go/v2/lib/column/enum16.go | 169 +- .../clickhouse-go/v2/lib/column/enum8.go | 168 +- .../v2/lib/column/fixed_string.go | 171 +- .../v2/lib/column/geo_multi_polygon.go | 62 +- .../clickhouse-go/v2/lib/column/geo_point.go | 96 +- .../v2/lib/column/geo_polygon.go | 62 +- .../clickhouse-go/v2/lib/column/geo_ring.go | 62 +- .../clickhouse-go/v2/lib/column/interval.go | 40 +- .../clickhouse-go/v2/lib/column/ipv4.go | 247 +- .../clickhouse-go/v2/lib/column/ipv6.go | 300 +- .../clickhouse-go/v2/lib/column/json.go | 167 +- .../v2/lib/column/lowcardinality.go | 95 +- .../clickhouse-go/v2/lib/column/map.go | 253 +- .../clickhouse-go/v2/lib/column/nested.go | 9 +- .../clickhouse-go/v2/lib/column/nothing.go | 44 +- .../clickhouse-go/v2/lib/column/nullable.go | 107 +- .../lib/column/simple_aggregate_function.go | 28 +- .../clickhouse-go/v2/lib/column/string.go | 177 +- .../clickhouse-go/v2/lib/column/tuple.go | 253 +- .../clickhouse-go/v2/lib/column/uuid.go | 119 +- .../v2/lib/compress/compress_reader.go | 111 - .../v2/lib/compress/compress_writer.go | 91 - .../clickhouse-go/v2/lib/driver/driver.go | 36 +- .../clickhouse-go/v2/lib/driver/options.go | 21 + .../clickhouse-go/v2/lib/io/stream.go | 82 - .../clickhouse-go/v2/lib/proto/block.go | 199 +- .../clickhouse-go/v2/lib/proto/const.go | 33 +- .../clickhouse-go/v2/lib/proto/exception.go | 18 +- .../clickhouse-go/v2/lib/proto/handshake.go | 123 +- .../v2/lib/proto/profile_info.go | 17 +- .../clickhouse-go/v2/lib/proto/progress.go | 30 +- .../clickhouse-go/v2/lib/proto/query.go | 195 +- 
.../v2/lib/proto/table_columns.go | 9 +- .../clickhouse-go/v2/query_parameters.go | 60 + .../clickhouse-go/v2/resources/meta.go | 78 + .../clickhouse-go/v2/resources/meta.yml | 9 + .../ClickHouse/clickhouse-go/v2/scan.go | 4 +- .../ClickHouse/clickhouse-go/v2/struct_map.go | 4 +- .../clickhouse-go/v2/v1_v2_CHANGES.md | 8 + vendor/github.com/andybalholm/brotli/LICENSE | 19 + .../github.com/andybalholm/brotli/README.md | 14 + .../andybalholm/brotli/backward_references.go | 185 + .../brotli/backward_references_hq.go | 796 + .../github.com/andybalholm/brotli/bit_cost.go | 436 + .../andybalholm/brotli/bit_reader.go | 266 + .../andybalholm/brotli/bitwriter.go | 56 + .../andybalholm/brotli/block_splitter.go | 144 + .../brotli/block_splitter_command.go | 434 + .../brotli/block_splitter_distance.go | 433 + .../brotli/block_splitter_literal.go | 433 + .../andybalholm/brotli/brotli_bit_stream.go | 1539 + .../github.com/andybalholm/brotli/cluster.go | 30 + .../andybalholm/brotli/cluster_command.go | 164 + .../andybalholm/brotli/cluster_distance.go | 326 + .../andybalholm/brotli/cluster_literal.go | 326 + .../github.com/andybalholm/brotli/command.go | 254 + .../andybalholm/brotli/compress_fragment.go | 834 + .../brotli/compress_fragment_two_pass.go | 773 + .../andybalholm/brotli/constants.go | 77 + .../github.com/andybalholm/brotli/context.go | 2176 + .../github.com/andybalholm/brotli/decode.go | 2581 + .../andybalholm/brotli/dictionary.go | 122890 +++++++++++++++ .../andybalholm/brotli/dictionary_hash.go | 32779 ++++ .../github.com/andybalholm/brotli/encode.go | 1220 + .../github.com/andybalholm/brotli/encoder.go | 168 + .../andybalholm/brotli/encoder_dict.go | 22 + .../andybalholm/brotli/entropy_encode.go | 592 + .../brotli/entropy_encode_static.go | 4399 + .../github.com/andybalholm/brotli/fast_log.go | 290 + .../andybalholm/brotli/find_match_length.go | 45 + vendor/github.com/andybalholm/brotli/h10.go | 287 + vendor/github.com/andybalholm/brotli/h5.go | 214 + vendor/github.com/andybalholm/brotli/h6.go | 216 + vendor/github.com/andybalholm/brotli/hash.go | 342 + .../andybalholm/brotli/hash_composite.go | 93 + .../brotli/hash_forgetful_chain.go | 252 + .../brotli/hash_longest_match_quickly.go | 214 + .../andybalholm/brotli/hash_rolling.go | 168 + .../andybalholm/brotli/histogram.go | 226 + vendor/github.com/andybalholm/brotli/http.go | 184 + .../github.com/andybalholm/brotli/huffman.go | 653 + .../andybalholm/brotli/literal_cost.go | 182 + .../andybalholm/brotli/matchfinder/emitter.go | 45 + .../andybalholm/brotli/matchfinder/m0.go | 169 + .../andybalholm/brotli/matchfinder/m4.go | 297 + .../brotli/matchfinder/matchfinder.go | 103 + .../brotli/matchfinder/textencoder.go | 53 + .../github.com/andybalholm/brotli/memory.go | 66 + .../andybalholm/brotli/metablock.go | 574 + .../andybalholm/brotli/metablock_command.go | 165 + .../andybalholm/brotli/metablock_distance.go | 165 + .../andybalholm/brotli/metablock_literal.go | 165 + .../github.com/andybalholm/brotli/params.go | 37 + .../github.com/andybalholm/brotli/platform.go | 103 + .../github.com/andybalholm/brotli/prefix.go | 30 + .../andybalholm/brotli/prefix_dec.go | 723 + .../github.com/andybalholm/brotli/quality.go | 196 + .../github.com/andybalholm/brotli/reader.go | 108 + .../andybalholm/brotli/ringbuffer.go | 134 + vendor/github.com/andybalholm/brotli/state.go | 294 + .../andybalholm/brotli/static_dict.go | 662 + .../andybalholm/brotli/static_dict_lut.go | 75094 +++++++++ .../andybalholm/brotli/symbol_list.go | 22 + 
.../andybalholm/brotli/transform.go | 641 + .../andybalholm/brotli/utf8_util.go | 70 + vendor/github.com/andybalholm/brotli/util.go | 7 + .../andybalholm/brotli/write_bits.go | 52 + .../github.com/andybalholm/brotli/writer.go | 162 + .../github.com/elastic/go-sysinfo/.gitignore | 6 +- .../elastic/go-sysinfo/.golangci.yml | 16 + .../elastic/go-sysinfo/CHANGELOG.md | 165 - .../elastic/go-sysinfo/CONTRIBUTING.md | 16 + .../github.com/elastic/go-sysinfo/README.md | 2 +- .../providers/aix/boottime_aix_ppc64.go | 1 - .../providers/aix/host_aix_ppc64.go | 18 +- .../providers/aix/kernel_aix_ppc64.go | 1 - .../providers/aix/machineid_aix_ppc64.go | 1 - .../go-sysinfo/providers/aix/os_aix_ppc64.go | 1 - .../providers/aix/process_aix_ppc64.go | 1 - .../providers/aix/ztypes_aix_ppc64.go | 1 - .../providers/darwin/arch_darwin.go | 1 - .../providers/darwin/boottime_darwin.go | 1 - .../providers/darwin/host_darwin.go | 8 +- .../providers/darwin/kernel_darwin.go | 1 - .../providers/darwin/load_average_darwin.go | 1 - .../providers/darwin/machineid_darwin.go | 1 - .../providers/darwin/memory_darwin.go | 1 - .../providers/darwin/process_cgo_darwin.go | 2 - .../providers/darwin/process_darwin.go | 64 +- .../providers/darwin/syscall_cgo_darwin.go | 1 - .../providers/darwin/syscall_darwin.go | 1 - .../providers/linux/boottime_linux.go | 2 +- .../providers/linux/capabilities_linux.go | 8 +- .../go-sysinfo/providers/linux/host_linux.go | 12 +- .../providers/linux/memory_linux.go | 2 +- .../elastic/go-sysinfo/providers/linux/os.go | 79 +- .../providers/linux/process_linux.go | 2 +- .../providers/linux/seccomp_linux.go | 2 +- .../go-sysinfo/providers/linux/util.go | 24 +- .../go-sysinfo/providers/linux/vmstat.go | 2 +- .../go-sysinfo/providers/shared/fqdn.go | 77 + .../providers/windows/host_windows.go | 50 +- .../providers/windows/os_windows.go | 2 +- .../elastic/go-sysinfo/types/host.go | 5 +- .../elastic/go-sysinfo/types/process.go | 2 +- vendor/github.com/go-faster/city/.codecov.yml | 9 + vendor/github.com/go-faster/city/.gitignore | 12 + .../github.com/go-faster/city/.golangci.yml | 111 + vendor/github.com/go-faster/city/128.go | 143 + vendor/github.com/go-faster/city/32.go | 169 + vendor/github.com/go-faster/city/64.go | 203 + .../go-faster/city/LICENSE} | 12 +- vendor/github.com/go-faster/city/Makefile | 13 + vendor/github.com/go-faster/city/README.md | 46 + vendor/github.com/go-faster/city/ch_128.go | 158 + vendor/github.com/go-faster/city/ch_64.go | 118 + vendor/github.com/go-faster/city/doc.go | 2 + .../github.com/go-faster/city/go.coverage.sh | 6 + vendor/github.com/go-faster/city/go.test.sh | 9 + .../github.com/go-faster/errors/.codecov.yml | 8 + .../github.com/go-faster/errors/.editorconfig | 25 + vendor/github.com/go-faster/errors/.gitignore | 8 + .../github.com/go-faster/errors/.golangci.yml | 104 + vendor/github.com/go-faster/errors/LICENSE | 27 + vendor/github.com/go-faster/errors/Makefile | 10 + vendor/github.com/go-faster/errors/README.md | 56 + vendor/github.com/go-faster/errors/adaptor.go | 193 + vendor/github.com/go-faster/errors/doc.go | 9 + vendor/github.com/go-faster/errors/errors.go | 37 + vendor/github.com/go-faster/errors/format.go | 47 + vendor/github.com/go-faster/errors/frame.go | 56 + .../go-faster/errors/go.coverage.sh | 6 + vendor/github.com/go-faster/errors/go.test.sh | 12 + vendor/github.com/go-faster/errors/into.go | 11 + .../github.com/go-faster/errors/join_go120.go | 20 + vendor/github.com/go-faster/errors/must.go | 12 + .../github.com/go-faster/errors/noerrtrace.go | 13 + 
vendor/github.com/go-faster/errors/trace.go | 37 + vendor/github.com/go-faster/errors/wrap.go | 134 + vendor/github.com/go-sql-driver/mysql/AUTHORS | 18 + .../go-sql-driver/mysql/CHANGELOG.md | 61 +- .../github.com/go-sql-driver/mysql/README.md | 82 +- vendor/github.com/go-sql-driver/mysql/auth.go | 63 +- .../go-sql-driver/mysql/collations.go | 2 +- .../go-sql-driver/mysql/connection.go | 104 +- .../go-sql-driver/mysql/connector.go | 67 +- .../github.com/go-sql-driver/mysql/const.go | 15 +- .../github.com/go-sql-driver/mysql/driver.go | 27 +- vendor/github.com/go-sql-driver/mysql/dsn.go | 128 +- .../github.com/go-sql-driver/mysql/errors.go | 18 +- .../github.com/go-sql-driver/mysql/fields.go | 70 +- vendor/github.com/go-sql-driver/mysql/fuzz.go | 25 - .../github.com/go-sql-driver/mysql/infile.go | 12 +- .../go-sql-driver/mysql/nulltime.go | 4 +- .../github.com/go-sql-driver/mysql/packets.go | 209 +- .../github.com/go-sql-driver/mysql/result.go | 36 +- vendor/github.com/go-sql-driver/mysql/rows.go | 13 +- .../go-sql-driver/mysql/statement.go | 23 +- .../github.com/go-sql-driver/mysql/utils.go | 13 +- vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CHANGELOG.md | 41 + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + vendor/github.com/google/uuid/README.md | 10 +- vendor/github.com/google/uuid/hash.go | 6 + vendor/github.com/google/uuid/node_js.go | 2 +- vendor/github.com/google/uuid/time.go | 21 +- vendor/github.com/google/uuid/uuid.go | 89 +- vendor/github.com/google/uuid/version6.go | 56 + vendor/github.com/google/uuid/version7.go | 104 + .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 123 + vendor/github.com/klauspost/compress/LICENSE | 304 + .../github.com/klauspost/compress/README.md | 693 + .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 + .../klauspost/compress/fse/decompress.go | 376 + .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 229 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 + .../klauspost/compress/huff0/decompress.go | 1167 + .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 + .../compress/huff0/decompress_generic.go | 299 + .../klauspost/compress/huff0/huff0.go | 337 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 + .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 + .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 + .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 726 + 
.../klauspost/compress/zstd/blockenc.go | 889 + .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 + .../klauspost/compress/zstd/decoder.go | 948 + .../compress/zstd/decoder_options.go | 169 + .../klauspost/compress/zstd/dict.go | 534 + .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 548 + .../klauspost/compress/zstd/enc_better.go | 1241 + .../klauspost/compress/zstd/enc_dfast.go | 1123 + .../klauspost/compress/zstd/enc_fast.go | 891 + .../klauspost/compress/zstd/encoder.go | 619 + .../compress/zstd/encoder_options.go | 339 + .../klauspost/compress/zstd/framedec.go | 413 + .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 + .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 73 + .../klauspost/compress/zstd/fse_encoder.go | 701 + .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 68 + .../compress/zstd/matchlen_generic.go | 33 + .../klauspost/compress/zstd/seqdec.go | 503 + .../klauspost/compress/zstd/seqdec_amd64.go | 394 + .../klauspost/compress/zstd/seqdec_amd64.s | 4151 + .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 434 + .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 121 + .../magiconair/properties/.travis.yml | 10 - .../magiconair/properties/CHANGELOG.md | 74 + .../properties/{LICENSE => LICENSE.md} | 9 +- .../magiconair/properties/README.md | 3 +- .../magiconair/properties/decode.go | 66 +- .../github.com/magiconair/properties/doc.go | 127 +- .../magiconair/properties/integrate.go | 7 +- .../github.com/magiconair/properties/lex.go | 14 +- .../github.com/magiconair/properties/load.go | 9 +- .../magiconair/properties/parser.go | 11 +- .../magiconair/properties/properties.go | 49 +- .../magiconair/properties/rangecheck.go | 2 +- .../mfridman/interpolate/LICENSE.txt | 24 + .../github.com/mfridman/interpolate/README.md | 85 + vendor/github.com/mfridman/interpolate/env.go | 53 + .../mfridman/interpolate/interpolate.go | 213 + .../github.com/mfridman/interpolate/parser.go | 281 + vendor/github.com/paulmach/orb/CHANGELOG.md | 59 + vendor/github.com/paulmach/orb/README.md | 8 +- vendor/github.com/pierrec/lz4/v4/README.md | 2 +- .../pierrec/lz4/v4/compressing_reader.go | 222 + .../pierrec/lz4/v4/internal/lz4block/block.go | 9 +- .../lz4/v4/internal/lz4block/blocks.go | 5 +- .../lz4/v4/internal/lz4block/decode_arm64.s | 15 +- .../lz4/v4/internal/lz4block/decode_other.go | 13 +- .../lz4/v4/internal/lz4stream/block.go | 4 +- .../lz4/v4/internal/xxh32/xxh32zero.go | 2 +- vendor/github.com/pierrec/lz4/v4/options.go | 28 + vendor/github.com/pierrec/lz4/v4/writer.go | 4 + 
vendor/github.com/pressly/goose/v3/.gitignore | 6 + .../pressly/goose/v3/.goreleaser.yaml | 36 + .../pressly/goose/v3/.goreleaser.yml | 51 - .../github.com/pressly/goose/v3/CHANGELOG.md | 218 + .../pressly/goose/v3/Dockerfile.local | 8 - vendor/github.com/pressly/goose/v3/LICENSE | 5 +- vendor/github.com/pressly/goose/v3/Makefile | 133 +- vendor/github.com/pressly/goose/v3/README.md | 138 +- vendor/github.com/pressly/goose/v3/create.go | 17 +- .../pressly/goose/v3/database/dialect.go | 147 + .../pressly/goose/v3/database/doc.go | 14 + .../pressly/goose/v3/database/sql_extended.go | 23 + .../pressly/goose/v3/database/store.go | 67 + vendor/github.com/pressly/goose/v3/db.go | 36 +- vendor/github.com/pressly/goose/v3/dialect.go | 427 +- vendor/github.com/pressly/goose/v3/down.go | 31 +- .../00001_create_users_table.sql | 14 - .../sql-migrations/00002_rename_root.sql | 9 - .../sql-migrations/00003_no_transaction.sql | 11 - vendor/github.com/pressly/goose/v3/fix.go | 2 +- vendor/github.com/pressly/goose/v3/globals.go | 104 + vendor/github.com/pressly/goose/v3/goose.go | 47 +- vendor/github.com/pressly/goose/v3/install.sh | 8 +- .../dialect/dialectquery/clickhouse.go | 85 + .../dialect/dialectquery/dialectquery.go | 27 + .../v3/internal/dialect/dialectquery/mysql.go | 43 + .../internal/dialect/dialectquery/postgres.go | 43 + .../internal/dialect/dialectquery/redshift.go | 43 + .../internal/dialect/dialectquery/sqlite3.go | 42 + .../dialect/dialectquery/sqlserver.go | 42 + .../v3/internal/dialect/dialectquery/tidb.go | 43 + .../v3/internal/dialect/dialectquery/turso.go | 7 + .../internal/dialect/dialectquery/vertica.go | 43 + .../v3/internal/dialect/dialectquery/ydb.go | 53 + .../goose/v3/internal/dialect/dialects.go | 18 + .../goose/v3/internal/dialect/store.go | 164 + .../goose/v3/internal/gooseutil/resolve.go | 124 + .../goose/v3/internal/sqlparser/parse.go | 59 + .../goose/v3/internal/sqlparser/parser.go | 281 +- .../pressly/goose/v3/lock/postgres.go | 118 + .../pressly/goose/v3/lock/session_locker.go | 23 + .../goose/v3/lock/session_locker_options.go | 98 + vendor/github.com/pressly/goose/v3/log.go | 9 - vendor/github.com/pressly/goose/v3/migrate.go | 328 +- .../github.com/pressly/goose/v3/migration.go | 271 +- .../pressly/goose/v3/migration_sql.go | 56 +- vendor/github.com/pressly/goose/v3/osfs.go | 8 + .../github.com/pressly/goose/v3/provider.go | 641 + .../pressly/goose/v3/provider_collect.go | 196 + .../pressly/goose/v3/provider_errors.go | 44 + .../pressly/goose/v3/provider_options.go | 198 + .../pressly/goose/v3/provider_run.go | 445 + .../pressly/goose/v3/provider_types.go | 91 + vendor/github.com/pressly/goose/v3/redo.go | 13 +- .../github.com/pressly/goose/v3/register.go | 133 + vendor/github.com/pressly/goose/v3/reset.go | 39 +- vendor/github.com/pressly/goose/v3/status.go | 40 +- vendor/github.com/pressly/goose/v3/up.go | 201 +- vendor/github.com/pressly/goose/v3/version.go | 9 +- .../prometheus/procfs/.golangci.yml | 3 + .../prometheus/procfs/Makefile.common | 22 +- vendor/github.com/prometheus/procfs/README.md | 4 +- vendor/github.com/prometheus/procfs/arp.go | 6 +- .../github.com/prometheus/procfs/buddyinfo.go | 6 +- .../github.com/prometheus/procfs/cpuinfo.go | 17 +- vendor/github.com/prometheus/procfs/crypto.go | 7 +- vendor/github.com/prometheus/procfs/fs.go | 11 +- .../prometheus/procfs/fs_statfs_notype.go | 23 + .../prometheus/procfs/fs_statfs_type.go | 33 + .../github.com/prometheus/procfs/fscache.go | 6 +- .../prometheus/procfs/internal/util/parse.go | 15 + 
vendor/github.com/prometheus/procfs/ipvs.go | 7 +- .../github.com/prometheus/procfs/loadavg.go | 4 +- vendor/github.com/prometheus/procfs/mdstat.go | 36 +- .../github.com/prometheus/procfs/meminfo.go | 4 +- .../github.com/prometheus/procfs/mountinfo.go | 10 +- .../prometheus/procfs/mountstats.go | 117 +- .../prometheus/procfs/net_conntrackstat.go | 91 +- .../prometheus/procfs/net_ip_socket.go | 32 +- .../prometheus/procfs/net_protocols.go | 4 +- .../github.com/prometheus/procfs/net_route.go | 143 + .../prometheus/procfs/net_sockstat.go | 9 +- .../prometheus/procfs/net_softnet.go | 9 +- .../github.com/prometheus/procfs/net_unix.go | 16 +- .../prometheus/procfs/net_wireless.go | 182 + .../github.com/prometheus/procfs/net_xfrm.go | 2 +- .../github.com/prometheus/procfs/netstat.go | 25 +- vendor/github.com/prometheus/procfs/proc.go | 37 +- .../prometheus/procfs/proc_cgroup.go | 4 +- .../prometheus/procfs/proc_cgroups.go | 8 +- .../prometheus/procfs/proc_fdinfo.go | 10 +- .../prometheus/procfs/proc_interrupts.go | 2 +- .../prometheus/procfs/proc_limits.go | 4 +- .../github.com/prometheus/procfs/proc_maps.go | 24 +- .../prometheus/procfs/proc_netstat.go | 4 +- .../github.com/prometheus/procfs/proc_ns.go | 6 +- .../github.com/prometheus/procfs/proc_psi.go | 6 +- .../prometheus/procfs/proc_smaps.go | 4 +- .../github.com/prometheus/procfs/proc_snmp.go | 4 +- .../github.com/prometheus/procfs/proc_stat.go | 8 +- .../prometheus/procfs/proc_status.go | 53 +- .../github.com/prometheus/procfs/proc_sys.go | 2 +- vendor/github.com/prometheus/procfs/slab.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 24 +- vendor/github.com/prometheus/procfs/stat.go | 28 +- vendor/github.com/prometheus/procfs/swaps.go | 8 +- vendor/github.com/prometheus/procfs/thread.go | 11 +- vendor/github.com/prometheus/procfs/vm.go | 2 +- .../github.com/prometheus/procfs/zoneinfo.go | 4 +- vendor/github.com/segmentio/asm/LICENSE | 21 + .../github.com/segmentio/asm/bswap/swap64.go | 15 + .../segmentio/asm/bswap/swap64_amd64.go | 8 + .../segmentio/asm/bswap/swap64_amd64.s | 74 + .../segmentio/asm/bswap/swap64_default.go | 13 + .../github.com/segmentio/asm/cpu/arm/arm.go | 80 + .../segmentio/asm/cpu/arm64/arm64.go | 74 + vendor/github.com/segmentio/asm/cpu/cpu.go | 22 + .../segmentio/asm/cpu/cpuid/cpuid.go | 32 + .../github.com/segmentio/asm/cpu/x86/x86.go | 76 + vendor/github.com/sethvargo/go-retry/LICENSE | 202 + .../github.com/sethvargo/go-retry/README.md | 185 + .../github.com/sethvargo/go-retry/backoff.go | 134 + .../sethvargo/go-retry/backoff_constant.go | 25 + .../sethvargo/go-retry/backoff_exponential.go | 47 + .../sethvargo/go-retry/backoff_fibonacci.go | 56 + vendor/github.com/sethvargo/go-retry/rand.go | 54 + vendor/github.com/sethvargo/go-retry/retry.go | 104 + .../github.com/shopspring/decimal/.travis.yml | 19 - .../shopspring/decimal/CHANGELOG.md | 27 + .../github.com/shopspring/decimal/README.md | 25 +- vendor/github.com/shopspring/decimal/const.go | 63 + .../github.com/shopspring/decimal/decimal.go | 775 +- .../vertica/vertica-sql-go/CONTRIBUTING.md | 23 +- .../github.com/vertica/vertica-sql-go/LICENSE | 2 +- .../vertica/vertica-sql-go/README.md | 24 +- .../vertica-sql-go/common/fileutils.go | 2 +- .../vertica/vertica-sql-go/common/osutils.go | 2 +- .../vertica/vertica-sql-go/common/types.go | 3 +- .../vertica/vertica-sql-go/connection.go | 57 +- .../vertica/vertica-sql-go/context.go | 2 +- .../vertica/vertica-sql-go/driver.go | 6 +- .../vertica/vertica-sql-go/errors.go | 2 +- 
.../vertica-sql-go/logger/filelogger.go | 2 +- .../vertica/vertica-sql-go/logger/logger.go | 2 +- .../vertica-sql-go/logger/stdiologger.go | 2 +- .../msgs/beauthenticationmsg.go | 2 +- .../vertica-sql-go/msgs/bebindcompletemsg.go | 2 +- .../vertica-sql-go/msgs/beclosecompletemsg.go | 2 +- .../vertica-sql-go/msgs/becmdcompletemsg.go | 2 +- .../msgs/becmddescriptionmsg.go | 2 +- .../vertica-sql-go/msgs/bedatarowmsg.go | 2 +- .../msgs/beemptyqueryrespmsg.go | 2 +- .../vertica/vertica-sql-go/msgs/beerrormsg.go | 2 +- .../vertica-sql-go/msgs/beinitstdinloadmsg.go | 2 +- .../vertica-sql-go/msgs/bekeydatamsg.go | 2 +- .../vertica-sql-go/msgs/beloadbalancemsg.go | 2 +- .../vertica-sql-go/msgs/beloadnewfilemsg.go | 2 +- .../vertica-sql-go/msgs/benodatamsg.go | 2 +- .../vertica-sql-go/msgs/benoticemsg.go | 2 +- .../vertica-sql-go/msgs/beparameterdescmsg.go | 2 +- .../vertica-sql-go/msgs/beparamstatusmsg.go | 2 +- .../vertica-sql-go/msgs/beparsecomplete.go | 2 +- .../msgs/beportalsuspendedmsg.go | 2 +- .../vertica-sql-go/msgs/bereadyforquerymsg.go | 2 +- .../vertica-sql-go/msgs/berowdescmsg.go | 2 +- .../msgs/beverifyloadfilesmsg.go | 2 +- .../vertica-sql-go/msgs/bewritefilemsg.go | 2 +- .../vertica/vertica-sql-go/msgs/febindmsg.go | 2 +- .../vertica-sql-go/msgs/fecancelmsg.go | 2 +- .../vertica/vertica-sql-go/msgs/feclosemsg.go | 2 +- .../vertica-sql-go/msgs/fedescribemsg.go | 2 +- .../vertica/vertica-sql-go/msgs/feerrormsg.go | 2 +- .../vertica-sql-go/msgs/feexecutemsg.go | 2 +- .../vertica/vertica-sql-go/msgs/feflush.go | 2 +- .../vertica-sql-go/msgs/feloadbalancemsg.go | 2 +- .../vertica-sql-go/msgs/feloaddatamsg.go | 2 +- .../vertica-sql-go/msgs/feloaddonemsg.go | 2 +- .../vertica-sql-go/msgs/feloadfailmsg.go | 2 +- .../vertica/vertica-sql-go/msgs/feparsemsg.go | 2 +- .../vertica-sql-go/msgs/fepasswordmsg.go | 2 +- .../vertica/vertica-sql-go/msgs/fequerymsg.go | 2 +- .../vertica/vertica-sql-go/msgs/fesslmsg.go | 2 +- .../vertica-sql-go/msgs/festartupmsg.go | 51 +- .../vertica/vertica-sql-go/msgs/fesyncmsg.go | 2 +- .../vertica-sql-go/msgs/feterminatemsg.go | 2 +- .../vertica-sql-go/msgs/feverifyloadfiles.go | 2 +- .../vertica/vertica-sql-go/msgs/msg.go | 2 +- .../vertica/vertica-sql-go/msgs/msgbuffer.go | 2 +- .../vertica/vertica-sql-go/parse/queryLex.go | 2 +- .../vertica/vertica-sql-go/result.go | 2 +- .../vertica/vertica-sql-go/rowcache/file.go | 2 +- .../vertica/vertica-sql-go/rowcache/memory.go | 2 +- .../github.com/vertica/vertica-sql-go/rows.go | 2 +- .../github.com/vertica/vertica-sql-go/stmt.go | 2 +- .../github.com/vertica/vertica-sql-go/tx.go | 2 +- .../otel/attribute/README.md | 3 + .../go.opentelemetry.io/otel/attribute/doc.go | 13 +- .../otel/attribute/encoder.go | 13 +- .../otel/attribute/filter.go | 49 + .../otel/attribute/iterator.go | 13 +- .../go.opentelemetry.io/otel/attribute/key.go | 13 +- .../go.opentelemetry.io/otel/attribute/kv.go | 13 +- .../go.opentelemetry.io/otel/attribute/set.go | 221 +- .../otel/attribute/value.go | 108 +- .../go.opentelemetry.io/otel/codes/README.md | 3 + .../go.opentelemetry.io/otel/codes/codes.go | 23 +- vendor/go.opentelemetry.io/otel/codes/doc.go | 15 +- .../otel/internal/attribute/attribute.go | 100 + .../go.opentelemetry.io/otel/internal/gen.go | 18 + .../otel/internal/rawhelpers.go | 13 +- .../go.opentelemetry.io/otel/trace/README.md | 3 + .../go.opentelemetry.io/otel/trace/config.go | 31 +- .../go.opentelemetry.io/otel/trace/context.go | 17 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 79 +- .../otel/trace/embedded/README.md | 3 + 
.../otel/trace/embedded/embedded.go | 45 + .../otel/trace/nonrecording.go | 13 +- vendor/go.opentelemetry.io/otel/trace/noop.go | 34 +- .../go.opentelemetry.io/otel/trace/trace.go | 133 +- .../otel/trace/tracestate.go | 212 +- vendor/go.uber.org/atomic/.codecov.yml | 15 - vendor/go.uber.org/atomic/.gitignore | 11 - vendor/go.uber.org/atomic/.travis.yml | 23 - vendor/go.uber.org/atomic/Makefile | 64 - vendor/go.uber.org/atomic/README.md | 36 - vendor/go.uber.org/atomic/atomic.go | 351 - vendor/go.uber.org/atomic/glide.lock | 17 - vendor/go.uber.org/atomic/glide.yaml | 6 - vendor/go.uber.org/multierr/.gitignore | 3 + vendor/go.uber.org/multierr/.travis.yml | 33 - vendor/go.uber.org/multierr/CHANGELOG.md | 67 + vendor/go.uber.org/multierr/LICENSE.txt | 2 +- vendor/go.uber.org/multierr/Makefile | 56 +- vendor/go.uber.org/multierr/README.md | 30 +- vendor/go.uber.org/multierr/error.go | 421 +- .../go.uber.org/multierr/error_post_go120.go | 48 + .../go.uber.org/multierr/error_pre_go120.go | 79 + vendor/go.uber.org/multierr/glide.lock | 19 - vendor/go.uber.org/multierr/glide.yaml | 8 - vendor/go.uber.org/zap/.gitignore | 4 + vendor/go.uber.org/zap/.golangci.yml | 77 + vendor/go.uber.org/zap/.readme.tmpl | 23 +- vendor/go.uber.org/zap/.travis.yml | 21 - vendor/go.uber.org/zap/CHANGELOG.md | 522 +- vendor/go.uber.org/zap/CONTRIBUTING.md | 27 +- vendor/go.uber.org/zap/FAQ.md | 9 + .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 vendor/go.uber.org/zap/Makefile | 104 +- vendor/go.uber.org/zap/README.md | 91 +- vendor/go.uber.org/zap/array.go | 127 + vendor/go.uber.org/zap/buffer/buffer.go | 33 +- vendor/go.uber.org/zap/buffer/pool.go | 20 +- .../zap/{check_license.sh => checklicense.sh} | 0 vendor/go.uber.org/zap/config.go | 115 +- vendor/go.uber.org/zap/doc.go | 60 +- vendor/go.uber.org/zap/encoder.go | 4 + vendor/go.uber.org/zap/error.go | 14 +- vendor/go.uber.org/zap/field.go | 397 +- vendor/go.uber.org/zap/glide.lock | 76 - vendor/go.uber.org/zap/glide.yaml | 3 +- vendor/go.uber.org/zap/global.go | 2 +- vendor/go.uber.org/zap/http_handler.go | 109 +- vendor/go.uber.org/zap/internal/exit/exit.go | 22 +- .../go.uber.org/zap/internal/level_enabler.go | 37 + vendor/go.uber.org/zap/internal/pool/pool.go | 58 + .../zap/internal/stacktrace/stack.go | 181 + vendor/go.uber.org/zap/level.go | 29 +- vendor/go.uber.org/zap/logger.go | 172 +- vendor/go.uber.org/zap/options.go | 81 +- vendor/go.uber.org/zap/sink.go | 101 +- vendor/go.uber.org/zap/stacktrace.go | 126 - vendor/go.uber.org/zap/sugar.go | 266 +- vendor/go.uber.org/zap/writer.go | 23 +- .../zap/zapcore/buffered_write_syncer.go | 219 + vendor/go.uber.org/zap/zapcore/clock.go | 48 + .../zap/zapcore/console_encoder.go | 52 +- vendor/go.uber.org/zap/zapcore/core.go | 15 +- vendor/go.uber.org/zap/zapcore/encoder.go | 146 +- vendor/go.uber.org/zap/zapcore/entry.go | 115 +- vendor/go.uber.org/zap/zapcore/error.go | 54 +- vendor/go.uber.org/zap/zapcore/field.go | 40 +- vendor/go.uber.org/zap/zapcore/hook.go | 9 + .../go.uber.org/zap/zapcore/increase_level.go | 75 + .../go.uber.org/zap/zapcore/json_encoder.go | 327 +- vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 + vendor/go.uber.org/zap/zapcore/level.go | 54 + vendor/go.uber.org/zap/zapcore/marshaler.go | 8 + .../go.uber.org/zap/zapcore/memory_encoder.go | 2 +- .../zapcore/reflected_encoder.go} | 40 +- vendor/go.uber.org/zap/zapcore/sampler.go | 135 +- vendor/go.uber.org/zap/zapcore/tee.go | 17 +- .../go.uber.org/zap/zapcore/write_syncer.go | 3 +- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- 
vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 135 + vendor/golang.org/x/sync/errgroup/go120.go | 13 + .../golang.org/x/sync/errgroup/pre_go120.go | 14 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 66 + vendor/golang.org/x/sys/cpu/cpu.go | 291 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 182 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 39 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 31 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 116 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 151 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/endian_big.go | 10 + vendor/golang.org/x/sys/cpu/endian_little.go | 10 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 71 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 53 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 18 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 + .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/unix/aliases.go | 4 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - 
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - .../x/sys/unix/asm_openbsd_mips64.s | 1 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 668 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 + vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - vendor/golang.org/x/sys/unix/constants.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/dev_zos.go | 1 - vendor/golang.org/x/sys/unix/dirent.go | 1 - vendor/golang.org/x/sys/unix/endian_big.go | 1 - vendor/golang.org/x/sys/unix/endian_little.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 1 - vendor/golang.org/x/sys/unix/epoll_zos.go | 221 - vendor/golang.org/x/sys/unix/fcntl.go | 3 +- .../x/sys/unix/fcntl_linux_32bit.go | 1 - vendor/golang.org/x/sys/unix/fdset.go | 1 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 - vendor/golang.org/x/sys/unix/gccgo.go | 1 - vendor/golang.org/x/sys/unix/gccgo_c.c | 3 +- .../x/sys/unix/gccgo_linux_amd64.go | 1 - vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 69 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 20 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 21 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 59 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 13 + vendor/golang.org/x/sys/unix/mremap.go | 57 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 3 +- .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 - vendor/golang.org/x/sys/unix/race.go | 1 - vendor/golang.org/x/sys/unix/race0.go | 1 - .../x/sys/unix/readdirent_getdents.go | 1 - .../x/sys/unix/readdirent_getdirentries.go | 3 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - .../x/sys/unix/sockcmsg_unix_other.go | 1 - vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 + .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 + vendor/golang.org/x/sys/unix/syscall.go | 1 - vendor/golang.org/x/sys/unix/syscall_aix.go | 28 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 2 - .../x/sys/unix/syscall_aix_ppc64.go | 2 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 20 +- .../golang.org/x/sys/unix/syscall_darwin.go | 262 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_darwin_libSystem.go | 3 +- .../x/sys/unix/syscall_dragonfly.go | 200 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - .../golang.org/x/sys/unix/syscall_freebsd.go | 248 +- .../x/sys/unix/syscall_freebsd_386.go | 13 +- .../x/sys/unix/syscall_freebsd_amd64.go | 13 +- .../x/sys/unix/syscall_freebsd_arm.go | 11 +- .../x/sys/unix/syscall_freebsd_arm64.go | 11 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 11 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 9 +- .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - .../golang.org/x/sys/unix/syscall_illumos.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 401 +- .../x/sys/unix/syscall_linux_386.go | 28 - .../x/sys/unix/syscall_linux_alarm.go | 2 - .../x/sys/unix/syscall_linux_amd64.go | 4 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 28 - .../x/sys/unix/syscall_linux_arm64.go | 13 +- 
.../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - .../x/sys/unix/syscall_linux_gc_386.go | 1 - .../x/sys/unix/syscall_linux_gc_arm.go | 1 - .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - .../x/sys/unix/syscall_linux_loong64.go | 8 +- .../x/sys/unix/syscall_linux_mips64x.go | 5 +- .../x/sys/unix/syscall_linux_mipsx.go | 29 - .../x/sys/unix/syscall_linux_ppc.go | 28 - .../x/sys/unix/syscall_linux_ppc64x.go | 3 - .../x/sys/unix/syscall_linux_riscv64.go | 15 +- .../x/sys/unix/syscall_linux_s390x.go | 2 - .../x/sys/unix/syscall_linux_sparc64.go | 2 - .../golang.org/x/sys/unix/syscall_netbsd.go | 279 +- .../x/sys/unix/syscall_netbsd_386.go | 1 - .../x/sys/unix/syscall_netbsd_amd64.go | 1 - .../x/sys/unix/syscall_netbsd_arm.go | 1 - .../x/sys/unix/syscall_netbsd_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_openbsd.go | 121 +- .../x/sys/unix/syscall_openbsd_386.go | 1 - .../x/sys/unix/syscall_openbsd_amd64.go | 1 - .../x/sys/unix/syscall_openbsd_arm.go | 1 - .../x/sys/unix/syscall_openbsd_arm64.go | 1 - .../x/sys/unix/syscall_openbsd_libc.go | 1 - .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - .../x/sys/unix/syscall_openbsd_riscv64.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 73 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 - vendor/golang.org/x/sys/unix/syscall_unix.go | 30 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - .../x/sys/unix/syscall_zos_s390x.go | 1517 +- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_unix.go | 3 +- .../x/sys/unix/sysvshm_unix_other.go | 3 +- vendor/golang.org/x/sys/unix/timestruct.go | 3 +- .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 10 +- .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - .../x/sys/unix/zerrors_aix_ppc64.go | 1 - .../x/sys/unix/zerrors_darwin_amd64.go | 20 +- .../x/sys/unix/zerrors_darwin_arm64.go | 20 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_386.go | 1 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_arm.go | 1 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1 - .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 233 +- .../x/sys/unix/zerrors_linux_386.go | 17 +- .../x/sys/unix/zerrors_linux_amd64.go | 17 +- .../x/sys/unix/zerrors_linux_arm.go | 16 +- .../x/sys/unix/zerrors_linux_arm64.go | 19 +- .../x/sys/unix/zerrors_linux_loong64.go | 19 +- .../x/sys/unix/zerrors_linux_mips.go | 16 +- .../x/sys/unix/zerrors_linux_mips64.go | 16 +- .../x/sys/unix/zerrors_linux_mips64le.go | 16 +- .../x/sys/unix/zerrors_linux_mipsle.go | 16 +- .../x/sys/unix/zerrors_linux_ppc.go | 16 +- .../x/sys/unix/zerrors_linux_ppc64.go | 16 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 16 +- .../x/sys/unix/zerrors_linux_riscv64.go | 19 +- .../x/sys/unix/zerrors_linux_s390x.go | 16 +- .../x/sys/unix/zerrors_linux_sparc64.go | 64 +- .../x/sys/unix/zerrors_netbsd_386.go | 1 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - .../x/sys/unix/zerrors_netbsd_arm.go | 1 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_386.go | 1 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - .../x/sys/unix/zerrors_openbsd_arm.go | 1 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - .../x/sys/unix/zerrors_solaris_amd64.go | 
1 - .../x/sys/unix/zerrors_zos_s390x.go | 234 +- .../x/sys/unix/zptrace_armnn_linux.go | 10 +- .../x/sys/unix/zptrace_linux_arm64.go | 4 +- .../x/sys/unix/zptrace_mipsnn_linux.go | 10 +- .../x/sys/unix/zptrace_mipsnnle_linux.go | 10 +- .../x/sys/unix/zptrace_x86_linux.go | 10 +- .../x/sys/unix/zsymaddr_zos_s390x.s | 364 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 46 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 47 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 18 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 19 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 105 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 170 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 105 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 170 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 43 +- .../x/sys/unix/zsyscall_freebsd_386.go | 53 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 53 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 53 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 53 +- .../x/sys/unix/zsyscall_freebsd_riscv64.go | 53 +- .../x/sys/unix/zsyscall_illumos_amd64.go | 11 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 136 +- .../x/sys/unix/zsyscall_linux_386.go | 11 - .../x/sys/unix/zsyscall_linux_amd64.go | 11 - .../x/sys/unix/zsyscall_linux_arm.go | 11 - .../x/sys/unix/zsyscall_linux_arm64.go | 11 - .../x/sys/unix/zsyscall_linux_loong64.go | 1 - .../x/sys/unix/zsyscall_linux_mips.go | 11 - .../x/sys/unix/zsyscall_linux_mips64.go | 11 - .../x/sys/unix/zsyscall_linux_mips64le.go | 11 - .../x/sys/unix/zsyscall_linux_mipsle.go | 11 - .../x/sys/unix/zsyscall_linux_ppc.go | 11 - .../x/sys/unix/zsyscall_linux_ppc64.go | 11 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 11 - .../x/sys/unix/zsyscall_linux_riscv64.go | 27 +- .../x/sys/unix/zsyscall_linux_s390x.go | 11 - .../x/sys/unix/zsyscall_linux_sparc64.go | 11 - .../x/sys/unix/zsyscall_netbsd_386.go | 50 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 50 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 50 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 50 +- .../x/sys/unix/zsyscall_openbsd_386.go | 116 +- .../x/sys/unix/zsyscall_openbsd_386.s | 35 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 116 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 35 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 116 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 35 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 116 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 35 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 116 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 35 +- .../x/sys/unix/zsyscall_openbsd_ppc64.go | 116 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 42 +- .../x/sys/unix/zsyscall_openbsd_riscv64.go | 116 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 35 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 281 +- .../x/sys/unix/zsyscall_zos_s390x.go | 3113 +- .../x/sys/unix/zsysctl_openbsd_386.go | 1 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_386.go | 1 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_linux_386.go | 12 +- .../x/sys/unix/zsysnum_linux_amd64.go | 12 +- 
.../x/sys/unix/zsysnum_linux_arm.go | 12 +- .../x/sys/unix/zsysnum_linux_arm64.go | 12 +- .../x/sys/unix/zsysnum_linux_loong64.go | 12 +- .../x/sys/unix/zsysnum_linux_mips.go | 12 +- .../x/sys/unix/zsysnum_linux_mips64.go | 12 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 12 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 12 +- .../x/sys/unix/zsysnum_linux_ppc.go | 12 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 12 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 12 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 14 +- .../x/sys/unix/zsysnum_linux_s390x.go | 13 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 12 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_zos_s390x.go | 5508 +- .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - .../x/sys/unix/ztypes_darwin_amd64.go | 12 +- .../x/sys/unix/ztypes_darwin_arm64.go | 12 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_386.go | 3 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 3 +- .../x/sys/unix/ztypes_freebsd_arm.go | 3 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 3 +- .../x/sys/unix/ztypes_freebsd_riscv64.go | 3 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 595 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 13 +- .../x/sys/unix/ztypes_linux_amd64.go | 14 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 14 +- .../x/sys/unix/ztypes_linux_arm64.go | 14 +- .../x/sys/unix/ztypes_linux_loong64.go | 14 +- .../x/sys/unix/ztypes_linux_mips.go | 14 +- .../x/sys/unix/ztypes_linux_mips64.go | 14 +- .../x/sys/unix/ztypes_linux_mips64le.go | 14 +- .../x/sys/unix/ztypes_linux_mipsle.go | 14 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 14 +- .../x/sys/unix/ztypes_linux_ppc64.go | 14 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 14 +- .../x/sys/unix/ztypes_linux_riscv64.go | 41 +- .../x/sys/unix/ztypes_linux_s390x.go | 14 +- .../x/sys/unix/ztypes_linux_sparc64.go | 14 +- .../x/sys/unix/ztypes_netbsd_386.go | 1 - .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - .../x/sys/unix/ztypes_netbsd_arm.go | 1 - .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_386.go | 1 - .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - .../x/sys/unix/ztypes_openbsd_arm.go | 1 - .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - .../x/sys/unix/ztypes_solaris_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 147 +- vendor/golang.org/x/sys/windows/aliases.go | 3 +- vendor/golang.org/x/sys/windows/empty.s | 9 - .../golang.org/x/sys/windows/env_windows.go | 17 +- vendor/golang.org/x/sys/windows/eventlog.go | 1 - .../golang.org/x/sys/windows/exec_windows.go | 92 +- vendor/golang.org/x/sys/windows/mksyscall.go | 1 - vendor/golang.org/x/sys/windows/race.go | 1 - vendor/golang.org/x/sys/windows/race0.go | 1 - .../golang.org/x/sys/windows/registry/key.go | 1 - .../x/sys/windows/registry/mksyscall.go | 1 - .../x/sys/windows/registry/syscall.go | 1 - .../x/sys/windows/registry/value.go | 1 - 
.../x/sys/windows/security_windows.go | 46 +- vendor/golang.org/x/sys/windows/service.go | 12 +- vendor/golang.org/x/sys/windows/str.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 1 - .../x/sys/windows/syscall_windows.go | 182 +- .../golang.org/x/sys/windows/types_windows.go | 154 +- .../x/sys/windows/zsyscall_windows.go | 261 +- .../x/text/unicode/norm/forminfo.go | 2 +- .../x/text/unicode/norm/tables10.0.0.go | 1 - .../x/text/unicode/norm/tables11.0.0.go | 1 - .../x/text/unicode/norm/tables12.0.0.go | 1 - .../x/text/unicode/norm/tables13.0.0.go | 3 +- .../x/text/unicode/norm/tables15.0.0.go | 7907 + .../x/text/unicode/norm/tables9.0.0.go | 1 - vendor/golang.org/x/text/unicode/norm/trie.go | 2 +- vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 1000 + vendor/gopkg.in/yaml.v3/emitterc.go | 2020 + vendor/gopkg.in/yaml.v3/encode.go | 577 + vendor/gopkg.in/yaml.v3/parserc.go | 1258 + vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3038 + vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/modules.txt | 132 +- 1277 files changed, 363470 insertions(+), 16383 deletions(-) create mode 100644 vendor/filippo.io/edwards25519/LICENSE create mode 100644 vendor/filippo.io/edwards25519/README.md create mode 100644 vendor/filippo.io/edwards25519/doc.go create mode 100644 vendor/filippo.io/edwards25519/edwards25519.go create mode 100644 vendor/filippo.io/edwards25519/extra.go create mode 100644 vendor/filippo.io/edwards25519/field/fe.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.s create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.s create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_extra.go create mode 100644 vendor/filippo.io/edwards25519/field/fe_generic.go create mode 100644 vendor/filippo.io/edwards25519/scalar.go create mode 100644 vendor/filippo.io/edwards25519/scalar_fiat.go create mode 100644 vendor/filippo.io/edwards25519/scalarmult.go create mode 100644 vendor/filippo.io/edwards25519/tables.go create mode 100644 vendor/github.com/ClickHouse/ch-go/AUTHORS create mode 100644 vendor/github.com/ClickHouse/ch-go/LICENSE create mode 100644 vendor/github.com/ClickHouse/ch-go/compress/compress.go create mode 100644 vendor/github.com/ClickHouse/ch-go/compress/method_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/compress/reader.go create mode 100644 vendor/github.com/ClickHouse/ch-go/compress/writer.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/block.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/bool.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/buffer.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_code.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_code_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_data.go create mode 100644 
vendor/github.com/ClickHouse/ch-go/proto/client_hello.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_info.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_info_interface_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/client_info_query_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_arr.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_auto.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_bool.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date32.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go 
create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go create mode 100644 
vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_interval.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_map.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_point.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_raw.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_str.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/column.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/compression.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/compression_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/date.go create mode 100644 
vendor/github.com/ClickHouse/ch-go/proto/date32.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/datetime.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/datetime64.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/decimal.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/enum16.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/enum8.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/error.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/error_codes.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/error_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/exception.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/feature.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/int128.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/int256.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/interval_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/ipv4.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/ipv6.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/profile.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/profile_events.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/progress.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/proto.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/query.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/reader.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/reset.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/results.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/server_code.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/server_code_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/server_hello.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/server_log.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/stage.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/stage_enum.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/table_columns.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go rename vendor/github.com/ClickHouse/clickhouse-go/v2/{lib/column/date_helpers.go => conn_http_async_insert.go} (53%) create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go rename vendor/github.com/ClickHouse/clickhouse-go/v2/{lib/compress/compress.go => conn_http_exec.go} (61%) create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go rename vendor/github.com/ClickHouse/clickhouse-go/v2/{lib/cityhash102/city64.go => context_watchdog.go} (52%) delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/decoder.go delete mode 100644 
vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/encoder.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/cityhash.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/doc.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_safe_gen.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_unsafe_gen.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_reader.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_writer.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go delete mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/io/stream.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.yml create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/v1_v2_CHANGES.md create mode 100644 vendor/github.com/andybalholm/brotli/LICENSE create mode 100644 vendor/github.com/andybalholm/brotli/README.md create mode 100644 vendor/github.com/andybalholm/brotli/backward_references.go create mode 100644 vendor/github.com/andybalholm/brotli/backward_references_hq.go create mode 100644 vendor/github.com/andybalholm/brotli/bit_cost.go create mode 100644 vendor/github.com/andybalholm/brotli/bit_reader.go create mode 100644 vendor/github.com/andybalholm/brotli/bitwriter.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_command.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/brotli_bit_stream.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster_command.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/command.go create mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment.go create mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go create mode 100644 vendor/github.com/andybalholm/brotli/constants.go create mode 100644 vendor/github.com/andybalholm/brotli/context.go create mode 100644 vendor/github.com/andybalholm/brotli/decode.go create mode 100644 vendor/github.com/andybalholm/brotli/dictionary.go create mode 100644 vendor/github.com/andybalholm/brotli/dictionary_hash.go create mode 100644 vendor/github.com/andybalholm/brotli/encode.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder_dict.go create mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode.go create mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode_static.go create mode 100644 vendor/github.com/andybalholm/brotli/fast_log.go create mode 100644 
vendor/github.com/andybalholm/brotli/find_match_length.go create mode 100644 vendor/github.com/andybalholm/brotli/h10.go create mode 100644 vendor/github.com/andybalholm/brotli/h5.go create mode 100644 vendor/github.com/andybalholm/brotli/h6.go create mode 100644 vendor/github.com/andybalholm/brotli/hash.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_composite.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_rolling.go create mode 100644 vendor/github.com/andybalholm/brotli/histogram.go create mode 100644 vendor/github.com/andybalholm/brotli/http.go create mode 100644 vendor/github.com/andybalholm/brotli/huffman.go create mode 100644 vendor/github.com/andybalholm/brotli/literal_cost.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/emitter.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m0.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m4.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go create mode 100644 vendor/github.com/andybalholm/brotli/memory.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_command.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/params.go create mode 100644 vendor/github.com/andybalholm/brotli/platform.go create mode 100644 vendor/github.com/andybalholm/brotli/prefix.go create mode 100644 vendor/github.com/andybalholm/brotli/prefix_dec.go create mode 100644 vendor/github.com/andybalholm/brotli/quality.go create mode 100644 vendor/github.com/andybalholm/brotli/reader.go create mode 100644 vendor/github.com/andybalholm/brotli/ringbuffer.go create mode 100644 vendor/github.com/andybalholm/brotli/state.go create mode 100644 vendor/github.com/andybalholm/brotli/static_dict.go create mode 100644 vendor/github.com/andybalholm/brotli/static_dict_lut.go create mode 100644 vendor/github.com/andybalholm/brotli/symbol_list.go create mode 100644 vendor/github.com/andybalholm/brotli/transform.go create mode 100644 vendor/github.com/andybalholm/brotli/utf8_util.go create mode 100644 vendor/github.com/andybalholm/brotli/util.go create mode 100644 vendor/github.com/andybalholm/brotli/write_bits.go create mode 100644 vendor/github.com/andybalholm/brotli/writer.go create mode 100644 vendor/github.com/elastic/go-sysinfo/.golangci.yml delete mode 100644 vendor/github.com/elastic/go-sysinfo/CHANGELOG.md create mode 100644 vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go create mode 100644 vendor/github.com/go-faster/city/.codecov.yml create mode 100644 vendor/github.com/go-faster/city/.gitignore create mode 100644 vendor/github.com/go-faster/city/.golangci.yml create mode 100644 vendor/github.com/go-faster/city/128.go create mode 100644 vendor/github.com/go-faster/city/32.go create mode 100644 vendor/github.com/go-faster/city/64.go rename vendor/{go.uber.org/atomic/LICENSE.txt => github.com/go-faster/city/LICENSE} (87%) create mode 100644 
vendor/github.com/go-faster/city/Makefile create mode 100644 vendor/github.com/go-faster/city/README.md create mode 100644 vendor/github.com/go-faster/city/ch_128.go create mode 100644 vendor/github.com/go-faster/city/ch_64.go create mode 100644 vendor/github.com/go-faster/city/doc.go create mode 100644 vendor/github.com/go-faster/city/go.coverage.sh create mode 100644 vendor/github.com/go-faster/city/go.test.sh create mode 100644 vendor/github.com/go-faster/errors/.codecov.yml create mode 100644 vendor/github.com/go-faster/errors/.editorconfig create mode 100644 vendor/github.com/go-faster/errors/.gitignore create mode 100644 vendor/github.com/go-faster/errors/.golangci.yml create mode 100644 vendor/github.com/go-faster/errors/LICENSE create mode 100644 vendor/github.com/go-faster/errors/Makefile create mode 100644 vendor/github.com/go-faster/errors/README.md create mode 100644 vendor/github.com/go-faster/errors/adaptor.go create mode 100644 vendor/github.com/go-faster/errors/doc.go create mode 100644 vendor/github.com/go-faster/errors/errors.go create mode 100644 vendor/github.com/go-faster/errors/format.go create mode 100644 vendor/github.com/go-faster/errors/frame.go create mode 100644 vendor/github.com/go-faster/errors/go.coverage.sh create mode 100644 vendor/github.com/go-faster/errors/go.test.sh create mode 100644 vendor/github.com/go-faster/errors/into.go create mode 100644 vendor/github.com/go-faster/errors/join_go120.go create mode 100644 vendor/github.com/go-faster/errors/must.go create mode 100644 vendor/github.com/go-faster/errors/noerrtrace.go create mode 100644 vendor/github.com/go-faster/errors/trace.go create mode 100644 vendor/github.com/go-faster/errors/wrap.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/fuzz.go delete mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md 
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 vendor/github.com/magiconair/properties/.travis.yml rename vendor/github.com/magiconair/properties/{LICENSE => LICENSE.md} (84%) create mode 100644 vendor/github.com/mfridman/interpolate/LICENSE.txt create mode 100644 vendor/github.com/mfridman/interpolate/README.md create mode 100644 vendor/github.com/mfridman/interpolate/env.go create mode 100644 vendor/github.com/mfridman/interpolate/interpolate.go create mode 100644 vendor/github.com/mfridman/interpolate/parser.go create mode 100644 vendor/github.com/pierrec/lz4/v4/compressing_reader.go create mode 100644 vendor/github.com/pressly/goose/v3/.goreleaser.yaml delete mode 100644 vendor/github.com/pressly/goose/v3/.goreleaser.yml create mode 100644 vendor/github.com/pressly/goose/v3/CHANGELOG.md delete mode 100644 vendor/github.com/pressly/goose/v3/Dockerfile.local create mode 100644 vendor/github.com/pressly/goose/v3/database/dialect.go create mode 100644 vendor/github.com/pressly/goose/v3/database/doc.go create mode 100644 vendor/github.com/pressly/goose/v3/database/sql_extended.go create mode 100644 vendor/github.com/pressly/goose/v3/database/store.go delete mode 100644 vendor/github.com/pressly/goose/v3/examples/sql-migrations/00001_create_users_table.sql delete mode 100644 vendor/github.com/pressly/goose/v3/examples/sql-migrations/00002_rename_root.sql delete mode 100644 vendor/github.com/pressly/goose/v3/examples/sql-migrations/00003_no_transaction.sql create mode 100644 vendor/github.com/pressly/goose/v3/globals.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/clickhouse.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/dialectquery.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/mysql.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/postgres.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/redshift.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlite3.go create mode 100644 
vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlserver.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/tidb.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/turso.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/vertica.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/ydb.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/dialects.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/dialect/store.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/gooseutil/resolve.go create mode 100644 vendor/github.com/pressly/goose/v3/internal/sqlparser/parse.go create mode 100644 vendor/github.com/pressly/goose/v3/lock/postgres.go create mode 100644 vendor/github.com/pressly/goose/v3/lock/session_locker.go create mode 100644 vendor/github.com/pressly/goose/v3/lock/session_locker_options.go create mode 100644 vendor/github.com/pressly/goose/v3/provider.go create mode 100644 vendor/github.com/pressly/goose/v3/provider_collect.go create mode 100644 vendor/github.com/pressly/goose/v3/provider_errors.go create mode 100644 vendor/github.com/pressly/goose/v3/provider_options.go create mode 100644 vendor/github.com/pressly/goose/v3/provider_run.go create mode 100644 vendor/github.com/pressly/goose/v3/provider_types.go create mode 100644 vendor/github.com/pressly/goose/v3/register.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_notype.go create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_type.go create mode 100644 vendor/github.com/prometheus/procfs/net_route.go create mode 100644 vendor/github.com/prometheus/procfs/net_wireless.go create mode 100644 vendor/github.com/segmentio/asm/LICENSE create mode 100644 vendor/github.com/segmentio/asm/bswap/swap64.go create mode 100644 vendor/github.com/segmentio/asm/bswap/swap64_amd64.go create mode 100644 vendor/github.com/segmentio/asm/bswap/swap64_amd64.s create mode 100644 vendor/github.com/segmentio/asm/bswap/swap64_default.go create mode 100644 vendor/github.com/segmentio/asm/cpu/arm/arm.go create mode 100644 vendor/github.com/segmentio/asm/cpu/arm64/arm64.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpu.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go create mode 100644 vendor/github.com/segmentio/asm/cpu/x86/x86.go create mode 100644 vendor/github.com/sethvargo/go-retry/LICENSE create mode 100644 vendor/github.com/sethvargo/go-retry/README.md create mode 100644 vendor/github.com/sethvargo/go-retry/backoff.go create mode 100644 vendor/github.com/sethvargo/go-retry/backoff_constant.go create mode 100644 vendor/github.com/sethvargo/go-retry/backoff_exponential.go create mode 100644 vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go create mode 100644 vendor/github.com/sethvargo/go-retry/rand.go create mode 100644 vendor/github.com/sethvargo/go-retry/retry.go delete mode 100644 vendor/github.com/shopspring/decimal/.travis.yml create mode 100644 vendor/github.com/shopspring/decimal/const.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/README.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/README.md create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go create mode 100644 
vendor/go.opentelemetry.io/otel/trace/README.md create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/README.md create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go delete mode 100644 vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 vendor/go.uber.org/atomic/.gitignore delete mode 100644 vendor/go.uber.org/atomic/.travis.yml delete mode 100644 vendor/go.uber.org/atomic/Makefile delete mode 100644 vendor/go.uber.org/atomic/README.md delete mode 100644 vendor/go.uber.org/atomic/atomic.go delete mode 100644 vendor/go.uber.org/atomic/glide.lock delete mode 100644 vendor/go.uber.org/atomic/glide.yaml delete mode 100644 vendor/go.uber.org/multierr/.travis.yml create mode 100644 vendor/go.uber.org/multierr/error_post_go120.go create mode 100644 vendor/go.uber.org/multierr/error_pre_go120.go delete mode 100644 vendor/go.uber.org/multierr/glide.lock delete mode 100644 vendor/go.uber.org/multierr/glide.yaml create mode 100644 vendor/go.uber.org/zap/.golangci.yml delete mode 100644 vendor/go.uber.org/zap/.travis.yml rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) rename vendor/go.uber.org/zap/{check_license.sh => checklicense.sh} (100%) delete mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/internal/level_enabler.go create mode 100644 vendor/go.uber.org/zap/internal/pool/pool.go create mode 100644 vendor/go.uber.org/zap/internal/stacktrace/stack.go delete mode 100644 vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go create mode 100644 vendor/go.uber.org/zap/zapcore/clock.go create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go rename vendor/go.uber.org/{atomic/string.go => zap/zapcore/reflected_encoder.go} (65%) create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 
100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (76%) create mode 100644 vendor/golang.org/x/sys/unix/mmap_nomremap.go create mode 100644 vendor/golang.org/x/sys/unix/mremap.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go create mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/text/unicode/norm/tables15.0.0.go create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 127cfdd..eeb204f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: - name: Install Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.23 check-latest: true cache: true diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 2947c29..96a803b 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.23 - name: Run GoReleaser uses: goreleaser/goreleaser-action@v4 diff --git a/Dockerfile b/Dockerfile index e3ba5c7..eb5989c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.4-alpine3.18 AS build +FROM golang:1.23.0-alpine3.20 AS build RUN apk add make tzdata COPY . /go/src/miga WORKDIR /go/src/miga diff --git a/README.md b/README.md index e43a799..e50985a 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ Aimed to add extra features and hide some limitations of existing golang migrati PackageName | Version | Postgres | MySQL | Clickhouse | Vertica ----------- | ------- | ------------------- | -------- | ---------- | ---- -[goose](https://github.com/pressly/goose) | 3.9.1 ([patch](https://github.com/chapsuk/goose/commit/d8dae35e216b5b70d3db4e986884f715b5a280cc) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: +[goose](https://github.com/pressly/goose) | 3.21.1 ([patch](https://github.com/chapsuk/goose/commit/b77972a357125bf35a19ed10161c3b9070fc7993)) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: [migrate](https://github.com/golang-migrate/migrate) | 4.2.5 | :heavy_check_mark: | :heavy_check_mark: | | [impg](https://github.com/im-kulikov/migrate) | 0.1 | :heavy_check_mark: | | | diff --git a/go.mod b/go.mod index 449258b..0c6fde0 100644 --- a/go.mod +++ b/go.mod @@ -1,31 +1,36 @@ module miga -go 1.19 +go 1.23 -replace github.com/pressly/goose/v3 v3.9.0 => github.com/chapsuk/goose/v3 v3.0.0-20230127172535-d8dae35e216b +replace github.com/pressly/goose/v3 v3.21.1 => github.com/chapsuk/goose/v3 v3.0.0-20240820170337-b77972a35712 require ( - github.com/ClickHouse/clickhouse-go/v2 v2.2.0 + github.com/ClickHouse/clickhouse-go/v2 v2.27.1 github.com/go-pg/pg v8.0.3+incompatible - github.com/go-sql-driver/mysql v1.7.0 + github.com/go-sql-driver/mysql v1.8.1 github.com/golang-migrate/migrate/v4 v4.2.5 github.com/im-kulikov/migrate v0.1.0 github.com/lib/pq v1.10.6 github.com/pkg/errors v0.9.1 - github.com/pressly/goose/v3 v3.9.0 + github.com/pressly/goose/v3 v3.21.1 github.com/smartystreets/goconvey v1.6.4 github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.3.2 - github.com/vertica/vertica-sql-go v1.3.1 - go.uber.org/zap v1.9.1 + github.com/vertica/vertica-sql-go v1.3.3 + go.uber.org/zap v1.27.0 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/BurntSushi/toml v0.3.1 // indirect - github.com/elastic/go-sysinfo v1.9.0 // indirect + github.com/ClickHouse/ch-go v0.61.5 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect + github.com/elastic/go-sysinfo v1.11.2 // indirect github.com/elastic/go-windows v1.0.1 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/go-faster/city v1.0.1 // indirect + github.com/go-faster/errors v0.7.1 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // 
indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.0.0 // indirect @@ -34,29 +39,33 @@ require ( github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/magiconair/properties v1.8.0 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/onsi/ginkgo v1.15.0 // indirect github.com/onsi/gomega v1.10.5 // indirect - github.com/paulmach/orb v0.7.1 // indirect + github.com/paulmach/orb v0.11.1 // indirect github.com/pelletier/go-toml v1.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect github.com/spf13/afero v1.1.2 // indirect github.com/spf13/cast v1.3.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.opentelemetry.io/otel v1.9.0 // indirect - go.opentelemetry.io/otel/trace v1.9.0 // indirect - go.uber.org/atomic v1.3.2 // indirect - go.uber.org/multierr v1.1.0 // indirect - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/trace v1.26.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect mellium.im/sasl v0.3.1 // indirect ) diff --git a/go.sum b/go.sum index b43c9bc..aedff99 100644 --- a/go.sum +++ b/go.sum @@ -2,29 +2,33 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.28.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= contrib.go.opencensus.io/exporter/stackdriver v0.6.0/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.0.0-20180924222215-a9235805469b/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1 
h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0= -github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o= +github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4= +github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg= +github.com/ClickHouse/clickhouse-go/v2 v2.27.1 h1:cSUewKnQ2XWvCNpCV0WRAQGvShElJ1Qyb6nDq8GId/I= +github.com/ClickHouse/clickhouse-go/v2 v2.27.1/go.mod h1:XvcaX7ai9T9si83rZ0cB3y2upq9AYMwdj16Trqm+sPg= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.15.54/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/chapsuk/goose/v3 v3.0.0-20230127172535-d8dae35e216b h1:2PabApnk2+DjDuJr2AD4LsWFiD/OgkFUmnMWfCDJT6k= -github.com/chapsuk/goose/v3 v3.0.0-20230127172535-d8dae35e216b/go.mod h1:MM80AHPkHlmtRxQxxxp2ioe+Yj0suUYOY4JDjQ9Y6Ko= +github.com/chapsuk/goose/v3 v3.0.0-20240820170337-b77972a35712 h1:jnkrhnckk8OSdti3ofRzr1rKdloK4ELXa8xerlknMjs= +github.com/chapsuk/goose/v3 v3.0.0-20240820170337-b77972a35712/go.mod h1:3cbFEcrkpSdwbzqv0g81mfughlLVY5dlvrIMyi8NBk8= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -46,40 +50,49 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod 
h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM= -github.com/elastic/go-sysinfo v1.9.0 h1:usICqY/Nw4Mpn9f4LdtpFrKxXroJDe81GaxxUlCckIo= -github.com/elastic/go-sysinfo v1.9.0/go.mod h1:eBD1wEGVaRnRLGecc9iG1z8eOv5HnEdz9+nWd8UAxcE= +github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= +github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/fake-gcs-server v1.3.0/go.mod h1:Lq+43m2znsXfDKHnQMfdA0HpYYAEJsfizsbpk5k3TLo= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod 
h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ini/ini v1.39.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-pg/pg v8.0.3+incompatible h1:h+wfgh3nBdyTcYPQtK7GGnAFhVUGH33F9SEcMv5wbzI= github.com/go-pg/pg v8.0.3+incompatible/go.mod h1:a2oXow+aFOrvwcKs3eIA0lNFmMilrxK2sOkB5NWe0vA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gocql/gocql v0.0.0-20181124151448-70385f88b28b/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -100,31 +113,33 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/gopherjs/gopherjs v0.0.0-20181004151105-1babbf986f6f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -139,38 +154,50 @@ github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYX github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.4/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/mongodb/mongo-go-driver v0.1.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= 
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -183,17 +210,18 @@ github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU= -github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -206,20 +234,23 @@ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7q github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa h1:tEkEyxYeZ43TR55QU/hsIt9aRGBxbgGuz9CGykjvogY= +github.com/prometheus/procfs 
v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= @@ -240,51 +271,54 @@ github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify 
v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/vertica/vertica-sql-go v1.3.1 h1:qjkJzkFmLG+z2koRC6inT+yFr23TyBkNXUP4vf92rSQ= -github.com/vertica/vertica-sql-go v1.3.1/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= -go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= -go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.26.0 
h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -299,7 +333,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod 
h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -307,6 +343,9 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -318,22 +357,24 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -344,7 +385,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -372,8 +412,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.39.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -395,15 +436,19 @@ honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWh howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= -modernc.org/cc/v3 
v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= -modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= -modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= -modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= -modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= -modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= -modernc.org/sqlite v1.20.2 h1:9AaVzJH1Yf0u9iOZRjjuvqxLoGqybqVFbAUC5rvi9u8= -modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/sqlite v1.32.0 h1:6BM4uGza7bWypsw4fdLRsLxut6bHe4c58VeqjRgST8s= +modernc.org/sqlite v1.32.0/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/filippo.io/edwards25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md new file mode 100644 index 0000000..24e2457 --- /dev/null +++ b/vendor/filippo.io/edwards25519/README.md @@ -0,0 +1,14 @@ +# filippo.io/edwards25519 + +``` +import "filippo.io/edwards25519" +``` + +This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives. +Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519). + +The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality. + +Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative. + +Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements. diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go new file mode 100644 index 0000000..ab6aaeb --- /dev/null +++ b/vendor/filippo.io/edwards25519/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edwards25519 implements group logic for the twisted Edwards curve +// +// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2 +// +// This is better known as the Edwards curve equivalent to Curve25519, and is +// the curve used by the Ed25519 signature scheme. +// +// Most users don't need this package, and should instead use crypto/ed25519 for +// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or +// github.com/gtank/ristretto255 for prime order group logic. +// +// However, developers who do need to interact with low-level edwards25519 +// operations can use this package, which is an extended version of +// crypto/internal/edwards25519 from the standard library repackaged as +// an importable module. +package edwards25519 diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go new file mode 100644 index 0000000..a744da2 --- /dev/null +++ b/vendor/filippo.io/edwards25519/edwards25519.go @@ -0,0 +1,427 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// Point types. + +type projP1xP1 struct { + X, Y, Z, T field.Element +} + +type projP2 struct { + X, Y, Z field.Element +} + +// Point represents a point on the edwards25519 curve. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. 
+// +// The zero value is NOT valid, and it may be used only as a receiver. +type Point struct { + // Make the type not comparable (i.e. used with == or as a map key), as + // equivalent points can be represented by different Go values. + _ incomparable + + // The point is internally represented in extended coordinates (X, Y, Z, T) + // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522. + x, y, z, t field.Element +} + +type incomparable [0]func() + +func checkInitialized(points ...*Point) { + for _, p := range points { + if p.x == (field.Element{}) && p.y == (field.Element{}) { + panic("edwards25519: use of uninitialized Point") + } + } +} + +type projCached struct { + YplusX, YminusX, Z, T2d field.Element +} + +type affineCached struct { + YplusX, YminusX, T2d field.Element +} + +// Constructors. + +func (v *projP2) Zero() *projP2 { + v.X.Zero() + v.Y.One() + v.Z.One() + return v +} + +// identity is the point at infinity. +var identity, _ = new(Point).SetBytes([]byte{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + +// NewIdentityPoint returns a new Point set to the identity. +func NewIdentityPoint() *Point { + return new(Point).Set(identity) +} + +// generator is the canonical curve basepoint. See TestGenerator for the +// correspondence of this encoding with the values in RFC 8032. +var generator, _ = new(Point).SetBytes([]byte{ + 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}) + +// NewGeneratorPoint returns a new Point set to the canonical generator. +func NewGeneratorPoint() *Point { + return new(Point).Set(generator) +} + +func (v *projCached) Zero() *projCached { + v.YplusX.One() + v.YminusX.One() + v.Z.One() + v.T2d.Zero() + return v +} + +func (v *affineCached) Zero() *affineCached { + v.YplusX.One() + v.YminusX.One() + v.T2d.Zero() + return v +} + +// Assignments. + +// Set sets v = u, and returns v. +func (v *Point) Set(u *Point) *Point { + *v = *u + return v +} + +// Encoding. + +// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032, +// Section 5.1.2. +func (v *Point) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytes(&buf) +} + +func (v *Point) bytes(buf *[32]byte) []byte { + checkInitialized(v) + + var zInv, x, y field.Element + zInv.Invert(&v.z) // zInv = 1 / Z + x.Multiply(&v.x, &zInv) // x = X / Z + y.Multiply(&v.y, &zInv) // y = Y / Z + + out := copyFieldElement(buf, &y) + out[31] |= byte(x.IsNegative() << 7) + return out +} + +var feOne = new(field.Element).One() + +// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not +// represent a valid point on the curve, SetBytes returns nil and an error and +// the receiver is unchanged. Otherwise, SetBytes returns v. +// +// Note that SetBytes accepts all non-canonical encodings of valid points. +// That is, it follows decoding rules that match most implementations in +// the ecosystem rather than RFC 8032. +func (v *Point) SetBytes(x []byte) (*Point, error) { + // Specifically, the non-canonical encodings that are accepted are + // 1) the ones where the field element is not reduced (see the + // (*field.Element).SetBytes docs) and + // 2) the ones where the x-coordinate is zero and the sign bit is set. 
+ // + // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am, + // specifically the "Canonical A, R" section. + + y, err := new(field.Element).SetBytes(x) + if err != nil { + return nil, errors.New("edwards25519: invalid point encoding length") + } + + // -x² + y² = 1 + dx²y² + // x² + dx²y² = x²(dy² + 1) = y² - 1 + // x² = (y² - 1) / (dy² + 1) + + // u = y² - 1 + y2 := new(field.Element).Square(y) + u := new(field.Element).Subtract(y2, feOne) + + // v = dy² + 1 + vv := new(field.Element).Multiply(y2, d) + vv = vv.Add(vv, feOne) + + // x = +√(u/v) + xx, wasSquare := new(field.Element).SqrtRatio(u, vv) + if wasSquare == 0 { + return nil, errors.New("edwards25519: invalid point encoding") + } + + // Select the negative square root if the sign bit is set. + xxNeg := new(field.Element).Negate(xx) + xx = xx.Select(xxNeg, xx, int(x[31]>>7)) + + v.x.Set(xx) + v.y.Set(y) + v.z.One() + v.t.Multiply(xx, y) // xy = T / Z + + return v, nil +} + +func copyFieldElement(buf *[32]byte, v *field.Element) []byte { + copy(buf[:], v.Bytes()) + return buf[:] +} + +// Conversions. + +func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 { + v.X.Multiply(&p.X, &p.T) + v.Y.Multiply(&p.Y, &p.Z) + v.Z.Multiply(&p.Z, &p.T) + return v +} + +func (v *projP2) FromP3(p *Point) *projP2 { + v.X.Set(&p.x) + v.Y.Set(&p.y) + v.Z.Set(&p.z) + return v +} + +func (v *Point) fromP1xP1(p *projP1xP1) *Point { + v.x.Multiply(&p.X, &p.T) + v.y.Multiply(&p.Y, &p.Z) + v.z.Multiply(&p.Z, &p.T) + v.t.Multiply(&p.X, &p.Y) + return v +} + +func (v *Point) fromP2(p *projP2) *Point { + v.x.Multiply(&p.X, &p.Z) + v.y.Multiply(&p.Y, &p.Z) + v.z.Square(&p.Z) + v.t.Multiply(&p.X, &p.Y) + return v +} + +// d is a constant in the curve equation. +var d, _ = new(field.Element).SetBytes([]byte{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52}) +var d2 = new(field.Element).Add(d, d) + +func (v *projCached) FromP3(p *Point) *projCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.Z.Set(&p.z) + v.T2d.Multiply(&p.t, d2) + return v +} + +func (v *affineCached) FromP3(p *Point) *affineCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.T2d.Multiply(&p.t, d2) + + var invZ field.Element + invZ.Invert(&p.z) + v.YplusX.Multiply(&v.YplusX, &invZ) + v.YminusX.Multiply(&v.YminusX, &invZ) + v.T2d.Multiply(&v.T2d, &invZ) + return v +} + +// (Re)addition and subtraction. + +// Add sets v = p + q, and returns v. +func (v *Point) Add(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Add(p, qCached) + return v.fromP1xP1(result) +} + +// Subtract sets v = p - q, and returns v. 
+func (v *Point) Subtract(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Sub(p, qCached) + return v.fromP1xP1(result) +} + +func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&ZZ2, &TT2d) + v.T.Subtract(&ZZ2, &TT2d) + return v +} + +func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&ZZ2, &TT2d) // flipped sign + v.T.Add(&ZZ2, &TT2d) // flipped sign + return v +} + +func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&Z2, &TT2d) + v.T.Subtract(&Z2, &TT2d) + return v +} + +func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&Z2, &TT2d) // flipped sign + v.T.Add(&Z2, &TT2d) // flipped sign + return v +} + +// Doubling. + +func (v *projP1xP1) Double(p *projP2) *projP1xP1 { + var XX, YY, ZZ2, XplusYsq field.Element + + XX.Square(&p.X) + YY.Square(&p.Y) + ZZ2.Square(&p.Z) + ZZ2.Add(&ZZ2, &ZZ2) + XplusYsq.Add(&p.X, &p.Y) + XplusYsq.Square(&XplusYsq) + + v.Y.Add(&YY, &XX) + v.Z.Subtract(&YY, &XX) + + v.X.Subtract(&XplusYsq, &v.Y) + v.T.Subtract(&ZZ2, &v.Z) + return v +} + +// Negation. + +// Negate sets v = -p, and returns v. +func (v *Point) Negate(p *Point) *Point { + checkInitialized(p) + v.x.Negate(&p.x) + v.y.Set(&p.y) + v.z.Set(&p.z) + v.t.Negate(&p.t) + return v +} + +// Equal returns 1 if v is equivalent to u, and 0 otherwise. +func (v *Point) Equal(u *Point) int { + checkInitialized(v, u) + + var t1, t2, t3, t4 field.Element + t1.Multiply(&v.x, &u.z) + t2.Multiply(&u.x, &v.z) + t3.Multiply(&v.y, &u.z) + t4.Multiply(&u.y, &v.z) + + return t1.Equal(&t2) & t3.Equal(&t4) +} + +// Constant-time operations + +// Select sets v to a if cond == 1 and to b if cond == 0. +func (v *projCached) Select(a, b *projCached, cond int) *projCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.Z.Select(&a.Z, &b.Z, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// Select sets v to a if cond == 1 and to b if cond == 0. 
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *projCached) CondNeg(cond int) *projCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *affineCached) CondNeg(cond int) *affineCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go new file mode 100644 index 0000000..d152d68 --- /dev/null +++ b/vendor/filippo.io/edwards25519/extra.go @@ -0,0 +1,349 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// This file contains additional functionality that is not included in the +// upstream crypto/internal/edwards25519 package. + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. Don't change the style without making + // sure it doesn't increase the inliner cost. + var e [4]field.Element + X, Y, Z, T = v.extendedCoordinates(&e) + return +} + +func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) { + checkInitialized(v) + X = e[0].Set(&v.x) + Y = e[1].Set(&v.y) + Z = e[2].Set(&v.z) + T = e[3].Set(&v.t) + return +} + +// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +// +// If the coordinates are invalid or don't represent a valid point on the curve, +// SetExtendedCoordinates returns nil and an error and the receiver is +// unchanged. Otherwise, SetExtendedCoordinates returns v. +func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) { + if !isOnCurve(X, Y, Z, T) { + return nil, errors.New("edwards25519: invalid point coordinates") + } + v.x.Set(X) + v.y.Set(Y) + v.z.Set(Z) + v.t.Set(T) + return v, nil +} + +func isOnCurve(X, Y, Z, T *field.Element) bool { + var lhs, rhs field.Element + XX := new(field.Element).Square(X) + YY := new(field.Element).Square(Y) + ZZ := new(field.Element).Square(Z) + TT := new(field.Element).Square(T) + // -x² + y² = 1 + dx²y² + // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)² + // -X² + Y² = Z² + dT² + lhs.Subtract(YY, XX) + rhs.Multiply(d, TT).Add(&rhs, ZZ) + if lhs.Equal(&rhs) != 1 { + return false + } + // xy = T/Z + // XY/Z² = T/Z + // XY = TZ + lhs.Multiply(X, Y) + rhs.Multiply(T, Z) + return lhs.Equal(&rhs) == 1 +} + +// BytesMontgomery converts v to a point on the birationally-equivalent +// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding +// according to RFC 7748. +// +// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode +// to the same value. 
If v is the identity point, BytesMontgomery returns 32 +// zero bytes, analogously to the X25519 function. +// +// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate: +// while every valid edwards25519 point has a unique u-coordinate Montgomery +// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond +// to any edwards25519 point, and every other X25519 input corresponds to two +// edwards25519 points. +func (v *Point) BytesMontgomery() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytesMontgomery(&buf) +} + +func (v *Point) bytesMontgomery(buf *[32]byte) []byte { + checkInitialized(v) + + // RFC 7748, Section 4.1 provides the bilinear map to calculate the + // Montgomery u-coordinate + // + // u = (1 + y) / (1 - y) + // + // where y = Y / Z. + + var y, recip, u field.Element + + y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z + recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y) + u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r + + return copyFieldElement(buf, &u) +} + +// MultByCofactor sets v = 8 * p, and returns v. +func (v *Point) MultByCofactor(p *Point) *Point { + checkInitialized(p) + result := projP1xP1{} + pp := (&projP2{}).FromP3(p) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + return v.fromP1xP1(&result) +} + +// Given k > 0, set s = s**(2*i). +func (s *Scalar) pow2k(k int) { + for i := 0; i < k; i++ { + s.Multiply(s, s) + } +} + +// Invert sets s to the inverse of a nonzero scalar v, and returns s. +// +// If t is zero, Invert returns zero. +func (s *Scalar) Invert(t *Scalar) *Scalar { + // Uses a hardcoded sliding window of width 4. + var table [8]Scalar + var tt Scalar + tt.Multiply(t, t) + table[0] = *t + for i := 0; i < 7; i++ { + table[i+1].Multiply(&table[i], &tt) + } + // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15] + // so t**k = t[k/2] for odd k + + // To compute the sliding window digits, use the following Sage script: + + // sage: import itertools + // sage: def sliding_window(w,k): + // ....: digits = [] + // ....: while k > 0: + // ....: if k % 2 == 1: + // ....: kmod = k % (2**w) + // ....: digits.append(kmod) + // ....: k = k - kmod + // ....: else: + // ....: digits.append(0) + // ....: k = k // 2 + // ....: return digits + + // Now we can compute s roughly as follows: + + // sage: s = 1 + // sage: for coeff in reversed(sliding_window(4,l-2)): + // ....: s = s*s + // ....: if coeff > 0 : + // ....: s = s*t**coeff + + // This works on one bit at a time, with many runs of zeros. + // The digits can be collapsed into [(count, coeff)] as follows: + + // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))] + + // Entries of the form (k, 0) turn into pow2k(k) + // Entries of the form (1, coeff) turn into a squaring and then a table lookup. + // We can fold the squaring into the previous pow2k(k) as pow2k(k+1). 
+ + *s = table[1/2] + s.pow2k(127 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[5/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(5 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(9 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + + return s +} + +// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends only on the lengths of the two slices, which must match. +func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called MultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Proceed as in the single-base case, but share doublings + // between each point in the multiscalar equation. + + // Build lookup tables for each point + tables := make([]projLookupTable, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute signed radix-16 digits for each scalar + digits := make([][64]int8, len(scalars)) + for i := range digits { + digits[i] = scalars[i].signedRadix16() + } + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][63]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + for i := 62; i >= 0; i-- { + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][i]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + } + return v +} + +// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends on the inputs. 
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called VarTimeMultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Generalize double-base NAF computation to arbitrary sizes. + // Here all the points are dynamic, so we only use the smaller + // tables. + + // Build lookup tables for each point + tables := make([]nafLookupTable5, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute a NAF for each scalar + nafs := make([][256]int8, len(scalars)) + for i := range nafs { + nafs[i] = scalars[i].nonAdjacentForm(5) + } + + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + // + // Skip trying to find the first nonzero coefficent, because + // searching might be more work than a few extra doublings. + for i := 255; i >= 0; i-- { + tmp1.Double(tmp2) + + for j := range nafs { + if nafs[j][i] > 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, nafs[j][i]) + tmp1.Add(v, multiple) + } else if nafs[j][i] < 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, -nafs[j][i]) + tmp1.Sub(v, multiple) + } + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go new file mode 100644 index 0000000..5518ef2 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe.go @@ -0,0 +1,420 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. 
Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. +func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. +func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is +// not of the right length, SetBytes returns nil and an error, and the +// receiver is unchanged. 
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+	if len(x) != 32 {
+		return nil, errors.New("edwards25519: invalid field element input size")
+	}
+
+	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+	v.l0 = binary.LittleEndian.Uint64(x[0:8])
+	v.l0 &= maskLow51Bits
+	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+	v.l1 &= maskLow51Bits
+	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+	v.l2 &= maskLow51Bits
+	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+	v.l3 &= maskLow51Bits
+	// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+	// Note: not bytes 25:33, shift 4, to avoid overread.
+	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+	v.l4 &= maskLow51Bits
+
+	return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var out [32]byte
+	return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+	t := *v
+	t.reduce()
+
+	var buf [8]byte
+	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+		bitsOffset := i * 51
+		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+		for i, bb := range buf {
+			off := bitsOffset/8 + i
+			if off >= len(out) {
+				break
+			}
+			out[off] |= bb
+		}
+	}
+
+	return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+	sa, sv := u.Bytes(), v.Bytes()
+	return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+	m := mask64Bits(cond)
+	v.l0 = (m & a.l0) | (^m & b.l0)
+	v.l1 = (m & a.l1) | (^m & b.l1)
+	v.l2 = (m & a.l2) | (^m & b.l2)
+	v.l3 = (m & a.l3) | (^m & b.l3)
+	v.l4 = (m & a.l4) | (^m & b.l4)
+	return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+	m := mask64Bits(cond)
+	t := m & (v.l0 ^ u.l0)
+	v.l0 ^= t
+	u.l0 ^= t
+	t = m & (v.l1 ^ u.l1)
+	v.l1 ^= t
+	u.l1 ^= t
+	t = m & (v.l2 ^ u.l2)
+	v.l2 ^= t
+	u.l2 ^= t
+	t = m & (v.l3 ^ u.l3)
+	v.l3 ^= t
+	u.l3 ^= t
+	t = m & (v.l4 ^ u.l4)
+	v.l4 ^= t
+	u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+	return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+	return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+	feMul(v, x, y)
+	return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+	feSquare(v, x)
+	return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element { + x0lo, x0hi := mul51(x.l0, y) + x1lo, x1hi := mul51(x.l1, y) + x2lo, x2hi := mul51(x.l2, y) + x3lo, x3hi := mul51(x.l3, y) + x4lo, x4hi := mul51(x.l4, y) + v.l0 = x0lo + 19*x4hi // carried over per the reduction identity + v.l1 = x1lo + x0hi + v.l2 = x2lo + x1hi + v.l3 = x3lo + x2hi + v.l4 = x4lo + x3hi + // The hi portions are going to be only 32 bits, plus any previous excess, + // so we can skip the carry propagation. + return v +} + +// mul51 returns lo + hi * 2⁵¹ = a * b. +func mul51(a uint64, b uint32) (lo uint64, hi uint64) { + mh, ml := bits.Mul64(a, uint64(b)) + lo = ml & maskLow51Bits + hi = (mh << 13) | (ml >> 51) + return +} + +// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. +func (v *Element) Pow22523(x *Element) *Element { + var t0, t1, t2 Element + + t0.Square(x) // x^2 + t1.Square(&t0) // x^4 + t1.Square(&t1) // x^8 + t1.Multiply(x, &t1) // x^9 + t0.Multiply(&t0, &t1) // x^11 + t0.Square(&t0) // x^22 + t0.Multiply(&t1, &t0) // x^31 + t1.Square(&t0) // x^62 + for i := 1; i < 5; i++ { // x^992 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 + t1.Square(&t0) // 2^11 - 2 + for i := 1; i < 10; i++ { // 2^20 - 2^10 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^20 - 1 + t2.Square(&t1) // 2^21 - 2 + for i := 1; i < 20; i++ { // 2^40 - 2^20 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^40 - 1 + t1.Square(&t1) // 2^41 - 2 + for i := 1; i < 10; i++ { // 2^50 - 2^10 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^50 - 1 + t1.Square(&t0) // 2^51 - 2 + for i := 1; i < 50; i++ { // 2^100 - 2^50 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^100 - 1 + t2.Square(&t1) // 2^101 - 2 + for i := 1; i < 100; i++ { // 2^200 - 2^100 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^200 - 1 + t1.Square(&t1) // 2^201 - 2 + for i := 1; i < 50; i++ { // 2^250 - 2^50 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^250 - 1 + t0.Square(&t0) // 2^251 - 2 + t0.Square(&t0) // 2^252 - 4 + return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) +} + +// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. +var sqrtM1 = &Element{1718705420411056, 234908883556509, + 2233514472574048, 2117202627021982, 765476049583133} + +// SqrtRatio sets r to the non-negative square root of the ratio of u and v. +// +// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio +// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, +// and returns r and 0. +func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) { + t0 := new(Element) + + // r = (u * v3) * (u * v7)^((p-5)/8) + v2 := new(Element).Square(v) + uv3 := new(Element).Multiply(u, t0.Multiply(v2, v)) + uv7 := new(Element).Multiply(uv3, t0.Square(v2)) + rr := new(Element).Multiply(uv3, t0.Pow22523(uv7)) + + check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2 + + uNeg := new(Element).Negate(u) + correctSignSqrt := check.Equal(u) + flippedSignSqrt := check.Equal(uNeg) + flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1)) + + rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r + // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) + rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI) + + r.Absolute(rr) // Choose the nonnegative square root. 
+ return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go new file mode 100644 index 0000000..edcf163 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go @@ -0,0 +1,16 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +// +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. +// +//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s new file mode 100644 index 0000000..293f013 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s @@ -0,0 +1,379 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + ADCQ DX, R14 + 
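+	// At this point the five 128-bit products r0..r4 live in the register
+	// pairs SI:DI, R8:R9, R10:R11, R12:R13 and R14:R15 (high:low). The first
+	// chain below extracts each carry r_i>>51 with a funnel shift (SHLQ $13
+	// shifts the high word left, pulling in the top 13 bits of the low word),
+	// masks the low word to 51 bits, adds the carry into the next limb, and
+	// folds the top carry into r0 as 19*carry, because 2^255 = 19 mod 2^255-19.
+	// The second chain repeats the carry propagation on the now 64-bit limbs.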
+ // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go new file mode 100644 index 0000000..ddb6c9b --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go @@ -0,0 +1,12 @@ +// 
Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go new file mode 100644 index 0000000..af459ef --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s new file mode 100644 index 0000000..3126a43 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go new file mode 100644 index 0000000..234a5b2 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go new file mode 100644 index 0000000..1ef503b --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_extra.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "errors" + +// This file contains additional functionality that is not included in the +// upstream crypto/ed25519/edwards25519/field package. 
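An illustrative aside, not part of the patch: the per-architecture code above and the generic fallback all lean on the same reduction identity, a*2^255 + b = 19*a + b (mod 2^255-19), so the carry that spills out of the top 51-bit limb is multiplied by 19 and folded back into the lowest limb. A self-contained sketch of that single step on plain uint64 limbs (the fold helper is hypothetical; it mirrors carryPropagateGeneric, which appears later in this patch):

package main

import "fmt"

const maskLow51 = (1 << 51) - 1

// fold performs one round of carry propagation on five 51-bit limbs:
// each limb's bits above 2^51 move into the next limb, and the top
// carry re-enters the bottom limb multiplied by 19 (2^255 = 19 mod p).
func fold(l [5]uint64) [5]uint64 {
	c0, c1, c2, c3, c4 := l[0]>>51, l[1]>>51, l[2]>>51, l[3]>>51, l[4]>>51
	return [5]uint64{
		l[0]&maskLow51 + c4*19,
		l[1]&maskLow51 + c0,
		l[2]&maskLow51 + c1,
		l[3]&maskLow51 + c2,
		l[4]&maskLow51 + c3,
	}
}

func main() {
	// 2^52 in the lowest limb carries a 2 into the next limb; 2^51 in the
	// top limb wraps around as 19 in the lowest limb.
	fmt.Println(fold([5]uint64{1 << 52, 0, 0, 0, 1 << 51})) // [19 2 0 0 0]
}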
+ +// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which +// is reduced modulo the field order. If x is not of the right length, +// SetWideBytes returns nil and an error, and the receiver is unchanged. +// +// SetWideBytes is not necessary to select a uniformly distributed value, and is +// only provided for compatibility: SetBytes can be used instead as the chance +// of bias is less than 2⁻²⁵⁰. +func (v *Element) SetWideBytes(x []byte) (*Element, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetWideBytes input size") + } + + // Split the 64 bytes into two elements, and extract the most significant + // bit of each, which is ignored by SetBytes. + lo, _ := new(Element).SetBytes(x[:32]) + loMSB := uint64(x[31] >> 7) + hi, _ := new(Element).SetBytes(x[32:]) + hiMSB := uint64(x[63] >> 7) + + // The output we want is + // + // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹ + // + // which applying the reduction identity comes out to + // + // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19² + // + // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value + // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value + // (hiMSB * 2 * 19²), so it fits in a uint64. + + v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19 + v.l1 = lo.l1 + hi.l1*2*19 + v.l2 = lo.l2 + hi.l2*2*19 + v.l3 = lo.l3 + hi.l3*2*19 + v.l4 = lo.l4 + hi.l4*2*19 + + return v.carryPropagate(), nil +} diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go new file mode 100644 index 0000000..86f5fd9 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_generic.go @@ -0,0 +1,266 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. +func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, + // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. 
For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. + + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2⁵¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. + // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) + // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² + // r0 < 2⁷ × 2⁵² × 2⁵² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2⁵² × 2⁵² + // r4 < 2¹⁰⁷ + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as an Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2⁵¹. 
+ *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. + // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. + + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction +// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and + // the final l0 will be at most 52 bits. Similarly for the rest. + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go new file mode 100644 index 0000000..3fd1653 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar.go @@ -0,0 +1,343 @@ +// Copyright (c) 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +import ( + "encoding/binary" + "errors" +) + +// A Scalar is an integer modulo +// +// l = 2^252 + 27742317777372353535851937790883648493 +// +// which is the prime order of the edwards25519 group. +// +// This type works similarly to math/big.Int, and all arguments and +// receivers are allowed to alias. +// +// The zero value is a valid zero element. +type Scalar struct { + // s is the scalar in the Montgomery domain, in the format of the + // fiat-crypto implementation. + s fiatScalarMontgomeryDomainFieldElement +} + +// The field implementation in scalar_fiat.go is generated by the fiat-crypto +// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc) +// from a formally verified model. +// +// fiat-crypto code comes under the following license. +// +// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, +// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + +// NewScalar returns a new zero Scalar. +func NewScalar() *Scalar { + return &Scalar{} +} + +// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to +// using Multiply and then Add. +func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar { + // Make a copy of z in case it aliases s. + zCopy := new(Scalar).Set(z) + return s.Multiply(x, y).Add(s, zCopy) +} + +// Add sets s = x + y mod l, and returns s. +func (s *Scalar) Add(x, y *Scalar) *Scalar { + // s = 1 * x + y mod l + fiatScalarAdd(&s.s, &x.s, &y.s) + return s +} + +// Subtract sets s = x - y mod l, and returns s. +func (s *Scalar) Subtract(x, y *Scalar) *Scalar { + // s = -1 * y + x mod l + fiatScalarSub(&s.s, &x.s, &y.s) + return s +} + +// Negate sets s = -x mod l, and returns s. +func (s *Scalar) Negate(x *Scalar) *Scalar { + // s = -1 * x + 0 mod l + fiatScalarOpp(&s.s, &x.s) + return s +} + +// Multiply sets s = x * y mod l, and returns s. +func (s *Scalar) Multiply(x, y *Scalar) *Scalar { + // s = x * y + 0 mod l + fiatScalarMul(&s.s, &x.s, &y.s) + return s +} + +// Set sets s = x, and returns s. +func (s *Scalar) Set(x *Scalar) *Scalar { + *s = *x + return s +} + +// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer. +// If x is not of the right length, SetUniformBytes returns nil and an error, +// and the receiver is unchanged. +// +// SetUniformBytes can be used to set s to a uniformly distributed value given +// 64 uniformly distributed random bytes. 
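A usage sketch (an editorial aside, not part of the vendored file): this is how a caller would typically derive a scalar from 64 uniform bytes, for instance a SHA-512 digest, using the API declared in this file; the import path matches the vendored module above.

package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// 64 uniformly random bytes; in practice this is often a hash output.
	var seed [64]byte
	if _, err := rand.Read(seed[:]); err != nil {
		panic(err)
	}

	// SetUniformBytes reduces the 512-bit value modulo l, so the result is
	// (essentially) uniformly distributed modulo l.
	s, err := edwards25519.NewScalar().SetUniformBytes(seed[:])
	if err != nil {
		panic(err)
	}
	fmt.Printf("scalar: %x\n", s.Bytes())
}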
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetUniformBytes input length") + } + + // We have a value x of 512 bits, but our fiatScalarFromBytes function + // expects an input lower than l, which is a little over 252 bits. + // + // Instead of writing a reduction function that operates on wider inputs, we + // can interpret x as the sum of three shorter values a, b, and c. + // + // x = a + b * 2^168 + c * 2^336 mod l + // + // We then precompute 2^168 and 2^336 modulo l, and perform the reduction + // with two multiplications and two additions. + + s.setShortBytes(x[:21]) + t := new(Scalar).setShortBytes(x[21:42]) + s.Add(s, t.Multiply(t, scalarTwo168)) + t.setShortBytes(x[42:]) + s.Add(s, t.Multiply(t, scalarTwo336)) + + return s, nil +} + +// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a +// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value +// in the 2^256 Montgomery domain. +var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7, + 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}} +var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b, + 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}} + +// setShortBytes sets s = x mod l, where x is a little-endian integer shorter +// than 32 bytes. +func (s *Scalar) setShortBytes(x []byte) *Scalar { + if len(x) >= 32 { + panic("edwards25519: internal error: setShortBytes called with a long string") + } + var buf [32]byte + copy(buf[:], x) + fiatScalarFromBytes((*[4]uint64)(&s.s), &buf) + fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) + return s +} + +// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of +// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes +// returns nil and an error, and the receiver is unchanged. +func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) { + if len(x) != 32 { + return nil, errors.New("invalid scalar length") + } + if !isReduced(x) { + return nil, errors.New("invalid scalar encoding") + } + + fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x)) + fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) + + return s, nil +} + +// scalarMinusOneBytes is l - 1 in little endian. +var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16} + +// isReduced returns whether the given scalar in 32-byte little endian encoded +// form is reduced modulo l. +func isReduced(s []byte) bool { + if len(s) != 32 { + return false + } + + for i := len(s) - 1; i >= 0; i-- { + switch { + case s[i] > scalarMinusOneBytes[i]: + return false + case s[i] < scalarMinusOneBytes[i]: + return true + } + } + return true +} + +// SetBytesWithClamping applies the buffer pruning described in RFC 8032, +// Section 5.1.5 (also known as clamping) and sets s to the result. The input +// must be 32 bytes, and it is not modified. If x is not of the right length, +// SetBytesWithClamping returns nil and an error, and the receiver is unchanged. +// +// Note that since Scalar values are always reduced modulo the prime order of +// the curve, the resulting value will not preserve any of the cofactor-clearing +// properties that clamping is meant to provide. It will however work as +// expected as long as it is applied to points on the prime order subgroup, like +// in Ed25519. 
In fact, it is lost to history why RFC 8032 adopted the +// irrelevant RFC 7748 clamping, but it is now required for compatibility. +func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) { + // The description above omits the purpose of the high bits of the clamping + // for brevity, but those are also lost to reductions, and are also + // irrelevant to edwards25519 as they protect against a specific + // implementation bug that was once observed in a generic Montgomery ladder. + if len(x) != 32 { + return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length") + } + + // We need to use the wide reduction from SetUniformBytes, since clamping + // sets the 2^254 bit, making the value higher than the order. + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + wideBytes[0] &= 248 + wideBytes[31] &= 63 + wideBytes[31] |= 64 + return s.SetUniformBytes(wideBytes[:]) +} + +// Bytes returns the canonical 32-byte little-endian encoding of s. +func (s *Scalar) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var encoded [32]byte + return s.bytes(&encoded) +} + +func (s *Scalar) bytes(out *[32]byte) []byte { + var ss fiatScalarNonMontgomeryDomainFieldElement + fiatScalarFromMontgomery(&ss, &s.s) + fiatScalarToBytes(out, (*[4]uint64)(&ss)) + return out[:] +} + +// Equal returns 1 if s and t are equal, and 0 otherwise. +func (s *Scalar) Equal(t *Scalar) int { + var diff fiatScalarMontgomeryDomainFieldElement + fiatScalarSub(&diff, &s.s, &t.s) + var nonzero uint64 + fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff)) + nonzero |= nonzero >> 32 + nonzero |= nonzero >> 16 + nonzero |= nonzero >> 8 + nonzero |= nonzero >> 4 + nonzero |= nonzero >> 2 + nonzero |= nonzero >> 1 + return int(^nonzero) & 1 +} + +// nonAdjacentForm computes a width-w non-adjacent form for this scalar. +// +// w must be between 2 and 8, or nonAdjacentForm will panic. +func (s *Scalar) nonAdjacentForm(w uint) [256]int8 { + // This implementation is adapted from the one + // in curve25519-dalek and is documented there: + // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871 + b := s.Bytes() + if b[31] > 127 { + panic("scalar has high bit set illegally") + } + if w < 2 { + panic("w must be at least 2 by the definition of NAF") + } else if w > 8 { + panic("NAF digits must fit in int8") + } + + var naf [256]int8 + var digits [5]uint64 + + for i := 0; i < 4; i++ { + digits[i] = binary.LittleEndian.Uint64(b[i*8:]) + } + + width := uint64(1 << w) + windowMask := uint64(width - 1) + + pos := uint(0) + carry := uint64(0) + for pos < 256 { + indexU64 := pos / 64 + indexBit := pos % 64 + var bitBuf uint64 + if indexBit < 64-w { + // This window's bits are contained in a single u64 + bitBuf = digits[indexU64] >> indexBit + } else { + // Combine the current 64 bits with bits from the next 64 + bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit)) + } + + // Add carry into the current window + window := carry + (bitBuf & windowMask) + + if window&1 == 0 { + // If the window value is even, preserve the carry and continue. + // Why is the carry preserved? 
+ // If carry == 0 and window & 1 == 0, + // then the next carry should be 0 + // If carry == 1 and window & 1 == 0, + // then bit_buf & 1 == 1 so the next carry should be 1 + pos += 1 + continue + } + + if window < width/2 { + carry = 0 + naf[pos] = int8(window) + } else { + carry = 1 + naf[pos] = int8(window) - int8(width) + } + + pos += w + } + return naf +} + +func (s *Scalar) signedRadix16() [64]int8 { + b := s.Bytes() + if b[31] > 127 { + panic("scalar has high bit set illegally") + } + + var digits [64]int8 + + // Compute unsigned radix-16 digits: + for i := 0; i < 32; i++ { + digits[2*i] = int8(b[i] & 15) + digits[2*i+1] = int8((b[i] >> 4) & 15) + } + + // Recenter coefficients: + for i := 0; i < 63; i++ { + carry := (digits[i] + 8) >> 4 + digits[i] -= carry << 4 + digits[i+1] += carry + } + + return digits +} diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go new file mode 100644 index 0000000..2e5782b --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar_fiat.go @@ -0,0 +1,1147 @@ +// Code generated by Fiat Cryptography. DO NOT EDIT. +// +// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes +// +// curve description: Scalar +// +// machine_wordsize = 64 (from "64") +// +// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes +// +// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493") +// +// +// +// NOTE: In addition to the bounds specified above each function, all +// +// functions synthesized for this Montgomery arithmetic require the +// +// input to be strictly less than the prime modulus (m), and also +// +// require the input to be in the unique saturated representation. +// +// All functions also ensure that these two properties are true of +// +// return values. 
+// +// +// +// Computed values: +// +// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) +// +// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) +// +// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in +// +// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 + +package edwards25519 + +import "math/bits" + +type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 +type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 + +// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain. +// +// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +type fiatScalarMontgomeryDomainFieldElement [4]uint64 + +// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain. +// +// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +type fiatScalarNonMontgomeryDomainFieldElement [4]uint64 + +// fiatScalarCmovznzU64 is a single-word conditional move. +// +// Postconditions: +// +// out1 = (if arg1 = 0 then arg2 else arg3) +// +// Input Bounds: +// +// arg1: [0x0 ~> 0x1] +// arg2: [0x0 ~> 0xffffffffffffffff] +// arg3: [0x0 ~> 0xffffffffffffffff] +// +// Output Bounds: +// +// out1: [0x0 ~> 0xffffffffffffffff] +func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) { + x1 := (uint64(arg1) * 0xffffffffffffffff) + x2 := ((x1 & arg3) | ((^x1) & arg2)) + *out1 = x2 +} + +// fiatScalarMul multiplies two field elements in the Montgomery domain. 
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + x1 := arg1[1] + x2 := arg1[2] + x3 := arg1[3] + x4 := arg1[0] + var x5 uint64 + var x6 uint64 + x6, x5 = bits.Mul64(x4, arg2[3]) + var x7 uint64 + var x8 uint64 + x8, x7 = bits.Mul64(x4, arg2[2]) + var x9 uint64 + var x10 uint64 + x10, x9 = bits.Mul64(x4, arg2[1]) + var x11 uint64 + var x12 uint64 + x12, x11 = bits.Mul64(x4, arg2[0]) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Add64(x12, x9, uint64(0x0)) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) + var x17 uint64 + var x18 uint64 + x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) + x19 := (uint64(fiatScalarUint1(x18)) + x6) + var x20 uint64 + _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b) + var x22 uint64 + var x23 uint64 + x23, x22 = bits.Mul64(x20, 0x1000000000000000) + var x24 uint64 + var x25 uint64 + x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6) + var x26 uint64 + var x27 uint64 + x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed) + var x28 uint64 + var x29 uint64 + x28, x29 = bits.Add64(x27, x24, uint64(0x0)) + x30 := (uint64(fiatScalarUint1(x29)) + x25) + var x32 uint64 + _, x32 = bits.Add64(x11, x26, uint64(0x0)) + var x33 uint64 + var x34 uint64 + x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32))) + var x35 uint64 + var x36 uint64 + x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34))) + var x37 uint64 + var x38 uint64 + x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36))) + var x39 uint64 + var x40 uint64 + x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38))) + var x41 uint64 + var x42 uint64 + x42, x41 = bits.Mul64(x1, arg2[3]) + var x43 uint64 + var x44 uint64 + x44, x43 = bits.Mul64(x1, arg2[2]) + var x45 uint64 + var x46 uint64 + x46, x45 = bits.Mul64(x1, arg2[1]) + var x47 uint64 + var x48 uint64 + x48, x47 = bits.Mul64(x1, arg2[0]) + var x49 uint64 + var x50 uint64 + x49, x50 = bits.Add64(x48, x45, uint64(0x0)) + var x51 uint64 + var x52 uint64 + x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50))) + var x53 uint64 + var x54 uint64 + x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52))) + x55 := (uint64(fiatScalarUint1(x54)) + x42) + var x56 uint64 + var x57 uint64 + x56, x57 = bits.Add64(x33, x47, uint64(0x0)) + var x58 uint64 + var x59 uint64 + x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57))) + var x60 uint64 + var x61 uint64 + x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59))) + var x62 uint64 + var x63 uint64 + x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61))) + var x64 uint64 + var x65 uint64 + x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63))) + var x66 uint64 + _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b) + var x68 uint64 + var x69 uint64 + x69, x68 = bits.Mul64(x66, 0x1000000000000000) + var x70 uint64 + var x71 uint64 + x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) + var x72 uint64 + var x73 uint64 + x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed) + var x74 uint64 + var x75 uint64 + x74, x75 = bits.Add64(x73, x70, uint64(0x0)) + x76 := (uint64(fiatScalarUint1(x75)) + x71) + var x78 uint64 + _, x78 = bits.Add64(x56, x72, uint64(0x0)) + var x79 
uint64 + var x80 uint64 + x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78))) + var x81 uint64 + var x82 uint64 + x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80))) + var x83 uint64 + var x84 uint64 + x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82))) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84))) + x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65))) + var x88 uint64 + var x89 uint64 + x89, x88 = bits.Mul64(x2, arg2[3]) + var x90 uint64 + var x91 uint64 + x91, x90 = bits.Mul64(x2, arg2[2]) + var x92 uint64 + var x93 uint64 + x93, x92 = bits.Mul64(x2, arg2[1]) + var x94 uint64 + var x95 uint64 + x95, x94 = bits.Mul64(x2, arg2[0]) + var x96 uint64 + var x97 uint64 + x96, x97 = bits.Add64(x95, x92, uint64(0x0)) + var x98 uint64 + var x99 uint64 + x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97))) + var x100 uint64 + var x101 uint64 + x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99))) + x102 := (uint64(fiatScalarUint1(x101)) + x89) + var x103 uint64 + var x104 uint64 + x103, x104 = bits.Add64(x79, x94, uint64(0x0)) + var x105 uint64 + var x106 uint64 + x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104))) + var x107 uint64 + var x108 uint64 + x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106))) + var x109 uint64 + var x110 uint64 + x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108))) + var x111 uint64 + var x112 uint64 + x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110))) + var x113 uint64 + _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b) + var x115 uint64 + var x116 uint64 + x116, x115 = bits.Mul64(x113, 0x1000000000000000) + var x117 uint64 + var x118 uint64 + x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6) + var x119 uint64 + var x120 uint64 + x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed) + var x121 uint64 + var x122 uint64 + x121, x122 = bits.Add64(x120, x117, uint64(0x0)) + x123 := (uint64(fiatScalarUint1(x122)) + x118) + var x125 uint64 + _, x125 = bits.Add64(x103, x119, uint64(0x0)) + var x126 uint64 + var x127 uint64 + x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125))) + var x128 uint64 + var x129 uint64 + x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127))) + var x130 uint64 + var x131 uint64 + x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129))) + var x132 uint64 + var x133 uint64 + x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131))) + x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112))) + var x135 uint64 + var x136 uint64 + x136, x135 = bits.Mul64(x3, arg2[3]) + var x137 uint64 + var x138 uint64 + x138, x137 = bits.Mul64(x3, arg2[2]) + var x139 uint64 + var x140 uint64 + x140, x139 = bits.Mul64(x3, arg2[1]) + var x141 uint64 + var x142 uint64 + x142, x141 = bits.Mul64(x3, arg2[0]) + var x143 uint64 + var x144 uint64 + x143, x144 = bits.Add64(x142, x139, uint64(0x0)) + var x145 uint64 + var x146 uint64 + x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144))) + var x147 uint64 + var x148 uint64 + x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146))) + x149 := (uint64(fiatScalarUint1(x148)) + x136) + var x150 uint64 + var x151 uint64 + x150, x151 = bits.Add64(x126, x141, uint64(0x0)) + var x152 uint64 + var x153 uint64 + x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151))) + var x154 uint64 + var x155 uint64 + x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153))) + var x156 uint64 + 
var x157 uint64 + x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155))) + var x158 uint64 + var x159 uint64 + x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157))) + var x160 uint64 + _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b) + var x162 uint64 + var x163 uint64 + x163, x162 = bits.Mul64(x160, 0x1000000000000000) + var x164 uint64 + var x165 uint64 + x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6) + var x166 uint64 + var x167 uint64 + x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed) + var x168 uint64 + var x169 uint64 + x168, x169 = bits.Add64(x167, x164, uint64(0x0)) + x170 := (uint64(fiatScalarUint1(x169)) + x165) + var x172 uint64 + _, x172 = bits.Add64(x150, x166, uint64(0x0)) + var x173 uint64 + var x174 uint64 + x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172))) + var x175 uint64 + var x176 uint64 + x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174))) + var x177 uint64 + var x178 uint64 + x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176))) + var x179 uint64 + var x180 uint64 + x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178))) + x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159))) + var x182 uint64 + var x183 uint64 + x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0)) + var x184 uint64 + var x185 uint64 + x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183))) + var x186 uint64 + var x187 uint64 + x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185))) + var x188 uint64 + var x189 uint64 + x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187))) + var x191 uint64 + _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189))) + var x192 uint64 + fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173) + var x193 uint64 + fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175) + var x194 uint64 + fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177) + var x195 uint64 + fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179) + out1[0] = x192 + out1[1] = x193 + out1[2] = x194 + out1[3] = x195 +} + +// fiatScalarAdd adds two field elements in the Montgomery domain. 
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + var x10 uint64 + x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0)) + var x11 uint64 + var x12 uint64 + x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10))) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12))) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14))) + var x18 uint64 + _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16))) + var x19 uint64 + fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1) + var x20 uint64 + fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3) + var x21 uint64 + fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5) + var x22 uint64 + fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7) + out1[0] = x19 + out1[1] = x20 + out1[2] = x21 + out1[3] = x22 +} + +// fiatScalarSub subtracts two field elements in the Montgomery domain. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) + var x12 uint64 + var x13 uint64 + x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) + var x16 uint64 + x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) + out1[0] = x10 + out1[1] = x12 + out1[2] = x14 + out1[3] = x16 +} + +// fiatScalarOpp negates a field element in the Montgomery domain. 
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m +// 0 ≤ eval out1 < m +func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) + var x12 uint64 + var x13 uint64 + x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) + var x16 uint64 + x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) + out1[0] = x10 + out1[1] = x12 + out1[2] = x14 + out1[3] = x16 +} + +// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0 +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +// +// Output Bounds: +// +// out1: [0x0 ~> 0xffffffffffffffff] +func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) { + x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3]))) + *out1 = x1 +} + +// fiatScalarFromMontgomery translates a field element out of the Montgomery domain. 
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m +// 0 ≤ eval out1 < m +func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { + x1 := arg1[0] + var x2 uint64 + _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b) + var x4 uint64 + var x5 uint64 + x5, x4 = bits.Mul64(x2, 0x1000000000000000) + var x6 uint64 + var x7 uint64 + x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6) + var x8 uint64 + var x9 uint64 + x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x9, x6, uint64(0x0)) + var x13 uint64 + _, x13 = bits.Add64(x1, x8, uint64(0x0)) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13))) + var x16 uint64 + var x17 uint64 + x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0)) + var x18 uint64 + _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b) + var x20 uint64 + var x21 uint64 + x21, x20 = bits.Mul64(x18, 0x1000000000000000) + var x22 uint64 + var x23 uint64 + x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6) + var x24 uint64 + var x25 uint64 + x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed) + var x26 uint64 + var x27 uint64 + x26, x27 = bits.Add64(x25, x22, uint64(0x0)) + var x29 uint64 + _, x29 = bits.Add64(x16, x24, uint64(0x0)) + var x30 uint64 + var x31 uint64 + x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29))) + var x32 uint64 + var x33 uint64 + x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31))) + var x34 uint64 + var x35 uint64 + x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33))) + var x36 uint64 + var x37 uint64 + x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0)) + var x38 uint64 + var x39 uint64 + x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37))) + var x40 uint64 + var x41 uint64 + x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39))) + var x42 uint64 + _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b) + var x44 uint64 + var x45 uint64 + x45, x44 = bits.Mul64(x42, 0x1000000000000000) + var x46 uint64 + var x47 uint64 + x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6) + var x48 uint64 + var x49 uint64 + x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed) + var x50 uint64 + var x51 uint64 + x50, x51 = bits.Add64(x49, x46, uint64(0x0)) + var x53 uint64 + _, x53 = bits.Add64(x36, x48, uint64(0x0)) + var x54 uint64 + var x55 uint64 + x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53))) + var x56 uint64 + var x57 uint64 + x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55))) + var x58 uint64 + var x59 uint64 + x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57))) + var x60 uint64 + var x61 uint64 + x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0)) + var x62 uint64 + var x63 uint64 + x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61))) + var x64 uint64 + var x65 uint64 + x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63))) + var x66 uint64 + _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b) + var x68 uint64 + var x69 uint64 + x69, x68 = bits.Mul64(x66, 0x1000000000000000) + var x70 uint64 + var x71 uint64 + x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) + var x72 uint64 + var x73 uint64 + x73, x72 = bits.Mul64(x66, 
0x5812631a5cf5d3ed) + var x74 uint64 + var x75 uint64 + x74, x75 = bits.Add64(x73, x70, uint64(0x0)) + var x77 uint64 + _, x77 = bits.Add64(x60, x72, uint64(0x0)) + var x78 uint64 + var x79 uint64 + x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77))) + var x80 uint64 + var x81 uint64 + x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79))) + var x82 uint64 + var x83 uint64 + x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81))) + x84 := (uint64(fiatScalarUint1(x83)) + x69) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0)) + var x87 uint64 + var x88 uint64 + x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86))) + var x89 uint64 + var x90 uint64 + x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88))) + var x91 uint64 + var x92 uint64 + x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90))) + var x94 uint64 + _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92))) + var x95 uint64 + fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78) + var x96 uint64 + fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80) + var x97 uint64 + fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82) + var x98 uint64 + fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84) + out1[0] = x95 + out1[1] = x96 + out1[2] = x97 + out1[3] = x98 +} + +// fiatScalarToMontgomery translates a field element into the Montgomery domain. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = eval arg1 mod m +// 0 ≤ eval out1 < m +func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) { + x1 := arg1[1] + x2 := arg1[2] + x3 := arg1[3] + x4 := arg1[0] + var x5 uint64 + var x6 uint64 + x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d) + var x7 uint64 + var x8 uint64 + x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65) + var x9 uint64 + var x10 uint64 + x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347) + var x11 uint64 + var x12 uint64 + x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Add64(x12, x9, uint64(0x0)) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) + var x17 uint64 + var x18 uint64 + x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) + var x19 uint64 + _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b) + var x21 uint64 + var x22 uint64 + x22, x21 = bits.Mul64(x19, 0x1000000000000000) + var x23 uint64 + var x24 uint64 + x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6) + var x25 uint64 + var x26 uint64 + x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed) + var x27 uint64 + var x28 uint64 + x27, x28 = bits.Add64(x26, x23, uint64(0x0)) + var x30 uint64 + _, x30 = bits.Add64(x11, x25, uint64(0x0)) + var x31 uint64 + var x32 uint64 + x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30))) + var x33 uint64 + var x34 uint64 + x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32))) + var x35 uint64 + var x36 uint64 + x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34))) + var x37 uint64 + var x38 uint64 + x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d) + var x39 uint64 + var x40 uint64 + x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65) + var x41 uint64 + var x42 uint64 + x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347) + 
var x43 uint64 + var x44 uint64 + x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01) + var x45 uint64 + var x46 uint64 + x45, x46 = bits.Add64(x44, x41, uint64(0x0)) + var x47 uint64 + var x48 uint64 + x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46))) + var x49 uint64 + var x50 uint64 + x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48))) + var x51 uint64 + var x52 uint64 + x51, x52 = bits.Add64(x31, x43, uint64(0x0)) + var x53 uint64 + var x54 uint64 + x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52))) + var x55 uint64 + var x56 uint64 + x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54))) + var x57 uint64 + var x58 uint64 + x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56))) + var x59 uint64 + _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b) + var x61 uint64 + var x62 uint64 + x62, x61 = bits.Mul64(x59, 0x1000000000000000) + var x63 uint64 + var x64 uint64 + x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6) + var x65 uint64 + var x66 uint64 + x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed) + var x67 uint64 + var x68 uint64 + x67, x68 = bits.Add64(x66, x63, uint64(0x0)) + var x70 uint64 + _, x70 = bits.Add64(x51, x65, uint64(0x0)) + var x71 uint64 + var x72 uint64 + x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70))) + var x73 uint64 + var x74 uint64 + x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72))) + var x75 uint64 + var x76 uint64 + x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74))) + var x77 uint64 + var x78 uint64 + x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d) + var x79 uint64 + var x80 uint64 + x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65) + var x81 uint64 + var x82 uint64 + x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347) + var x83 uint64 + var x84 uint64 + x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Add64(x84, x81, uint64(0x0)) + var x87 uint64 + var x88 uint64 + x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86))) + var x89 uint64 + var x90 uint64 + x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88))) + var x91 uint64 + var x92 uint64 + x91, x92 = bits.Add64(x71, x83, uint64(0x0)) + var x93 uint64 + var x94 uint64 + x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92))) + var x95 uint64 + var x96 uint64 + x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94))) + var x97 uint64 + var x98 uint64 + x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96))) + var x99 uint64 + _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b) + var x101 uint64 + var x102 uint64 + x102, x101 = bits.Mul64(x99, 0x1000000000000000) + var x103 uint64 + var x104 uint64 + x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6) + var x105 uint64 + var x106 uint64 + x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed) + var x107 uint64 + var x108 uint64 + x107, x108 = bits.Add64(x106, x103, uint64(0x0)) + var x110 uint64 + _, x110 = bits.Add64(x91, x105, uint64(0x0)) + var x111 uint64 + var x112 uint64 + x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110))) + var x113 uint64 + var x114 uint64 + x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112))) + var x115 uint64 + var x116 uint64 + x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114))) + var x117 uint64 + var x118 uint64 + x118, x117 = 
bits.Mul64(x3, 0x399411b7c309a3d) + var x119 uint64 + var x120 uint64 + x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65) + var x121 uint64 + var x122 uint64 + x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347) + var x123 uint64 + var x124 uint64 + x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01) + var x125 uint64 + var x126 uint64 + x125, x126 = bits.Add64(x124, x121, uint64(0x0)) + var x127 uint64 + var x128 uint64 + x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126))) + var x129 uint64 + var x130 uint64 + x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128))) + var x131 uint64 + var x132 uint64 + x131, x132 = bits.Add64(x111, x123, uint64(0x0)) + var x133 uint64 + var x134 uint64 + x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132))) + var x135 uint64 + var x136 uint64 + x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134))) + var x137 uint64 + var x138 uint64 + x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136))) + var x139 uint64 + _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b) + var x141 uint64 + var x142 uint64 + x142, x141 = bits.Mul64(x139, 0x1000000000000000) + var x143 uint64 + var x144 uint64 + x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6) + var x145 uint64 + var x146 uint64 + x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed) + var x147 uint64 + var x148 uint64 + x147, x148 = bits.Add64(x146, x143, uint64(0x0)) + var x150 uint64 + _, x150 = bits.Add64(x131, x145, uint64(0x0)) + var x151 uint64 + var x152 uint64 + x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150))) + var x153 uint64 + var x154 uint64 + x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152))) + var x155 uint64 + var x156 uint64 + x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154))) + x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142) + var x158 uint64 + var x159 uint64 + x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0)) + var x160 uint64 + var x161 uint64 + x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159))) + var x162 uint64 + var x163 uint64 + x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161))) + var x164 uint64 + var x165 uint64 + x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163))) + var x167 uint64 + _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165))) + var x168 uint64 + fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151) + var x169 uint64 + fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153) + var x170 uint64 + fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155) + var x171 uint64 + fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157) + out1[0] = x168 + out1[1] = x169 + out1[2] = x170 + out1[3] = x171 +} + +// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order. 
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] +// +// Output Bounds: +// +// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] +func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) { + x1 := arg1[3] + x2 := arg1[2] + x3 := arg1[1] + x4 := arg1[0] + x5 := (uint8(x4) & 0xff) + x6 := (x4 >> 8) + x7 := (uint8(x6) & 0xff) + x8 := (x6 >> 8) + x9 := (uint8(x8) & 0xff) + x10 := (x8 >> 8) + x11 := (uint8(x10) & 0xff) + x12 := (x10 >> 8) + x13 := (uint8(x12) & 0xff) + x14 := (x12 >> 8) + x15 := (uint8(x14) & 0xff) + x16 := (x14 >> 8) + x17 := (uint8(x16) & 0xff) + x18 := uint8((x16 >> 8)) + x19 := (uint8(x3) & 0xff) + x20 := (x3 >> 8) + x21 := (uint8(x20) & 0xff) + x22 := (x20 >> 8) + x23 := (uint8(x22) & 0xff) + x24 := (x22 >> 8) + x25 := (uint8(x24) & 0xff) + x26 := (x24 >> 8) + x27 := (uint8(x26) & 0xff) + x28 := (x26 >> 8) + x29 := (uint8(x28) & 0xff) + x30 := (x28 >> 8) + x31 := (uint8(x30) & 0xff) + x32 := uint8((x30 >> 8)) + x33 := (uint8(x2) & 0xff) + x34 := (x2 >> 8) + x35 := (uint8(x34) & 0xff) + x36 := (x34 >> 8) + x37 := (uint8(x36) & 0xff) + x38 := (x36 >> 8) + x39 := (uint8(x38) & 0xff) + x40 := (x38 >> 8) + x41 := (uint8(x40) & 0xff) + x42 := (x40 >> 8) + x43 := (uint8(x42) & 0xff) + x44 := (x42 >> 8) + x45 := (uint8(x44) & 0xff) + x46 := uint8((x44 >> 8)) + x47 := (uint8(x1) & 0xff) + x48 := (x1 >> 8) + x49 := (uint8(x48) & 0xff) + x50 := (x48 >> 8) + x51 := (uint8(x50) & 0xff) + x52 := (x50 >> 8) + x53 := (uint8(x52) & 0xff) + x54 := (x52 >> 8) + x55 := (uint8(x54) & 0xff) + x56 := (x54 >> 8) + x57 := (uint8(x56) & 0xff) + x58 := (x56 >> 8) + x59 := (uint8(x58) & 0xff) + x60 := uint8((x58 >> 8)) + out1[0] = x5 + out1[1] = x7 + out1[2] = x9 + out1[3] = x11 + out1[4] = x13 + out1[5] = x15 + out1[6] = x17 + out1[7] = x18 + out1[8] = x19 + out1[9] = x21 + out1[10] = x23 + out1[11] = x25 + out1[12] = x27 + out1[13] = x29 + out1[14] = x31 + out1[15] = x32 + out1[16] = x33 + out1[17] = x35 + out1[18] = x37 + out1[19] = x39 + out1[20] = x41 + out1[21] = x43 + out1[22] = x45 + out1[23] = x46 + out1[24] = x47 + out1[25] = x49 + out1[26] = x51 + out1[27] = x53 + out1[28] = x55 + out1[29] = x57 + out1[30] = x59 + out1[31] = x60 +} + +// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order. 
+// +// Preconditions: +// +// 0 ≤ bytes_eval arg1 < m +// +// Postconditions: +// +// eval out1 mod m = bytes_eval arg1 mod m +// 0 ≤ eval out1 < m +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] +// +// Output Bounds: +// +// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] +func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) { + x1 := (uint64(arg1[31]) << 56) + x2 := (uint64(arg1[30]) << 48) + x3 := (uint64(arg1[29]) << 40) + x4 := (uint64(arg1[28]) << 32) + x5 := (uint64(arg1[27]) << 24) + x6 := (uint64(arg1[26]) << 16) + x7 := (uint64(arg1[25]) << 8) + x8 := arg1[24] + x9 := (uint64(arg1[23]) << 56) + x10 := (uint64(arg1[22]) << 48) + x11 := (uint64(arg1[21]) << 40) + x12 := (uint64(arg1[20]) << 32) + x13 := (uint64(arg1[19]) << 24) + x14 := (uint64(arg1[18]) << 16) + x15 := (uint64(arg1[17]) << 8) + x16 := arg1[16] + x17 := (uint64(arg1[15]) << 56) + x18 := (uint64(arg1[14]) << 48) + x19 := (uint64(arg1[13]) << 40) + x20 := (uint64(arg1[12]) << 32) + x21 := (uint64(arg1[11]) << 24) + x22 := (uint64(arg1[10]) << 16) + x23 := (uint64(arg1[9]) << 8) + x24 := arg1[8] + x25 := (uint64(arg1[7]) << 56) + x26 := (uint64(arg1[6]) << 48) + x27 := (uint64(arg1[5]) << 40) + x28 := (uint64(arg1[4]) << 32) + x29 := (uint64(arg1[3]) << 24) + x30 := (uint64(arg1[2]) << 16) + x31 := (uint64(arg1[1]) << 8) + x32 := arg1[0] + x33 := (x31 + uint64(x32)) + x34 := (x30 + x33) + x35 := (x29 + x34) + x36 := (x28 + x35) + x37 := (x27 + x36) + x38 := (x26 + x37) + x39 := (x25 + x38) + x40 := (x23 + uint64(x24)) + x41 := (x22 + x40) + x42 := (x21 + x41) + x43 := (x20 + x42) + x44 := (x19 + x43) + x45 := (x18 + x44) + x46 := (x17 + x45) + x47 := (x15 + uint64(x16)) + x48 := (x14 + x47) + x49 := (x13 + x48) + x50 := (x12 + x49) + x51 := (x11 + x50) + x52 := (x10 + x51) + x53 := (x9 + x52) + x54 := (x7 + uint64(x8)) + x55 := (x6 + x54) + x56 := (x5 + x55) + x57 := (x4 + x56) + x58 := (x3 + x57) + x59 := (x2 + x58) + x60 := (x1 + x59) + out1[0] = x39 + out1[1] = x46 + out1[2] = x53 + out1[3] = x60 +} diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go new file mode 100644 index 0000000..f7ca3ce --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalarmult.go @@ -0,0 +1,214 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "sync" + +// basepointTable is a set of 32 affineLookupTables, where table i is generated +// from 256i * basepoint. It is precomputed the first time it's used. 
+func basepointTable() *[32]affineLookupTable { + basepointTablePrecomp.initOnce.Do(func() { + p := NewGeneratorPoint() + for i := 0; i < 32; i++ { + basepointTablePrecomp.table[i].FromP3(p) + for j := 0; j < 8; j++ { + p.Add(p, p) + } + } + }) + return &basepointTablePrecomp.table +} + +var basepointTablePrecomp struct { + table [32]affineLookupTable + initOnce sync.Once +} + +// ScalarBaseMult sets v = x * B, where B is the canonical generator, and +// returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarBaseMult(x *Scalar) *Point { + basepointTable := basepointTable() + + // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i ) + // as described in the Ed25519 paper + // + // Group even and odd coefficients + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B) + // + // We use a lookup table for each i to get x_i*16^(2*i)*B + // and do four doublings to multiply by 16. + digits := x.signedRadix16() + + multiple := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + + // Accumulate the odd components first + v.Set(NewIdentityPoint()) + for i := 1; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + // Multiply by 16 + tmp2.FromP3(v) // tmp2 = v in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords + v.fromP1xP1(tmp1) // now v = 16*(odd components) + + // Accumulate the even components + for i := 0; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + return v +} + +// ScalarMult sets v = x * q, and returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarMult(x *Scalar, q *Point) *Point { + checkInitialized(q) + + var table projLookupTable + table.FromP3(q) + + // Write x = sum(x_i * 16^i) + // so x*Q = sum( Q*x_i*16^i ) + // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... 
) + // <------compute inside out--------- + // + // We use the lookup table to get the x_i*Q values + // and do four doublings to compute 16*Q + digits := x.signedRadix16() + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + table.SelectInto(multiple, digits[63]) + + v.Set(NewIdentityPoint()) + tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords + for i := 62; i >= 0; i-- { + tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + table.SelectInto(multiple, digits[i]) + tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords + } + v.fromP1xP1(tmp1) + return v +} + +// basepointNafTable is the nafLookupTable8 for the basepoint. +// It is precomputed the first time it's used. +func basepointNafTable() *nafLookupTable8 { + basepointNafTablePrecomp.initOnce.Do(func() { + basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint()) + }) + return &basepointNafTablePrecomp.table +} + +var basepointNafTablePrecomp struct { + table nafLookupTable8 + initOnce sync.Once +} + +// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical +// generator, and returns v. +// +// Execution time depends on the inputs. +func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point { + checkInitialized(A) + + // Similarly to the single variable-base approach, we compute + // digits and use them with a lookup table. However, because + // we are allowed to do variable-time operations, we don't + // need constant-time lookups or constant-time digit + // computations. + // + // So we use a non-adjacent form of some width w instead of + // radix 16. This is like a binary representation (one digit + // for each binary place) but we allow the digits to grow in + // magnitude up to 2^{w-1} so that the nonzero digits are as + // sparse as possible. Intuitively, this "condenses" the + // "mass" of the scalar onto sparse coefficients (meaning + // fewer additions). + + basepointNafTable := basepointNafTable() + var aTable nafLookupTable5 + aTable.FromP3(A) + // Because the basepoint is fixed, we can use a wider NAF + // corresponding to a bigger table. + aNaf := a.nonAdjacentForm(5) + bNaf := b.nonAdjacentForm(8) + + // Find the first nonzero coefficient. + i := 255 + for j := i; j >= 0; j-- { + if aNaf[j] != 0 || bNaf[j] != 0 { + break + } + } + + multA := &projCached{} + multB := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + for ; i >= 0; i-- { + tmp1.Double(tmp2) + + // Only update v if we have a nonzero coeff to add in. 
+ if aNaf[i] > 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, aNaf[i]) + tmp1.Add(v, multA) + } else if aNaf[i] < 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, -aNaf[i]) + tmp1.Sub(v, multA) + } + + if bNaf[i] > 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, bNaf[i]) + tmp1.AddAffine(v, multB) + } else if bNaf[i] < 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, -bNaf[i]) + tmp1.SubAffine(v, multB) + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go new file mode 100644 index 0000000..83234bb --- /dev/null +++ b/vendor/filippo.io/edwards25519/tables.go @@ -0,0 +1,129 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "crypto/subtle" +) + +// A dynamic lookup table for variable-base, constant-time scalar muls. +type projLookupTable struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, constant-time scalar muls. +type affineLookupTable struct { + points [8]affineCached +} + +// A dynamic lookup table for variable-base, variable-time scalar muls. +type nafLookupTable5 struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, variable-time scalar muls. +type nafLookupTable8 struct { + points [64]affineCached +} + +// Constructors. + +// Builds a lookup table at runtime. Fast. +func (v *projLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to a projCached + // This is needlessly complicated because the API has explicit + // receivers instead of creating stack objects and relying on RVO + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *affineLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to affineCached + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i]))) + } +} + +// Builds a lookup table at runtime. Fast. +func (v *nafLookupTable5) FromP3(q *Point) { + // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q + // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *nafLookupTable8) FromP3(q *Point) { + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 63; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i]))) + } +} + +// Selectors. + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. 
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. +func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Given odd x with 0 < x < 2^4, return x*Q (in variable time). +func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) { + *dest = v.points[x/2] +} + +// Given odd x with 0 < x < 2^7, return x*Q (in variable time). +func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) { + *dest = v.points[x/2] +} diff --git a/vendor/github.com/ClickHouse/ch-go/AUTHORS b/vendor/github.com/ClickHouse/ch-go/AUTHORS new file mode 100644 index 0000000..bb6b010 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/AUTHORS @@ -0,0 +1,2 @@ +ClickHouse, LLC. +The Go Faster Authors diff --git a/vendor/github.com/ClickHouse/ch-go/LICENSE b/vendor/github.com/ClickHouse/ch-go/LICENSE new file mode 100644 index 0000000..f3d7932 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/LICENSE @@ -0,0 +1,202 @@ +Copyright 2016-2023 ClickHouse, Inc. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2023 ClickHouse, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ClickHouse/ch-go/compress/compress.go b/vendor/github.com/ClickHouse/ch-go/compress/compress.go new file mode 100644 index 0000000..a89c640 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/compress/compress.go @@ -0,0 +1,51 @@ +// Package compress implements compression support. +package compress + +import ( + "fmt" + + "github.com/go-faster/city" +) + +//go:generate go run github.com/dmarkham/enumer -transform snake_upper -type Method -output method_enum.go + +// Method is compression codec. +type Method byte + +// Possible compression methods. +const ( + None Method = 0x02 + LZ4 Method = 0x82 + ZSTD Method = 0x90 +) + +// Constants for compression encoding. +// +// See https://go-faster.org/docs/clickhouse/compression for reference. +const ( + checksumSize = 16 + compressHeaderSize = 1 + 4 + 4 + headerSize = checksumSize + compressHeaderSize + + // Limiting total data/block size to protect from possible OOM. 
+ maxDataSize = 1024 * 1024 * 128 // 128MB + maxBlockSize = maxDataSize + + hRawSize = 17 + hDataSize = 21 + hMethod = 16 +) + +// CorruptedDataErr means that provided hash mismatch with calculated. +type CorruptedDataErr struct { + Actual city.U128 + Reference city.U128 + RawSize int + DataSize int +} + +func (c *CorruptedDataErr) Error() string { + return fmt.Sprintf("corrupted data: %s (actual), %s (reference), compressed size: %d, data size: %d", + FormatU128(c.Actual), FormatU128(c.Reference), c.RawSize, c.DataSize, + ) +} diff --git a/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go new file mode 100644 index 0000000..8d44352 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go @@ -0,0 +1,97 @@ +// Code generated by "enumer -transform snake_upper -type Method -output method_enum.go"; DO NOT EDIT. + +package compress + +import ( + "fmt" + "strings" +) + +const ( + _MethodName_0 = "NONE" + _MethodLowerName_0 = "none" + _MethodName_1 = "LZ4" + _MethodLowerName_1 = "lz4" + _MethodName_2 = "ZSTD" + _MethodLowerName_2 = "zstd" +) + +var ( + _MethodIndex_0 = [...]uint8{0, 4} + _MethodIndex_1 = [...]uint8{0, 3} + _MethodIndex_2 = [...]uint8{0, 4} +) + +func (i Method) String() string { + switch { + case i == 2: + return _MethodName_0 + case i == 130: + return _MethodName_1 + case i == 144: + return _MethodName_2 + default: + return fmt.Sprintf("Method(%d)", i) + } +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _MethodNoOp() { + var x [1]struct{} + _ = x[None-(2)] + _ = x[LZ4-(130)] + _ = x[ZSTD-(144)] +} + +var _MethodValues = []Method{None, LZ4, ZSTD} + +var _MethodNameToValueMap = map[string]Method{ + _MethodName_0[0:4]: None, + _MethodLowerName_0[0:4]: None, + _MethodName_1[0:3]: LZ4, + _MethodLowerName_1[0:3]: LZ4, + _MethodName_2[0:4]: ZSTD, + _MethodLowerName_2[0:4]: ZSTD, +} + +var _MethodNames = []string{ + _MethodName_0[0:4], + _MethodName_1[0:3], + _MethodName_2[0:4], +} + +// MethodString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func MethodString(s string) (Method, error) { + if val, ok := _MethodNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _MethodNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Method values", s) +} + +// MethodValues returns all values of the enum +func MethodValues() []Method { + return _MethodValues +} + +// MethodStrings returns a slice of all String values of the enum +func MethodStrings() []string { + strs := make([]string, len(_MethodNames)) + copy(strs, _MethodNames) + return strs +} + +// IsAMethod returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Method) IsAMethod() bool { + for _, v := range _MethodValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/compress/reader.go b/vendor/github.com/ClickHouse/ch-go/compress/reader.go new file mode 100644 index 0000000..6a26f9d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/compress/reader.go @@ -0,0 +1,135 @@ +package compress + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/go-faster/city" + "github.com/go-faster/errors" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +// Reader decodes compressed blocks. 
+type Reader struct { + reader io.Reader + data []byte + pos int64 + raw []byte + header []byte + zstd *zstd.Decoder +} + +// FormatU128 formats city.U128 as hex. +func FormatU128(v city.U128) string { + var buf [16]byte + binary.LittleEndian.PutUint64(buf[:8], v.Low) + binary.LittleEndian.PutUint64(buf[8:], v.High) + return fmt.Sprintf("%x", buf) +} + +// readBlock reads next compressed data into raw and decompresses into data. +func (r *Reader) readBlock() error { + r.pos = 0 + + _ = r.header[headerSize-1] + if _, err := io.ReadFull(r.reader, r.header); err != nil { + return errors.Wrap(err, "header") + } + + var ( + rawSize = int(binary.LittleEndian.Uint32(r.header[hRawSize:])) - compressHeaderSize + dataSize = int(binary.LittleEndian.Uint32(r.header[hDataSize:])) + ) + if dataSize < 0 || dataSize > maxDataSize { + return errors.Errorf("data size should be %d < %d < %d", 0, dataSize, maxDataSize) + } + if rawSize < 0 || rawSize > maxBlockSize { + return errors.Errorf("raw size should be %d < %d < %d", 0, rawSize, maxBlockSize) + } + + r.data = append(r.data[:0], make([]byte, dataSize)...) + r.raw = append(r.raw[:0], r.header...) + r.raw = append(r.raw, make([]byte, rawSize)...) + _ = r.raw[:rawSize+headerSize-1] + + if _, err := io.ReadFull(r.reader, r.raw[headerSize:]); err != nil { + return errors.Wrap(err, "read raw") + } + hGot := city.U128{ + Low: binary.LittleEndian.Uint64(r.raw[0:8]), + High: binary.LittleEndian.Uint64(r.raw[8:16]), + } + h := city.CH128(r.raw[hMethod:]) + if hGot != h { + return errors.Wrap(&CorruptedDataErr{ + Actual: h, + Reference: hGot, + RawSize: rawSize, + DataSize: dataSize, + }, "mismatch") + } + switch m := Method(r.header[hMethod]); m { + case LZ4: + n, err := lz4.UncompressBlock(r.raw[headerSize:], r.data) + if err != nil { + return errors.Wrap(err, "uncompress") + } + if n != dataSize { + return errors.Errorf("unexpected uncompressed data size: %d (actual) != %d (got in header)", + n, dataSize, + ) + } + case ZSTD: + if r.zstd == nil { + // Lazily initializing to prevent spawning goroutines in NewReader. + // See https://github.com/golang/go/issues/47056#issuecomment-997436820 + zstdReader, err := zstd.NewReader(nil, + zstd.WithDecoderConcurrency(1), + zstd.WithDecoderLowmem(true), + ) + if err != nil { + return errors.Wrap(err, "zstd") + } + r.zstd = zstdReader + } + data, err := r.zstd.DecodeAll(r.raw[headerSize:], r.data[:0]) + if err != nil { + return errors.Wrap(err, "uncompress") + } + if len(data) != dataSize { + return errors.Errorf("unexpected uncompressed data size: %d (actual) != %d (got in header)", + len(data), dataSize, + ) + } + r.data = data + case None: + copy(r.data, r.raw[headerSize:]) + default: + return errors.Errorf("compression 0x%02x not implemented", m) + } + + return nil +} + +// Read implements io.Reader. +func (r *Reader) Read(p []byte) (n int, err error) { + if r.pos >= int64(len(r.data)) { + if err := r.readBlock(); err != nil { + return 0, errors.Wrap(err, "read next block") + } + } + n = copy(p, r.data[r.pos:]) + r.pos += int64(n) + return n, nil +} + +// NewReader returns new *Reader from r. 
+func NewReader(r io.Reader) *Reader { + return &Reader{ + zstd: nil, // lazily initialized + reader: r, + header: make([]byte, headerSize), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/compress/writer.go b/vendor/github.com/ClickHouse/ch-go/compress/writer.go new file mode 100644 index 0000000..6094b05 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/compress/writer.go @@ -0,0 +1,67 @@ +package compress + +import ( + "encoding/binary" + + "github.com/go-faster/city" + "github.com/go-faster/errors" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +// Writer encodes compressed blocks. +type Writer struct { + Data []byte + + lz4 *lz4.Compressor + zstd *zstd.Encoder +} + +// Compress buf into Data. +func (w *Writer) Compress(m Method, buf []byte) error { + maxSize := lz4.CompressBlockBound(len(buf)) + w.Data = append(w.Data[:0], make([]byte, maxSize+headerSize)...) + _ = w.Data[:headerSize] + w.Data[hMethod] = byte(m) + + var n int + + switch m { + case LZ4: + compressedSize, err := w.lz4.CompressBlock(buf, w.Data[headerSize:]) + if err != nil { + return errors.Wrap(err, "block") + } + n = compressedSize + case ZSTD: + w.Data = w.zstd.EncodeAll(buf, w.Data[:headerSize]) + n = len(w.Data) - headerSize + case None: + n = copy(w.Data[headerSize:], buf) + } + + w.Data = w.Data[:n+headerSize] + + binary.LittleEndian.PutUint32(w.Data[hRawSize:], uint32(n+compressHeaderSize)) + binary.LittleEndian.PutUint32(w.Data[hDataSize:], uint32(len(buf))) + h := city.CH128(w.Data[hMethod:]) + binary.LittleEndian.PutUint64(w.Data[0:8], h.Low) + binary.LittleEndian.PutUint64(w.Data[8:16], h.High) + + return nil +} + +func NewWriter() *Writer { + w, err := zstd.NewWriter(nil, + zstd.WithEncoderLevel(zstd.SpeedDefault), + zstd.WithEncoderConcurrency(1), + zstd.WithLowerEncoderMem(true), + ) + if err != nil { + panic(err) + } + return &Writer{ + lz4: &lz4.Compressor{}, + zstd: w, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/block.go b/vendor/github.com/ClickHouse/ch-go/proto/block.go new file mode 100644 index 0000000..3454806 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/block.go @@ -0,0 +1,286 @@ +package proto + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-faster/errors" +) + +// BlockInfo describes block. +type BlockInfo struct { + Overflows bool + BucketNum int +} + +func (i BlockInfo) String() string { + return fmt.Sprintf("overflows: %v, buckets: %d", i.Overflows, i.BucketNum) +} + +const endField = 0 // end of field pairs + +// fields of BlockInfo. +const ( + blockInfoOverflows = 1 + blockInfoBucketNum = 2 +) + +// Encode to Buffer. +func (i BlockInfo) Encode(b *Buffer) { + b.PutUVarInt(blockInfoOverflows) + b.PutBool(i.Overflows) + + b.PutUVarInt(blockInfoBucketNum) + b.PutInt32(int32(i.BucketNum)) + + b.PutUVarInt(endField) +} + +func (i *BlockInfo) Decode(r *Reader) error { + for { + f, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "field id") + } + switch f { + case blockInfoOverflows: + v, err := r.Bool() + if err != nil { + return errors.Wrap(err, "overflows") + } + i.Overflows = v + case blockInfoBucketNum: + v, err := r.Int32() + if err != nil { + return errors.Wrap(err, "bucket number") + } + i.BucketNum = int(v) + case endField: + return nil + default: + return errors.Errorf("unknown field %d", f) + } + } +} + +// Input of query. +type Input []InputColumn + +// Reset all columns that implement proto.Resettable. 
+func (i Input) Reset() { + for _, c := range i { + if col, ok := c.Data.(Resettable); ok { + col.Reset() + } + } +} + +// Into returns INSERT INTO table (c0, c..., cn) VALUES query. +func (i Input) Into(table string) string { + return fmt.Sprintf("INSERT INTO %s %s VALUES", strconv.QuoteToASCII(table), i.Columns()) +} + +// Columns returns "(foo, bar, baz)" formatted list of Input column names. +func (i Input) Columns() string { + var ( + b strings.Builder + buf [64]byte + ) + + b.WriteRune('(') + for idx, v := range i { + escaped := strconv.AppendQuoteToASCII(buf[:0], v.Name) + b.Write(escaped) + if idx != len(i)-1 { + b.WriteRune(',') + } + } + b.WriteRune(')') + + return b.String() +} + +type InputColumn struct { + Name string + Data ColInput +} + +// ResultColumn can be uses as part of Results or as single Result. +type ResultColumn struct { + Name string // Name of column. Inferred if not provided. + Data ColResult // Data of column, required. +} + +// DecodeResult implements Result as "single result" helper. +func (c ResultColumn) DecodeResult(r *Reader, version int, b Block) error { + v := Results{c} + return v.DecodeResult(r, version, b) +} + +// AutoResult is ResultColumn with type inference. +func AutoResult(name string) ResultColumn { + return ResultColumn{ + Name: name, + Data: &ColAuto{}, + } +} + +func (c InputColumn) EncodeStart(buf *Buffer, version int) { + buf.PutString(c.Name) + buf.PutString(string(c.Data.Type())) + if FeatureCustomSerialization.In(version) { + buf.PutBool(false) // no custom serialization + } +} + +type Block struct { + Info BlockInfo + Columns int + Rows int +} + +func (b Block) EncodeAware(buf *Buffer, version int) { + if FeatureBlockInfo.In(version) { + b.Info.Encode(buf) + } + + buf.PutInt(b.Columns) + buf.PutInt(b.Rows) +} + +func (b Block) EncodeBlock(buf *Buffer, version int, input []InputColumn) error { + if FeatureBlockInfo.In(version) { + b.Info.Encode(buf) + } + if err := b.EncodeRawBlock(buf, version, input); err != nil { + return errors.Wrap(err, "raw block") + } + return nil +} + +func (b Block) EncodeRawBlock(buf *Buffer, version int, input []InputColumn) error { + buf.PutInt(b.Columns) + buf.PutInt(b.Rows) + for _, col := range input { + if r := col.Data.Rows(); r != b.Rows { + return errors.Errorf("%q has %d rows, expected %d", col.Name, r, b.Rows) + } + col.EncodeStart(buf, version) + if v, ok := col.Data.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrapf(err, "prepare %q", col.Name) + } + } + if col.Data.Rows() == 0 { + continue + } + if v, ok := col.Data.(StateEncoder); ok { + v.EncodeState(buf) + } + col.Data.EncodeColumn(buf) + } + return nil +} + +// This constrains can prevent accidental OOM and allow early detection +// of erroneous column or row count. +// +// Just empirical values, there are no such limits in spec or in ClickHouse, +// so is subject to change if false-positives occur. +const ( + maxColumnsInBlock = 1_000_000 + maxRowsInBLock = 100_000_000 +) + +func checkRows(n int) error { + if n < 0 { + return errors.New("negative") + } + if n > maxRowsInBLock { + // Most blocks should be less than 100M values, but technically + // there is no limit (can be several billions). + // 1B rows is too big and probably several gigabytes in RSS. + // + // The 100M UInt64 block is ~655MB RSS, should be pretty safe and + // protect from accidental (e.g. cosmic rays) rows count corruption. 
+ return errors.Errorf("%d is suspiciously big, maximum is %d (preventing possible OOM)", n, maxRowsInBLock) + } + return nil +} + +func (b *Block) End() bool { + return b.Columns == 0 && b.Rows == 0 +} + +func (b *Block) DecodeRawBlock(r *Reader, version int, target Result) error { + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "columns") + } + if v > maxColumnsInBlock || v < 0 { + return errors.Errorf("invalid columns number %d", v) + } + b.Columns = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "rows") + } + if err := checkRows(v); err != nil { + return errors.Wrap(err, "rows count") + } + b.Rows = v + } + if b.End() { + // End of data, special case. + return nil + } + if target == nil && b.Rows > 0 { + return errors.New("got rows without target") + } + if target == nil { + // Just skipping rows and types. + for i := 0; i < b.Columns; i++ { + // Name. + if _, err := r.Str(); err != nil { + return errors.Wrapf(err, "column [%d] name", i) + } + // Type. + if _, err := r.Str(); err != nil { + return errors.Wrapf(err, "column [%d] type", i) + } + if FeatureCustomSerialization.In(version) { + // Custom serialization flag. + v, err := r.Bool() + if err != nil { + return errors.Wrapf(err, "column [%d] custom serialization flag", i) + } + if v { + return errors.Errorf("column [%d] has custom serialization (not supported)", i) + } + } + } + return nil + } + if err := target.DecodeResult(r, version, *b); err != nil { + return errors.Wrap(err, "target") + } + + return nil +} + +func (b *Block) DecodeBlock(r *Reader, version int, target Result) error { + if FeatureBlockInfo.In(version) { + if err := b.Info.Decode(r); err != nil { + return errors.Wrap(err, "info") + } + } + if err := b.DecodeRawBlock(r, version, target); err != nil { + return errors.Wrap(err, "raw block") + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/bool.go b/vendor/github.com/ClickHouse/ch-go/proto/bool.go new file mode 100644 index 0000000..5b6f873 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/bool.go @@ -0,0 +1,6 @@ +package proto + +const ( + boolTrue uint8 = 1 + boolFalse uint8 = 0 +) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/buffer.go b/vendor/github.com/ClickHouse/ch-go/proto/buffer.go new file mode 100644 index 0000000..e2b1e8f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/buffer.go @@ -0,0 +1,158 @@ +package proto + +import ( + "bytes" + "encoding/binary" + "io" + "math" +) + +// Buffer implements ClickHouse binary protocol encoding. +type Buffer struct { + Buf []byte +} + +// Reader returns new *Reader from *Buffer. +func (b *Buffer) Reader() *Reader { + return NewReader(bytes.NewReader(b.Buf)) +} + +// Ensure Buf length. +func (b *Buffer) Ensure(n int) { + b.Buf = append(b.Buf[:0], make([]byte, n)...) +} + +// Encoder implements encoding to Buffer. +type Encoder interface { + Encode(b *Buffer) +} + +// AwareEncoder implements encoding to Buffer that depends on version. +type AwareEncoder interface { + EncodeAware(b *Buffer, version int) +} + +// EncodeAware value that implements AwareEncoder. +func (b *Buffer) EncodeAware(e AwareEncoder, version int) { + e.EncodeAware(b, version) +} + +// Encode value that implements Encoder. +func (b *Buffer) Encode(e Encoder) { + e.Encode(b) +} + +// Reset buffer to zero length. +func (b *Buffer) Reset() { + b.Buf = b.Buf[:0] +} + +// Read implements io.Reader. 
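// Sketch of building the INSERT statement from an Input as defined above
// (column and table names are illustrative; ColUInt64 and ColStr are assumed
// to satisfy ColInput, as elsewhere in this package).
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	input := proto.Input{
		{Name: "id", Data: new(proto.ColUInt64)},
		{Name: "title", Data: new(proto.ColStr)},
	}
	fmt.Println(input.Into("events"))
	// INSERT INTO "events" ("id","title") VALUES
}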
+func (b *Buffer) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if len(b.Buf) == 0 { + return 0, io.EOF + } + n = copy(p, b.Buf) + b.Buf = b.Buf[n:] + return n, nil +} + +// PutRaw writes v as raw bytes to buffer. +func (b *Buffer) PutRaw(v []byte) { + b.Buf = append(b.Buf, v...) +} + +// PutUVarInt encodes x as uvarint. +func (b *Buffer) PutUVarInt(x uint64) { + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(buf, x) + b.Buf = append(b.Buf, buf[:n]...) +} + +// PutInt encodes integer as uvarint. +func (b *Buffer) PutInt(x int) { + b.PutUVarInt(uint64(x)) +} + +// PutByte encodes byte as uint8. +func (b *Buffer) PutByte(x byte) { + b.PutUInt8(x) +} + +// PutLen encodes length to buffer as uvarint. +func (b *Buffer) PutLen(x int) { + b.PutUVarInt(uint64(x)) +} + +// PutString encodes sting value to buffer. +func (b *Buffer) PutString(s string) { + b.PutLen(len(s)) + b.Buf = append(b.Buf, s...) +} + +func (b *Buffer) PutUInt8(x uint8) { + b.Buf = append(b.Buf, x) +} + +func (b *Buffer) PutUInt16(x uint16) { + buf := make([]byte, 16/8) + binary.LittleEndian.PutUint16(buf, x) + b.Buf = append(b.Buf, buf...) +} + +func (b *Buffer) PutUInt32(x uint32) { + buf := make([]byte, 32/8) + binary.LittleEndian.PutUint32(buf, x) + b.Buf = append(b.Buf, buf...) +} + +func (b *Buffer) PutUInt64(x uint64) { + buf := make([]byte, 64/8) + binary.LittleEndian.PutUint64(buf, x) + b.Buf = append(b.Buf, buf...) +} + +func (b *Buffer) PutUInt128(x UInt128) { + buf := make([]byte, 128/8) + binPutUInt128(buf, x) + b.Buf = append(b.Buf, buf...) +} + +func (b *Buffer) PutInt8(v int8) { + b.PutUInt8(uint8(v)) +} + +func (b *Buffer) PutInt16(v int16) { + b.PutUInt16(uint16(v)) +} + +func (b *Buffer) PutInt32(x int32) { + b.PutUInt32(uint32(x)) +} + +func (b *Buffer) PutInt64(x int64) { + b.PutUInt64(uint64(x)) +} + +func (b *Buffer) PutInt128(x Int128) { + b.PutUInt128(UInt128(x)) +} + +func (b *Buffer) PutBool(v bool) { + if v { + b.PutUInt8(boolTrue) + } else { + b.PutUInt8(boolFalse) + } +} + +func (b *Buffer) PutFloat64(v float64) { + b.PutUInt64(math.Float64bits(v)) +} + +func (b *Buffer) PutFloat32(v float32) { + b.PutUInt32(math.Float32bits(v)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_code.go b/vendor/github.com/ClickHouse/ch-go/proto/client_code.go new file mode 100644 index 0000000..545cd71 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_code.go @@ -0,0 +1,19 @@ +package proto + +//go:generate go run github.com/dmarkham/enumer -type ClientCode -trimprefix ClientCode -output client_code_enum.go + +// ClientCode is sent from client to server. +type ClientCode byte + +// Possible client codes. +const ( + ClientCodeHello ClientCode = 0 // client part of "handshake" + ClientCodeQuery ClientCode = 1 // query start + ClientCodeData ClientCode = 2 // data block (can be compressed) + ClientCodeCancel ClientCode = 3 // query cancel + ClientCodePing ClientCode = 4 // ping request to server + ClientTablesStatusRequest ClientCode = 5 // tables status request +) + +// Encode to buffer. +func (c ClientCode) Encode(b *Buffer) { b.PutByte(byte(c)) } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_code_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/client_code_enum.go new file mode 100644 index 0000000..85ffab2 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_code_enum.go @@ -0,0 +1,94 @@ +// Code generated by "enumer -type ClientCode -trimprefix ClientCode -output client_code_enum.go"; DO NOT EDIT. 
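// Small sketch of the low-level Buffer above: values go in through the Put*
// helpers and come back out via the Reader returned by Buffer.Reader, whose
// Str, Int and Bool methods are the ones used throughout the decoders in this
// package.
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var b proto.Buffer
	b.PutString("native")
	b.PutInt(451)
	b.PutBool(true)

	r := b.Reader()
	s, _ := r.Str()
	n, _ := r.Int()
	v, _ := r.Bool()
	fmt.Println(s, n, v) // native 451 true
}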
+ +package proto + +import ( + "fmt" + "strings" +) + +const _ClientCodeName = "HelloQueryDataCancelPingClientTablesStatusRequest" + +var _ClientCodeIndex = [...]uint8{0, 5, 10, 14, 20, 24, 49} + +const _ClientCodeLowerName = "helloquerydatacancelpingclienttablesstatusrequest" + +func (i ClientCode) String() string { + if i >= ClientCode(len(_ClientCodeIndex)-1) { + return fmt.Sprintf("ClientCode(%d)", i) + } + return _ClientCodeName[_ClientCodeIndex[i]:_ClientCodeIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _ClientCodeNoOp() { + var x [1]struct{} + _ = x[ClientCodeHello-(0)] + _ = x[ClientCodeQuery-(1)] + _ = x[ClientCodeData-(2)] + _ = x[ClientCodeCancel-(3)] + _ = x[ClientCodePing-(4)] + _ = x[ClientTablesStatusRequest-(5)] +} + +var _ClientCodeValues = []ClientCode{ClientCodeHello, ClientCodeQuery, ClientCodeData, ClientCodeCancel, ClientCodePing, ClientTablesStatusRequest} + +var _ClientCodeNameToValueMap = map[string]ClientCode{ + _ClientCodeName[0:5]: ClientCodeHello, + _ClientCodeLowerName[0:5]: ClientCodeHello, + _ClientCodeName[5:10]: ClientCodeQuery, + _ClientCodeLowerName[5:10]: ClientCodeQuery, + _ClientCodeName[10:14]: ClientCodeData, + _ClientCodeLowerName[10:14]: ClientCodeData, + _ClientCodeName[14:20]: ClientCodeCancel, + _ClientCodeLowerName[14:20]: ClientCodeCancel, + _ClientCodeName[20:24]: ClientCodePing, + _ClientCodeLowerName[20:24]: ClientCodePing, + _ClientCodeName[24:49]: ClientTablesStatusRequest, + _ClientCodeLowerName[24:49]: ClientTablesStatusRequest, +} + +var _ClientCodeNames = []string{ + _ClientCodeName[0:5], + _ClientCodeName[5:10], + _ClientCodeName[10:14], + _ClientCodeName[14:20], + _ClientCodeName[20:24], + _ClientCodeName[24:49], +} + +// ClientCodeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ClientCodeString(s string) (ClientCode, error) { + if val, ok := _ClientCodeNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ClientCodeNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ClientCode values", s) +} + +// ClientCodeValues returns all values of the enum +func ClientCodeValues() []ClientCode { + return _ClientCodeValues +} + +// ClientCodeStrings returns a slice of all String values of the enum +func ClientCodeStrings() []string { + strs := make([]string, len(_ClientCodeNames)) + copy(strs, _ClientCodeNames) + return strs +} + +// IsAClientCode returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i ClientCode) IsAClientCode() bool { + for _, v := range _ClientCodeValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_data.go b/vendor/github.com/ClickHouse/ch-go/proto/client_data.go new file mode 100644 index 0000000..286ddb4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_data.go @@ -0,0 +1,24 @@ +package proto + +import "github.com/go-faster/errors" + +type ClientData struct { + TableName string +} + +func (c ClientData) EncodeAware(b *Buffer, version int) { + if FeatureTempTables.In(version) { + b.PutString(c.TableName) + } +} + +func (c *ClientData) DecodeAware(r *Reader, version int) error { + if FeatureTempTables.In(version) { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "temp tables") + } + c.TableName = v + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_hello.go b/vendor/github.com/ClickHouse/ch-go/proto/client_hello.go new file mode 100644 index 0000000..04dfc4b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_hello.go @@ -0,0 +1,86 @@ +package proto + +import "github.com/go-faster/errors" + +// ClientHello represents ClientCodeHello message. +type ClientHello struct { + Name string + + Major int // client major version + Minor int // client minor version + + // ProtocolVersion is TCP protocol version of client. + // + // Usually it is equal to the latest compatible server revision, but + // should not be confused with it. + ProtocolVersion int + + Database string + User string + Password string +} + +// Encode to Buffer. +func (c ClientHello) Encode(b *Buffer) { + ClientCodeHello.Encode(b) + b.PutString(c.Name) + b.PutInt(c.Major) + b.PutInt(c.Minor) + b.PutInt(c.ProtocolVersion) + b.PutString(c.Database) + b.PutString(c.User) + b.PutString(c.Password) +} + +func (c *ClientHello) Decode(r *Reader) error { + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "name") + } + c.Name = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "major") + } + c.Major = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "minor") + } + c.Minor = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "protocol version") + } + c.ProtocolVersion = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "database") + } + c.Database = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "user") + } + c.User = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "password") + } + c.Password = v + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_info.go b/vendor/github.com/ClickHouse/ch-go/proto/client_info.go new file mode 100644 index 0000000..f144761 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_info.go @@ -0,0 +1,319 @@ +package proto + +import ( + "github.com/go-faster/errors" + "github.com/segmentio/asm/bswap" + "go.opentelemetry.io/otel/trace" +) + +//go:generate go run github.com/dmarkham/enumer -type Interface -trimprefix Interface -output client_info_interface_enum.go + +// Interface is interface of client. +type Interface byte + +// Possible interfaces. +const ( + InterfaceTCP Interface = 1 + InterfaceHTTP Interface = 2 +) + +//go:generate go run github.com/dmarkham/enumer -type ClientQueryKind -trimprefix ClientQueryKind -output client_info_query_enum.go + +// ClientQueryKind is kind of query. 
+type ClientQueryKind byte + +// Possible query kinds. +const ( + ClientQueryNone ClientQueryKind = 0 + ClientQueryInitial ClientQueryKind = 1 + ClientQuerySecondary ClientQueryKind = 2 +) + +// ClientInfo message. +type ClientInfo struct { + ProtocolVersion int + + Major int + Minor int + Patch int + + Interface Interface + Query ClientQueryKind + + InitialUser string + InitialQueryID string + InitialAddress string + InitialTime int64 + + OSUser string + ClientHostname string + ClientName string + + Span trace.SpanContext + + QuotaKey string + DistributedDepth int + + // For parallel processing on replicas. + + CollaborateWithInitiator bool + CountParticipatingReplicas int + NumberOfCurrentReplica int +} + +// EncodeAware encodes to buffer version-aware. +func (c ClientInfo) EncodeAware(b *Buffer, version int) { + b.PutByte(byte(c.Query)) + + b.PutString(c.InitialUser) + b.PutString(c.InitialQueryID) + b.PutString(c.InitialAddress) + if FeatureQueryStartTime.In(version) { + b.PutInt64(c.InitialTime) + } + + b.PutByte(byte(c.Interface)) + + b.PutString(c.OSUser) + b.PutString(c.ClientHostname) + b.PutString(c.ClientName) + + b.PutInt(c.Major) + b.PutInt(c.Minor) + b.PutInt(c.ProtocolVersion) + + if FeatureQuotaKeyInClientInfo.In(version) { + b.PutString(c.QuotaKey) + } + if FeatureDistributedDepth.In(version) { + b.PutInt(c.DistributedDepth) + } + if FeatureVersionPatch.In(version) && c.Interface == InterfaceTCP { + b.PutInt(c.Patch) + } + if FeatureOpenTelemetry.In(version) { + if c.Span.IsValid() { + b.PutByte(1) + { + v := c.Span.TraceID() + start := len(b.Buf) + b.Buf = append(b.Buf, v[:]...) + bswap.Swap64(b.Buf[start:]) // https://github.com/ClickHouse/ClickHouse/issues/34369 + } + { + v := c.Span.SpanID() + start := len(b.Buf) + b.Buf = append(b.Buf, v[:]...) + bswap.Swap64(b.Buf[start:]) // https://github.com/ClickHouse/ClickHouse/issues/34369 + } + b.PutString(c.Span.TraceState().String()) + b.PutByte(byte(c.Span.TraceFlags())) + } else { + // No OTEL data. + b.PutByte(0) + } + } + if FeatureParallelReplicas.In(version) { + if c.CollaborateWithInitiator { + b.PutInt(1) + } else { + b.PutInt(0) + } + b.PutInt(c.CountParticipatingReplicas) + b.PutInt(c.NumberOfCurrentReplica) + } +} + +func (c *ClientInfo) DecodeAware(r *Reader, version int) error { + { + v, err := r.UInt8() + if err != nil { + return errors.Wrap(err, "query kind") + } + c.Query = ClientQueryKind(v) + if !c.Query.IsAClientQueryKind() { + return errors.Errorf("unknown query kind %d", v) + } + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "initial user") + } + c.InitialUser = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "initial query id") + } + c.InitialQueryID = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "initial address") + } + c.InitialAddress = v + } + + if FeatureQueryStartTime.In(version) { + // Microseconds. 
+ v, err := r.Int64() + if err != nil { + return errors.Wrap(err, "query start time") + } + c.InitialTime = v + } + + { + v, err := r.UInt8() + if err != nil { + return errors.Wrap(err, "interface") + } + c.Interface = Interface(v) + if !c.Interface.IsAInterface() { + return errors.Errorf("unknown interface %d", v) + } + + // TODO(ernado): support HTTP + if c.Interface != InterfaceTCP { + return errors.New("only tcp interface is supported") + } + } + + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "os user") + } + c.OSUser = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "client hostname") + } + c.ClientHostname = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "client name") + } + c.ClientName = v + } + + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "major version") + } + c.Major = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "minor version") + } + c.Minor = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "protocol version") + } + c.ProtocolVersion = v + } + + if FeatureQuotaKeyInClientInfo.In(version) { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "quota key") + } + c.QuotaKey = v + } + if FeatureDistributedDepth.In(version) { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "distributed depth") + } + c.DistributedDepth = v + } + if FeatureVersionPatch.In(version) && c.Interface == InterfaceTCP { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "patch version") + } + c.Patch = v + } + if FeatureOpenTelemetry.In(version) { + hasTrace, err := r.Bool() + if err != nil { + return errors.Wrap(err, "open telemetry start") + } + if hasTrace { + var cfg trace.SpanContextConfig + { + v, err := r.ReadRaw(len(cfg.TraceID)) + if err != nil { + return errors.Wrap(err, "trace id") + } + bswap.Swap64(v) // https://github.com/ClickHouse/ClickHouse/issues/34369 + copy(cfg.TraceID[:], v) + } + { + v, err := r.ReadRaw(len(cfg.SpanID)) + if err != nil { + return errors.Wrap(err, "span id") + } + bswap.Swap64(v) // https://github.com/ClickHouse/ClickHouse/issues/34369 + copy(cfg.SpanID[:], v) + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "trace state") + } + state, err := trace.ParseTraceState(v) + if err != nil { + return errors.Wrap(err, "parse trace state") + } + cfg.TraceState = state + } + { + v, err := r.Byte() + if err != nil { + return errors.Wrap(err, "trace flag") + } + cfg.TraceFlags = trace.TraceFlags(v) + } + c.Span = trace.NewSpanContext(cfg) + } + } + if FeatureParallelReplicas.In(version) { + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "parallel replicas") + } + c.CollaborateWithInitiator = v == 1 + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "count participating replicas") + } + c.CountParticipatingReplicas = v + } + { + v, err := r.Int() + if err != nil { + return errors.Wrap(err, "number of current replica") + } + c.NumberOfCurrentReplica = v + } + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_info_interface_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/client_info_interface_enum.go new file mode 100644 index 0000000..3e09b0d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_info_interface_enum.go @@ -0,0 +1,79 @@ +// Code generated by "enumer -type Interface -trimprefix Interface -output client_info_interface_enum.go"; DO NOT EDIT. 
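// Sketch of the client side of the handshake messages shown above: fill a
// ClientHello and encode it into a Buffer. Version numbers and credentials are
// illustrative only.
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	hello := proto.ClientHello{
		Name:            "my-client",
		Major:           1,
		Minor:           0,
		ProtocolVersion: 54460, // illustrative TCP protocol revision
		Database:        "default",
		User:            "default",
		Password:        "",
	}

	var b proto.Buffer
	hello.Encode(&b) // ClientCodeHello followed by the fields above
	fmt.Println(len(b.Buf) > 0) // true
}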
+ +package proto + +import ( + "fmt" + "strings" +) + +const _InterfaceName = "TCPHTTP" + +var _InterfaceIndex = [...]uint8{0, 3, 7} + +const _InterfaceLowerName = "tcphttp" + +func (i Interface) String() string { + i -= 1 + if i >= Interface(len(_InterfaceIndex)-1) { + return fmt.Sprintf("Interface(%d)", i+1) + } + return _InterfaceName[_InterfaceIndex[i]:_InterfaceIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _InterfaceNoOp() { + var x [1]struct{} + _ = x[InterfaceTCP-(1)] + _ = x[InterfaceHTTP-(2)] +} + +var _InterfaceValues = []Interface{InterfaceTCP, InterfaceHTTP} + +var _InterfaceNameToValueMap = map[string]Interface{ + _InterfaceName[0:3]: InterfaceTCP, + _InterfaceLowerName[0:3]: InterfaceTCP, + _InterfaceName[3:7]: InterfaceHTTP, + _InterfaceLowerName[3:7]: InterfaceHTTP, +} + +var _InterfaceNames = []string{ + _InterfaceName[0:3], + _InterfaceName[3:7], +} + +// InterfaceString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func InterfaceString(s string) (Interface, error) { + if val, ok := _InterfaceNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _InterfaceNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Interface values", s) +} + +// InterfaceValues returns all values of the enum +func InterfaceValues() []Interface { + return _InterfaceValues +} + +// InterfaceStrings returns a slice of all String values of the enum +func InterfaceStrings() []string { + strs := make([]string, len(_InterfaceNames)) + copy(strs, _InterfaceNames) + return strs +} + +// IsAInterface returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Interface) IsAInterface() bool { + for _, v := range _InterfaceValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/client_info_query_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/client_info_query_enum.go new file mode 100644 index 0000000..e8a15e0 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/client_info_query_enum.go @@ -0,0 +1,82 @@ +// Code generated by "enumer -type ClientQueryKind -trimprefix ClientQueryKind -output client_info_query_enum.go"; DO NOT EDIT. + +package proto + +import ( + "fmt" + "strings" +) + +const _ClientQueryKindName = "ClientQueryNoneClientQueryInitialClientQuerySecondary" + +var _ClientQueryKindIndex = [...]uint8{0, 15, 33, 53} + +const _ClientQueryKindLowerName = "clientquerynoneclientqueryinitialclientquerysecondary" + +func (i ClientQueryKind) String() string { + if i >= ClientQueryKind(len(_ClientQueryKindIndex)-1) { + return fmt.Sprintf("ClientQueryKind(%d)", i) + } + return _ClientQueryKindName[_ClientQueryKindIndex[i]:_ClientQueryKindIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. 
+func _ClientQueryKindNoOp() { + var x [1]struct{} + _ = x[ClientQueryNone-(0)] + _ = x[ClientQueryInitial-(1)] + _ = x[ClientQuerySecondary-(2)] +} + +var _ClientQueryKindValues = []ClientQueryKind{ClientQueryNone, ClientQueryInitial, ClientQuerySecondary} + +var _ClientQueryKindNameToValueMap = map[string]ClientQueryKind{ + _ClientQueryKindName[0:15]: ClientQueryNone, + _ClientQueryKindLowerName[0:15]: ClientQueryNone, + _ClientQueryKindName[15:33]: ClientQueryInitial, + _ClientQueryKindLowerName[15:33]: ClientQueryInitial, + _ClientQueryKindName[33:53]: ClientQuerySecondary, + _ClientQueryKindLowerName[33:53]: ClientQuerySecondary, +} + +var _ClientQueryKindNames = []string{ + _ClientQueryKindName[0:15], + _ClientQueryKindName[15:33], + _ClientQueryKindName[33:53], +} + +// ClientQueryKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ClientQueryKindString(s string) (ClientQueryKind, error) { + if val, ok := _ClientQueryKindNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ClientQueryKindNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ClientQueryKind values", s) +} + +// ClientQueryKindValues returns all values of the enum +func ClientQueryKindValues() []ClientQueryKind { + return _ClientQueryKindValues +} + +// ClientQueryKindStrings returns a slice of all String values of the enum +func ClientQueryKindStrings() []string { + strs := make([]string, len(_ClientQueryKindNames)) + copy(strs, _ClientQueryKindNames) + return strs +} + +// IsAClientQueryKind returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ClientQueryKind) IsAClientQueryKind() bool { + for _, v := range _ClientQueryKindValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go new file mode 100644 index 0000000..ad57436 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go @@ -0,0 +1,155 @@ +package proto + +import ( + "github.com/go-faster/errors" +) + +// Compile-time assertions for Array. +var ( + _ ColInput = NewArray[string]((*ColStr)(nil)) + _ ColResult = NewArray[string]((*ColStr)(nil)) + _ Column = NewArray[string]((*ColStr)(nil)) + _ StateEncoder = NewArray[string]((*ColStr)(nil)) + _ StateDecoder = NewArray[string]((*ColStr)(nil)) + _ Inferable = NewArray[string]((*ColStr)(nil)) + _ Preparable = NewArray[string]((*ColStr)(nil)) +) + +// Arrayable constraint specifies ability of column T to be Array(T). +type Arrayable[T any] interface { + Array() *ColArr[T] +} + +// ColArr is Array(T). +type ColArr[T any] struct { + Offsets ColUInt64 + Data ColumnOf[T] +} + +// NewArray returns ColArr of c. +// +// Example: NewArray[string](new(ColStr)) +func NewArray[T any](c ColumnOf[T]) *ColArr[T] { + return &ColArr[T]{ + Data: c, + } +} + +// Type returns type of array, i.e. Array(T). +func (c ColArr[T]) Type() ColumnType { + return ColumnTypeArray.Sub(c.Data.Type()) +} + +// Rows returns rows count. 
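// Sketch of Array(T) built on the generic ColArr above; *ColStr is assumed to
// implement ColumnOf[string], as the compile-time assertions for NewArray imply.
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	arr := proto.NewArray[string](new(proto.ColStr)) // Array(String)

	arr.Append([]string{"a", "b"})
	arr.Append([]string{"c"})

	fmt.Println(arr.Rows()) // 2
	fmt.Println(arr.Row(0)) // [a b]
	fmt.Println(arr.Type()) // Array(String)
}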
+func (c ColArr[T]) Rows() int { + return c.Offsets.Rows() +} + +func (c *ColArr[T]) DecodeState(r *Reader) error { + if s, ok := c.Data.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "data state") + } + } + return nil +} + +func (c *ColArr[T]) EncodeState(b *Buffer) { + if s, ok := c.Data.(StateEncoder); ok { + s.EncodeState(b) + } +} + +// Prepare ensures Preparable column propagation. +func (c *ColArr[T]) Prepare() error { + if v, ok := c.Data.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "prepare data") + } + } + return nil +} + +// Infer ensures Inferable column propagation. +func (c *ColArr[T]) Infer(t ColumnType) error { + if v, ok := c.Data.(Inferable); ok { + if err := v.Infer(t.Elem()); err != nil { + return errors.Wrap(err, "infer data") + } + } + return nil +} + +// RowAppend appends i-th row to target and returns it. +func (c ColArr[T]) RowAppend(i int, target []T) []T { + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + for idx := start; idx < end; idx++ { + target = append(target, c.Data.Row(idx)) + } + + return target +} + +// Row returns i-th row. +func (c ColArr[T]) Row(i int) []T { + return c.RowAppend(i, nil) +} + +// DecodeColumn implements ColResult. +func (c *ColArr[T]) DecodeColumn(r *Reader, rows int) error { + if err := c.Offsets.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "read offsets") + } + var size int + if l := len(c.Offsets); l > 0 { + // Pick last offset as total size of "elements" column. + size = int(c.Offsets[l-1]) + } + if err := checkRows(size); err != nil { + return errors.Wrap(err, "array size") + } + if err := c.Data.DecodeColumn(r, size); err != nil { + return errors.Wrap(err, "decode data") + } + return nil +} + +// Reset implements ColResult. +func (c *ColArr[T]) Reset() { + c.Data.Reset() + c.Offsets.Reset() +} + +// EncodeColumn implements ColInput. +func (c ColArr[T]) EncodeColumn(b *Buffer) { + c.Offsets.EncodeColumn(b) + c.Data.EncodeColumn(b) +} + +// Append appends new row to column. +func (c *ColArr[T]) Append(v []T) { + c.Data.AppendArr(v) + c.Offsets = append(c.Offsets, uint64(c.Data.Rows())) +} + +// AppendArr appends new slice of rows to column. +func (c *ColArr[T]) AppendArr(vs [][]T) { + for _, v := range vs { + c.Data.AppendArr(v) + c.Offsets = append(c.Offsets, uint64(c.Data.Rows())) + } +} + +// Result for current column. +func (c *ColArr[T]) Result(column string) ResultColumn { + return ResultColumn{Name: column, Data: c} +} + +// Results return Results containing single column. +func (c *ColArr[T]) Results(column string) Results { + return Results{c.Result(column)} +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go new file mode 100644 index 0000000..2ed9b8e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go @@ -0,0 +1,124 @@ +package proto + +import ( + "strings" + + "github.com/go-faster/errors" +) + +// ColAuto is column that is initialized during decoding. +type ColAuto struct { + Data Column + DataType ColumnType +} + +// Infer and initialize Column from ColumnType. +func (c *ColAuto) Infer(t ColumnType) error { + if c.Data != nil && !c.Type().Conflicts(t) { + // Already ok. 
+ c.DataType = t // update subtype if needed + return nil + } + if v := inferGenerated(t); v != nil { + c.Data = v + c.DataType = t + return nil + } + if strings.HasPrefix(t.String(), ColumnTypeInterval.String()) { + v := new(ColInterval) + if err := v.Infer(t); err != nil { + return errors.Wrap(err, "interval") + } + c.Data = v + c.DataType = t + return nil + } + switch t { + case ColumnTypeNothing: + c.Data = new(ColNothing) + case ColumnTypeNullable.Sub(ColumnTypeNothing): + c.Data = new(ColNothing).Nullable() + case ColumnTypeArray.Sub(ColumnTypeNothing): + c.Data = new(ColNothing).Array() + case ColumnTypeString: + c.Data = new(ColStr) + case ColumnTypeArray.Sub(ColumnTypeString): + c.Data = new(ColStr).Array() + case ColumnTypeNullable.Sub(ColumnTypeString): + c.Data = new(ColStr).Nullable() + case ColumnTypeLowCardinality.Sub(ColumnTypeString): + c.Data = new(ColStr).LowCardinality() + case ColumnTypeArray.Sub(ColumnTypeLowCardinality.Sub(ColumnTypeString)): + c.Data = new(ColStr).LowCardinality().Array() + case ColumnTypeBool: + c.Data = new(ColBool) + case ColumnTypeDateTime: + c.Data = new(ColDateTime) + case ColumnTypeDate: + c.Data = new(ColDate) + case "Map(String,String)": + c.Data = NewMap[string, string](new(ColStr), new(ColStr)) + case ColumnTypeUUID: + c.Data = new(ColUUID) + case ColumnTypeArray.Sub(ColumnTypeUUID): + c.Data = new(ColUUID).Array() + case ColumnTypeNullable.Sub(ColumnTypeUUID): + c.Data = new(ColUUID).Nullable() + default: + switch t.Base() { + case ColumnTypeDateTime: + v := new(ColDateTime) + if err := v.Infer(t); err != nil { + return errors.Wrap(err, "datetime") + } + c.Data = v + c.DataType = t + return nil + case ColumnTypeEnum8, ColumnTypeEnum16: + v := new(ColEnum) + if err := v.Infer(t); err != nil { + return errors.Wrap(err, "enum") + } + c.Data = v + c.DataType = t + return nil + case ColumnTypeDateTime64: + v := new(ColDateTime64) + if err := v.Infer(t); err != nil { + return errors.Wrap(err, "datetime64") + } + c.Data = v + c.DataType = t + return nil + } + return errors.Errorf("automatic column inference not supported for %q", t) + } + + c.DataType = t + return nil +} + +var ( + _ Column = &ColAuto{} + _ Inferable = &ColAuto{} +) + +func (c ColAuto) Type() ColumnType { + return c.DataType +} + +func (c ColAuto) Rows() int { + return c.Data.Rows() +} + +func (c ColAuto) DecodeColumn(r *Reader, rows int) error { + return c.Data.DecodeColumn(r, rows) +} + +func (c ColAuto) Reset() { + c.Data.Reset() +} + +func (c ColAuto) EncodeColumn(b *Buffer) { + c.Data.EncodeColumn(b) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go new file mode 100644 index 0000000..70928c6 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go @@ -0,0 +1,160 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
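// Sketch of automatic column inference: ColAuto.Infer (shown above) picks a
// concrete column for a server-reported type, and AutoResult wraps the same
// mechanism for results whose types are not known up front.
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var auto proto.ColAuto
	if err := auto.Infer(proto.ColumnTypeNullable.Sub(proto.ColumnTypeString)); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", auto.Data) // concrete column chosen for Nullable(String)

	res := proto.AutoResult("name")
	fmt.Println(res.Name) // name
}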
+ +package proto + +func inferGenerated(t ColumnType) Column { + switch t { + case ColumnTypeArray.Sub(ColumnTypeFloat32): + return new(ColFloat32).Array() + case ColumnTypeNullable.Sub(ColumnTypeFloat32): + return new(ColFloat32).Nullable() + case ColumnTypeFloat32: + return new(ColFloat32) + case ColumnTypeArray.Sub(ColumnTypeFloat64): + return new(ColFloat64).Array() + case ColumnTypeNullable.Sub(ColumnTypeFloat64): + return new(ColFloat64).Nullable() + case ColumnTypeFloat64: + return new(ColFloat64) + case ColumnTypeArray.Sub(ColumnTypeIPv4): + return new(ColIPv4).Array() + case ColumnTypeNullable.Sub(ColumnTypeIPv4): + return new(ColIPv4).Nullable() + case ColumnTypeIPv4: + return new(ColIPv4) + case ColumnTypeArray.Sub(ColumnTypeIPv6): + return new(ColIPv6).Array() + case ColumnTypeNullable.Sub(ColumnTypeIPv6): + return new(ColIPv6).Nullable() + case ColumnTypeIPv6: + return new(ColIPv6) + case ColumnTypeArray.Sub(ColumnTypeDate): + return new(ColDate).Array() + case ColumnTypeNullable.Sub(ColumnTypeDate): + return new(ColDate).Nullable() + case ColumnTypeDate: + return new(ColDate) + case ColumnTypeArray.Sub(ColumnTypeDate32): + return new(ColDate32).Array() + case ColumnTypeNullable.Sub(ColumnTypeDate32): + return new(ColDate32).Nullable() + case ColumnTypeDate32: + return new(ColDate32) + case ColumnTypeArray.Sub(ColumnTypeInt8): + return new(ColInt8).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt8): + return new(ColInt8).Nullable() + case ColumnTypeInt8: + return new(ColInt8) + case ColumnTypeArray.Sub(ColumnTypeUInt8): + return new(ColUInt8).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt8): + return new(ColUInt8).Nullable() + case ColumnTypeUInt8: + return new(ColUInt8) + case ColumnTypeArray.Sub(ColumnTypeInt16): + return new(ColInt16).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt16): + return new(ColInt16).Nullable() + case ColumnTypeInt16: + return new(ColInt16) + case ColumnTypeArray.Sub(ColumnTypeUInt16): + return new(ColUInt16).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt16): + return new(ColUInt16).Nullable() + case ColumnTypeUInt16: + return new(ColUInt16) + case ColumnTypeArray.Sub(ColumnTypeInt32): + return new(ColInt32).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt32): + return new(ColInt32).Nullable() + case ColumnTypeInt32: + return new(ColInt32) + case ColumnTypeArray.Sub(ColumnTypeUInt32): + return new(ColUInt32).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt32): + return new(ColUInt32).Nullable() + case ColumnTypeUInt32: + return new(ColUInt32) + case ColumnTypeArray.Sub(ColumnTypeInt64): + return new(ColInt64).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt64): + return new(ColInt64).Nullable() + case ColumnTypeInt64: + return new(ColInt64) + case ColumnTypeArray.Sub(ColumnTypeUInt64): + return new(ColUInt64).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt64): + return new(ColUInt64).Nullable() + case ColumnTypeUInt64: + return new(ColUInt64) + case ColumnTypeArray.Sub(ColumnTypeInt128): + return new(ColInt128).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt128): + return new(ColInt128).Nullable() + case ColumnTypeInt128: + return new(ColInt128) + case ColumnTypeArray.Sub(ColumnTypeUInt128): + return new(ColUInt128).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt128): + return new(ColUInt128).Nullable() + case ColumnTypeUInt128: + return new(ColUInt128) + case ColumnTypeArray.Sub(ColumnTypeInt256): + return new(ColInt256).Array() + case ColumnTypeNullable.Sub(ColumnTypeInt256): + return 
new(ColInt256).Nullable() + case ColumnTypeInt256: + return new(ColInt256) + case ColumnTypeArray.Sub(ColumnTypeUInt256): + return new(ColUInt256).Array() + case ColumnTypeNullable.Sub(ColumnTypeUInt256): + return new(ColUInt256).Nullable() + case ColumnTypeUInt256: + return new(ColUInt256) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("8")): + return new(ColFixedStr8).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("8")): + return new(ColFixedStr8).Nullable() + case ColumnTypeFixedString.With("8"): + return new(ColFixedStr8) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("16")): + return new(ColFixedStr16).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("16")): + return new(ColFixedStr16).Nullable() + case ColumnTypeFixedString.With("16"): + return new(ColFixedStr16) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("32")): + return new(ColFixedStr32).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("32")): + return new(ColFixedStr32).Nullable() + case ColumnTypeFixedString.With("32"): + return new(ColFixedStr32) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("64")): + return new(ColFixedStr64).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("64")): + return new(ColFixedStr64).Nullable() + case ColumnTypeFixedString.With("64"): + return new(ColFixedStr64) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("128")): + return new(ColFixedStr128).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("128")): + return new(ColFixedStr128).Nullable() + case ColumnTypeFixedString.With("128"): + return new(ColFixedStr128) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("256")): + return new(ColFixedStr256).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("256")): + return new(ColFixedStr256).Nullable() + case ColumnTypeFixedString.With("256"): + return new(ColFixedStr256) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("512")): + return new(ColFixedStr512).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("512")): + return new(ColFixedStr512).Nullable() + case ColumnTypeFixedString.With("512"): + return new(ColFixedStr512) + default: + return nil + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go new file mode 100644 index 0000000..0974e53 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go @@ -0,0 +1,53 @@ +package proto + +// ColBool is Bool column. +type ColBool []bool + +// Compile-time assertions for ColBool. +var ( + _ ColInput = ColBool{} + _ ColResult = (*ColBool)(nil) + _ Column = (*ColBool)(nil) + _ ColumnOf[bool] = (*ColBool)(nil) +) + +func (c ColBool) Row(i int) bool { + return c[i] +} + +func (c *ColBool) Append(v bool) { + *c = append(*c, v) +} + +func (c *ColBool) AppendArr(vs []bool) { + *c = append(*c, vs...) +} + +// Type returns ColumnType of Bool. +func (ColBool) Type() ColumnType { + return ColumnTypeBool +} + +// Rows returns count of rows in column. +func (c ColBool) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColBool) Reset() { + *c = (*c)[:0] +} + +// Array is helper that creates Array(Bool). +func (c *ColBool) Array() *ColArr[bool] { + return &ColArr[bool]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Bool). 
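// Sketch of a fixed-size column: ColBool rows are appended in memory and then
// serialized into a Buffer with EncodeColumn, one byte per value (boolTrue or
// boolFalse).
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColBool
	col.AppendArr([]bool{true, false, true})

	var b proto.Buffer
	col.EncodeColumn(&b)

	fmt.Println(col.Rows(), len(b.Buf)) // 3 3
}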
+func (c *ColBool) Nullable() *ColNullable[bool] { + return &ColNullable[bool]{ + Values: c, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go new file mode 100644 index 0000000..3e998e4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go @@ -0,0 +1,44 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +package proto + +import "github.com/go-faster/errors" + +// EncodeColumn encodes Bool rows to *Buffer. +func (c ColBool) EncodeColumn(b *Buffer) { + start := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(c))...) + dst := b.Buf[start:] + for i, v := range c { + dst[i] = boolToByte(v) + } +} + +func boolToByte(b bool) byte { + if b { + return boolTrue + } + return boolFalse +} + +// DecodeColumn decodes Bool rows from *Reader. +func (c *ColBool) DecodeColumn(r *Reader, rows int) error { + data, err := r.ReadRaw(rows) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + v = append(v, make([]bool, rows)...) + for i := range data { + switch data[i] { + case boolTrue: + v[i] = true + case boolFalse: + v[i] = false + default: + return errors.Errorf("[%d]: bad value %d for Bool", i, data[i]) + } + } + *c = v + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go new file mode 100644 index 0000000..92cac70 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go @@ -0,0 +1,36 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// EncodeColumn encodes Bool rows to *Buffer. +func (c ColBool) EncodeColumn(b *Buffer) { + if len(c) == 0 { + return + } + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(c))...) + s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103 + src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + dst := b.Buf[offset:] + copy(dst, src) +} + +// DecodeColumn decodes Bool rows from *Reader. +func (c *ColBool) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]bool, rows)...) + s := *(*slice)(unsafe.Pointer(c)) // #nosec G103 + dst := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go new file mode 100644 index 0000000..5bf75b3 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go @@ -0,0 +1,49 @@ +package proto + +import "time" + +func (c *ColDate) Append(v time.Time) { + *c = append(*c, ToDate(v)) +} + +func (c *ColDate) AppendArr(vs []time.Time) { + var dates = make([]Date, len(vs)) + + for i, v := range vs { + dates[i] = ToDate(v) + } + + *c = append(*c, dates...) +} + +func (c ColDate) Row(i int) time.Time { + return c[i].Time() +} + +// LowCardinality returns LowCardinality for Enum8 . +func (c *ColDate) LowCardinality() *ColLowCardinality[time.Time] { + return &ColLowCardinality[time.Time]{ + index: c, + } +} + +// Array is helper that creates Array of Enum8. +func (c *ColDate) Array() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Enum8). 
+func (c *ColDate) Nullable() *ColNullable[time.Time] { + return &ColNullable[time.Time]{ + Values: c, + } +} + +// NewArrDate returns new Array(Date). +func NewArrDate() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: new(ColDate), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go new file mode 100644 index 0000000..38f1a91 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go @@ -0,0 +1,49 @@ +package proto + +import "time" + +func (c *ColDate32) Append(v time.Time) { + *c = append(*c, ToDate32(v)) +} + +func (c *ColDate32) AppendArr(vs []time.Time) { + var dates = make([]Date32, len(vs)) + + for i, v := range vs { + dates[i] = ToDate32(v) + } + + *c = append(*c, dates...) +} + +func (c ColDate32) Row(i int) time.Time { + return c[i].Time() +} + +// LowCardinality returns LowCardinality for Enum8 . +func (c *ColDate32) LowCardinality() *ColLowCardinality[time.Time] { + return &ColLowCardinality[time.Time]{ + index: c, + } +} + +// Array is helper that creates Array of Enum8. +func (c *ColDate32) Array() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Enum8). +func (c *ColDate32) Nullable() *ColNullable[time.Time] { + return &ColNullable[time.Time]{ + Values: c, + } +} + +// NewArrDate32 returns new Array(Date32). +func NewArrDate32() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: new(ColDate32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_gen.go new file mode 100644 index 0000000..c2e3004 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_gen.go @@ -0,0 +1,28 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDate32 represents Date32 column. +type ColDate32 []Date32 + +// Compile-time assertions for ColDate32. +var ( + _ ColInput = ColDate32{} + _ ColResult = (*ColDate32)(nil) + _ Column = (*ColDate32)(nil) +) + +// Rows returns count of rows in column. +func (c ColDate32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDate32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Date32. +func (ColDate32) Type() ColumnType { + return ColumnTypeDate32 +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go new file mode 100644 index 0000000..7e6ac3d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Date32 rows from *Reader. +func (c *ColDate32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Date32(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Date32 rows to *Buffer. 
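// Sketch tying the Date32 helpers to the generated codecs: time.Time values are
// converted on Append, and each Date32 row is encoded as a fixed 4-byte
// little-endian value.
package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColDate32
	col.AppendArr([]time.Time{
		time.Date(2024, 8, 20, 0, 0, 0, 0, time.UTC),
		time.Date(2024, 8, 21, 0, 0, 0, 0, time.UTC),
	})

	var b proto.Buffer
	col.EncodeColumn(&b)
	fmt.Println(col.Rows(), len(b.Buf)) // 2 8
}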
+func (c ColDate32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + uint32(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go new file mode 100644 index 0000000..2690a31 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Date32 rows from *Reader. +func (c *ColDate32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Date32, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Date32 rows to *Buffer. +func (c ColDate32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_gen.go new file mode 100644 index 0000000..16447af --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_gen.go @@ -0,0 +1,28 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDate represents Date column. +type ColDate []Date + +// Compile-time assertions for ColDate. +var ( + _ ColInput = ColDate{} + _ ColResult = (*ColDate)(nil) + _ Column = (*ColDate)(nil) +) + +// Rows returns count of rows in column. +func (c ColDate) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDate) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Date. +func (ColDate) Type() ColumnType { + return ColumnTypeDate +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go new file mode 100644 index 0000000..49bb89b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Date rows from *Reader. +func (c *ColDate) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. 
+ _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Date(binary.LittleEndian.Uint16(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Date rows to *Buffer. +func (c ColDate) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint16( + b.Buf[offset:offset+size], + uint16(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go new file mode 100644 index 0000000..980d8b4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Date rows from *Reader. +func (c *ColDate) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Date, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Date rows to *Buffer. +func (c ColDate) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go new file mode 100644 index 0000000..4243f2b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go @@ -0,0 +1,104 @@ +package proto + +import ( + "strings" + "time" + + "github.com/go-faster/errors" +) + +var ( + _ ColumnOf[time.Time] = (*ColDateTime)(nil) + _ Inferable = (*ColDateTime)(nil) +) + +// ColDateTime implements ColumnOf[time.Time]. +type ColDateTime struct { + Data []DateTime + Location *time.Location +} + +func (c *ColDateTime) Reset() { + c.Data = c.Data[:0] +} + +func (c ColDateTime) Rows() int { + return len(c.Data) +} + +func (c ColDateTime) Type() ColumnType { + if c.Location == nil { + return ColumnTypeDateTime + } + return ColumnTypeDateTime.With(`'` + c.Location.String() + `'`) +} + +func (c *ColDateTime) Infer(t ColumnType) error { + sub := t.Elem() + if sub == "" { + c.Location = nil + return nil + } + rawLoc := string(sub) + rawLoc = strings.Trim(rawLoc, `'`) + loc, err := time.LoadLocation(rawLoc) + if err != nil { + return errors.Wrap(err, "load location") + } + c.Location = loc + return nil +} + +func (c ColDateTime) loc() *time.Location { + if c.Location == nil { + // Defaulting to local timezone (not UTC). + return time.Local + } + return c.Location +} + +func (c ColDateTime) Row(i int) time.Time { + return c.Data[i].Time().In(c.loc()) +} + +func (c *ColDateTime) Append(v time.Time) { + c.Data = append(c.Data, ToDateTime(v)) +} + +func (c *ColDateTime) AppendArr(vs []time.Time) { + var dates = make([]DateTime, len(vs)) + + for i, v := range vs { + dates[i] = ToDateTime(v) + } + + c.Data = append(c.Data, dates...) 
+} + +// LowCardinality returns LowCardinality for Enum8 . +func (c *ColDateTime) LowCardinality() *ColLowCardinality[time.Time] { + return &ColLowCardinality[time.Time]{ + index: c, + } +} + +// Array is helper that creates Array of Enum8. +func (c *ColDateTime) Array() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Enum8). +func (c *ColDateTime) Nullable() *ColNullable[time.Time] { + return &ColNullable[time.Time]{ + Values: c, + } +} + +// NewArrDateTime returns new Array(DateTime). +func NewArrDateTime() *ColArr[time.Time] { + return &ColArr[time.Time]{ + Data: &ColDateTime{}, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go new file mode 100644 index 0000000..f4d96a4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go @@ -0,0 +1,150 @@ +package proto + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-faster/errors" +) + +var ( + _ ColumnOf[time.Time] = (*ColDateTime64)(nil) + _ Inferable = (*ColDateTime64)(nil) + _ Column = (*ColDateTime64)(nil) +) + +// ColDateTime64 implements ColumnOf[time.Time]. +// +// If Precision is not set, Append and Row() panics. +// Use ColDateTime64Raw to work with raw DateTime64 values. +type ColDateTime64 struct { + Data []DateTime64 + Location *time.Location + Precision Precision + PrecisionSet bool +} + +func (c *ColDateTime64) WithPrecision(p Precision) *ColDateTime64 { + c.Precision = p + c.PrecisionSet = true + return c +} + +func (c *ColDateTime64) WithLocation(loc *time.Location) *ColDateTime64 { + c.Location = loc + return c +} + +func (c ColDateTime64) Rows() int { + return len(c.Data) +} + +func (c *ColDateTime64) Reset() { + c.Data = c.Data[:0] +} + +func (c ColDateTime64) Type() ColumnType { + var elems []string + if p := c.Precision; c.PrecisionSet { + elems = append(elems, strconv.Itoa(int(p))) + } + if loc := c.Location; loc != nil { + elems = append(elems, fmt.Sprintf(`'%s'`, loc)) + } + return ColumnTypeDateTime64.With(elems...) +} + +func (c *ColDateTime64) Infer(t ColumnType) error { + elem := string(t.Elem()) + if elem == "" { + return errors.Errorf("invalid DateTime64: no elements in %q", t) + } + elems := strings.SplitN(elem, ",", 2) + for i := range elems { + elems[i] = strings.Trim(elems[i], `' `) + } + n, err := strconv.ParseUint(elems[0], 10, 8) + if err != nil { + return errors.Wrap(err, "parse precision") + } + p := Precision(n) + if !p.Valid() { + return errors.Errorf("precision %d is invalid", n) + } + c.Precision = p + c.PrecisionSet = true + if len(elems) > 1 { + loc, err := time.LoadLocation(elems[1]) + if err != nil { + return errors.Wrap(err, "invalid location") + } + c.Location = loc + } + return nil +} + +func (c ColDateTime64) Row(i int) time.Time { + if !c.PrecisionSet { + panic("DateTime64: no precision set") + } + return c.Data[i].Time(c.Precision).In(c.loc()) +} + +func (c ColDateTime64) loc() *time.Location { + if c.Location == nil { + // Defaulting to local timezone (not UTC). 
+ return time.Local + } + return c.Location +} + +func (c *ColDateTime64) AppendRaw(v DateTime64) { + c.Data = append(c.Data, v) +} + +func (c *ColDateTime64) Append(v time.Time) { + if !c.PrecisionSet { + panic("DateTime64: no precision set") + } + c.AppendRaw(ToDateTime64(v, c.Precision)) +} + +func (c *ColDateTime64) AppendArr(v []time.Time) { + if !c.PrecisionSet { + panic("DateTime64: no precision set") + } + + for _, item := range v { + c.AppendRaw(ToDateTime64(item, c.Precision)) + } +} + +// Raw version of ColDateTime64 for ColumnOf[DateTime64]. +func (c ColDateTime64) Raw() *ColDateTime64Raw { + return &ColDateTime64Raw{ColDateTime64: c} +} + +func (c *ColDateTime64) Array() *ColArr[time.Time] { + return &ColArr[time.Time]{Data: c} +} + +var ( + _ ColumnOf[DateTime64] = (*ColDateTime64Raw)(nil) + _ Inferable = (*ColDateTime64Raw)(nil) + _ Column = (*ColDateTime64Raw)(nil) +) + +// ColDateTime64Raw is DateTime64 wrapper to implement ColumnOf[DateTime64]. +type ColDateTime64Raw struct { + ColDateTime64 +} + +func (c *ColDateTime64Raw) Append(v DateTime64) { c.AppendRaw(v) } +func (c *ColDateTime64Raw) AppendArr(vs []DateTime64) { + for _, v := range vs { + c.AppendRaw(v) + } +} +func (c ColDateTime64Raw) Row(i int) DateTime64 { return c.Data[i] } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go new file mode 100644 index 0000000..ccff09d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes DateTime64 rows from *Reader. +func (c *ColDateTime64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := c.Data + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + DateTime64(binary.LittleEndian.Uint64(data[i:i+size])), + ) + } + c.Data = v + return nil +} + +// EncodeColumn encodes DateTime64 rows to *Buffer. +func (c ColDateTime64) EncodeColumn(b *Buffer) { + v := c.Data + if len(v) == 0 { + return + } + const size = 64 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint64( + b.Buf[offset:offset+size], + uint64(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go new file mode 100644 index 0000000..4eeeaf5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes DateTime64 rows from *Reader. +func (c *ColDateTime64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + c.Data = append(c.Data, make([]DateTime64, rows)...) 
+ s := *(*slice)(unsafe.Pointer(&c.Data)) + const size = 64 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes DateTime64 rows to *Buffer. +func (c ColDateTime64) EncodeColumn(b *Buffer) { + v := c.Data + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go new file mode 100644 index 0000000..20e9aad --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes DateTime rows from *Reader. +func (c *ColDateTime) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := c.Data + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + DateTime(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + c.Data = v + return nil +} + +// EncodeColumn encodes DateTime rows to *Buffer. +func (c ColDateTime) EncodeColumn(b *Buffer) { + v := c.Data + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + uint32(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go new file mode 100644 index 0000000..40a056c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes DateTime rows from *Reader. +func (c *ColDateTime) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + c.Data = append(c.Data, make([]DateTime, rows)...) + s := *(*slice)(unsafe.Pointer(&c.Data)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes DateTime rows to *Buffer. +func (c ColDateTime) EncodeColumn(b *Buffer) { + v := c.Data + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go new file mode 100644 index 0000000..18811ac --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDecimal128 represents Decimal128 column. +type ColDecimal128 []Decimal128 + +// Compile-time assertions for ColDecimal128. +var ( + _ ColInput = ColDecimal128{} + _ ColResult = (*ColDecimal128)(nil) + _ Column = (*ColDecimal128)(nil) +) + +// Rows returns count of rows in column. +func (c ColDecimal128) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDecimal128) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Decimal128. +func (ColDecimal128) Type() ColumnType { + return ColumnTypeDecimal128 +} + +// Row returns i-th row of column. +func (c ColDecimal128) Row(i int) Decimal128 { + return c[i] +} + +// Append Decimal128 to column. +func (c *ColDecimal128) Append(v Decimal128) { + *c = append(*c, v) +} + +// Append Decimal128 slice to column. +func (c *ColDecimal128) AppendArr(vs []Decimal128) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Decimal128 . +func (c *ColDecimal128) LowCardinality() *ColLowCardinality[Decimal128] { + return &ColLowCardinality[Decimal128]{ + index: c, + } +} + +// Array is helper that creates Array of Decimal128. +func (c *ColDecimal128) Array() *ColArr[Decimal128] { + return &ColArr[Decimal128]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Decimal128). +func (c *ColDecimal128) Nullable() *ColNullable[Decimal128] { + return &ColNullable[Decimal128]{ + Values: c, + } +} + +// NewArrDecimal128 returns new Array(Decimal128). +func NewArrDecimal128() *ColArr[Decimal128] { + return &ColArr[Decimal128]{ + Data: new(ColDecimal128), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go new file mode 100644 index 0000000..58c02eb --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Decimal128 rows from *Reader. +func (c *ColDecimal128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Decimal128(binUInt128(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Decimal128 rows to *Buffer. +func (c ColDecimal128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ for _, vv := range v { + binPutUInt128( + b.Buf[offset:offset+size], + UInt128(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go new file mode 100644 index 0000000..1b2fe12 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Decimal128 rows from *Reader. +func (c *ColDecimal128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Decimal128, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Decimal128 rows to *Buffer. +func (c ColDecimal128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go new file mode 100644 index 0000000..ad96b27 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDecimal256 represents Decimal256 column. +type ColDecimal256 []Decimal256 + +// Compile-time assertions for ColDecimal256. +var ( + _ ColInput = ColDecimal256{} + _ ColResult = (*ColDecimal256)(nil) + _ Column = (*ColDecimal256)(nil) +) + +// Rows returns count of rows in column. +func (c ColDecimal256) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDecimal256) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Decimal256. +func (ColDecimal256) Type() ColumnType { + return ColumnTypeDecimal256 +} + +// Row returns i-th row of column. +func (c ColDecimal256) Row(i int) Decimal256 { + return c[i] +} + +// Append Decimal256 to column. +func (c *ColDecimal256) Append(v Decimal256) { + *c = append(*c, v) +} + +// Append Decimal256 slice to column. +func (c *ColDecimal256) AppendArr(vs []Decimal256) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Decimal256 . +func (c *ColDecimal256) LowCardinality() *ColLowCardinality[Decimal256] { + return &ColLowCardinality[Decimal256]{ + index: c, + } +} + +// Array is helper that creates Array of Decimal256. +func (c *ColDecimal256) Array() *ColArr[Decimal256] { + return &ColArr[Decimal256]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Decimal256). +func (c *ColDecimal256) Nullable() *ColNullable[Decimal256] { + return &ColNullable[Decimal256]{ + Values: c, + } +} + +// NewArrDecimal256 returns new Array(Decimal256). 
+func NewArrDecimal256() *ColArr[Decimal256] { + return &ColArr[Decimal256]{ + Data: new(ColDecimal256), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go new file mode 100644 index 0000000..301b7b1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Decimal256 rows from *Reader. +func (c *ColDecimal256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 256 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Decimal256(binUInt256(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Decimal256 rows to *Buffer. +func (c ColDecimal256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binPutUInt256( + b.Buf[offset:offset+size], + UInt256(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go new file mode 100644 index 0000000..b0d694e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Decimal256 rows from *Reader. +func (c *ColDecimal256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Decimal256, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 256 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Decimal256 rows to *Buffer. +func (c ColDecimal256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 256 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go new file mode 100644 index 0000000..2c4f4ea --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDecimal32 represents Decimal32 column. +type ColDecimal32 []Decimal32 + +// Compile-time assertions for ColDecimal32. +var ( + _ ColInput = ColDecimal32{} + _ ColResult = (*ColDecimal32)(nil) + _ Column = (*ColDecimal32)(nil) +) + +// Rows returns count of rows in column. 
+func (c ColDecimal32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDecimal32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Decimal32. +func (ColDecimal32) Type() ColumnType { + return ColumnTypeDecimal32 +} + +// Row returns i-th row of column. +func (c ColDecimal32) Row(i int) Decimal32 { + return c[i] +} + +// Append Decimal32 to column. +func (c *ColDecimal32) Append(v Decimal32) { + *c = append(*c, v) +} + +// Append Decimal32 slice to column. +func (c *ColDecimal32) AppendArr(vs []Decimal32) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Decimal32 . +func (c *ColDecimal32) LowCardinality() *ColLowCardinality[Decimal32] { + return &ColLowCardinality[Decimal32]{ + index: c, + } +} + +// Array is helper that creates Array of Decimal32. +func (c *ColDecimal32) Array() *ColArr[Decimal32] { + return &ColArr[Decimal32]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Decimal32). +func (c *ColDecimal32) Nullable() *ColNullable[Decimal32] { + return &ColNullable[Decimal32]{ + Values: c, + } +} + +// NewArrDecimal32 returns new Array(Decimal32). +func NewArrDecimal32() *ColArr[Decimal32] { + return &ColArr[Decimal32]{ + Data: new(ColDecimal32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go new file mode 100644 index 0000000..44cb9f7 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Decimal32 rows from *Reader. +func (c *ColDecimal32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Decimal32(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Decimal32 rows to *Buffer. +func (c ColDecimal32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + uint32(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go new file mode 100644 index 0000000..eaed3df --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Decimal32 rows from *Reader. +func (c *ColDecimal32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Decimal32, rows)...) 
+ s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Decimal32 rows to *Buffer. +func (c ColDecimal32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go new file mode 100644 index 0000000..c37ffcd --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColDecimal64 represents Decimal64 column. +type ColDecimal64 []Decimal64 + +// Compile-time assertions for ColDecimal64. +var ( + _ ColInput = ColDecimal64{} + _ ColResult = (*ColDecimal64)(nil) + _ Column = (*ColDecimal64)(nil) +) + +// Rows returns count of rows in column. +func (c ColDecimal64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColDecimal64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Decimal64. +func (ColDecimal64) Type() ColumnType { + return ColumnTypeDecimal64 +} + +// Row returns i-th row of column. +func (c ColDecimal64) Row(i int) Decimal64 { + return c[i] +} + +// Append Decimal64 to column. +func (c *ColDecimal64) Append(v Decimal64) { + *c = append(*c, v) +} + +// Append Decimal64 slice to column. +func (c *ColDecimal64) AppendArr(vs []Decimal64) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Decimal64 . +func (c *ColDecimal64) LowCardinality() *ColLowCardinality[Decimal64] { + return &ColLowCardinality[Decimal64]{ + index: c, + } +} + +// Array is helper that creates Array of Decimal64. +func (c *ColDecimal64) Array() *ColArr[Decimal64] { + return &ColArr[Decimal64]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Decimal64). +func (c *ColDecimal64) Nullable() *ColNullable[Decimal64] { + return &ColNullable[Decimal64]{ + Values: c, + } +} + +// NewArrDecimal64 returns new Array(Decimal64). +func NewArrDecimal64() *ColArr[Decimal64] { + return &ColArr[Decimal64]{ + Data: new(ColDecimal64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go new file mode 100644 index 0000000..a0934c6 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Decimal64 rows from *Reader. +func (c *ColDecimal64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. 
+ _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Decimal64(binary.LittleEndian.Uint64(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Decimal64 rows to *Buffer. +func (c ColDecimal64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint64( + b.Buf[offset:offset+size], + uint64(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go new file mode 100644 index 0000000..f5ba1b2 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Decimal64 rows from *Reader. +func (c *ColDecimal64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Decimal64, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Decimal64 rows to *Buffer. +func (c ColDecimal64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go new file mode 100644 index 0000000..f4af963 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go @@ -0,0 +1,172 @@ +package proto + +import ( + "strconv" + "strings" + + "github.com/go-faster/errors" +) + +var ( + _ Column = (*ColEnum)(nil) + _ ColumnOf[string] = (*ColEnum)(nil) + _ Inferable = (*ColEnum)(nil) + _ Preparable = (*ColEnum)(nil) +) + +// ColEnum is inference helper for enums. +// +// You can set Values and actual enum mapping will be inferred during query +// execution. +type ColEnum struct { + t ColumnType + base ColumnType + + rawToStr map[int]string + strToRaw map[string]int + raw8 ColEnum8 + raw16 ColEnum16 + + // Values of ColEnum. + Values []string +} + +func (e *ColEnum) raw() Column { + if e.t.Base() == ColumnTypeEnum8 { + return &e.raw8 + } + return &e.raw16 +} + +func (e ColEnum) Row(i int) string { + return e.Values[i] +} + +// Append value to Enum8 column. +func (e *ColEnum) Append(v string) { + e.Values = append(e.Values, v) +} + +func (e *ColEnum) AppendArr(vs []string) { + e.Values = append(e.Values, vs...) 
+} + +func (e *ColEnum) parse(t ColumnType) error { + if e.rawToStr == nil { + e.rawToStr = map[int]string{} + } + if e.strToRaw == nil { + e.strToRaw = map[string]int{} + } + + elements := t.Elem().String() + for _, elem := range strings.Split(elements, ",") { + def := strings.TrimSpace(elem) + // 'hello' = 1 + parts := strings.SplitN(def, "=", 2) + if len(parts) != 2 { + return errors.Errorf("bad enum definition %q", def) + } + var ( + left = strings.TrimSpace(parts[0]) // 'hello' + right = strings.TrimSpace(parts[1]) // 1 + ) + idx, err := strconv.Atoi(right) + if err != nil { + return errors.Errorf("bad right side of definition %q", right) + } + left = strings.TrimFunc(left, func(c rune) bool { + return c == '\'' + }) + e.strToRaw[left] = idx + e.rawToStr[idx] = left + } + return nil +} + +func (e *ColEnum) Infer(t ColumnType) error { + if !strings.HasPrefix(t.Base().String(), "Enum") { + return errors.Errorf("invalid base %q to infer enum", t.Base()) + } + if err := e.parse(t); err != nil { + return errors.Wrap(err, "parse type") + } + base := t.Base() + switch base { + case ColumnTypeEnum8, ColumnTypeEnum16: + e.base = base + default: + return errors.Errorf("invalid base %q", base) + } + e.t = t + return nil +} + +func (e *ColEnum) Rows() int { + return len(e.Values) +} + +func appendEnum[E Enum8 | Enum16](c []E, mapping map[int]string, values []string) ([]string, error) { + for _, v := range c { + s, ok := mapping[int(v)] + if !ok { + return nil, errors.Errorf("unknown enum value %d", v) + } + values = append(values, s) + } + return values, nil +} + +func (e *ColEnum) DecodeColumn(r *Reader, rows int) error { + if err := e.raw().DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "raw") + } + var ( + err error + v []string + ) + switch e.base { + case ColumnTypeEnum8: + v, err = appendEnum[Enum8](e.raw8, e.rawToStr, e.Values[:0]) + case ColumnTypeEnum16: + v, err = appendEnum[Enum16](e.raw16, e.rawToStr, e.Values[:0]) + default: + return errors.Errorf("invalid enum base %q", e.base) + } + if err != nil { + return errors.Wrap(err, "map values") + } + e.Values = v + return nil +} + +func (e *ColEnum) Reset() { + e.raw().Reset() + e.Values = e.Values[:0] +} + +func (e *ColEnum) Prepare() error { + e.raw8 = e.raw8[:0] + e.raw16 = e.raw16[:0] + for _, v := range e.Values { + raw, ok := e.strToRaw[v] + if !ok { + return errors.Errorf("unknown enum value %q", v) + } + switch e.base { + case ColumnTypeEnum8: + e.raw8.Append(Enum8(raw)) + case ColumnTypeEnum16: + e.raw16.Append(Enum16(raw)) + default: + return errors.Errorf("invalid base %q", e.base) + } + } + return nil +} + +func (e *ColEnum) EncodeColumn(b *Buffer) { + e.raw().EncodeColumn(b) +} + +func (e *ColEnum) Type() ColumnType { return e.t } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go new file mode 100644 index 0000000..3f99c64 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColEnum16 represents Enum16 column. +type ColEnum16 []Enum16 + +// Compile-time assertions for ColEnum16. +var ( + _ ColInput = ColEnum16{} + _ ColResult = (*ColEnum16)(nil) + _ Column = (*ColEnum16)(nil) +) + +// Rows returns count of rows in column. +func (c ColEnum16) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
+func (c *ColEnum16) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Enum16. +func (ColEnum16) Type() ColumnType { + return ColumnTypeEnum16 +} + +// Row returns i-th row of column. +func (c ColEnum16) Row(i int) Enum16 { + return c[i] +} + +// Append Enum16 to column. +func (c *ColEnum16) Append(v Enum16) { + *c = append(*c, v) +} + +// Append Enum16 slice to column. +func (c *ColEnum16) AppendArr(vs []Enum16) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Enum16 . +func (c *ColEnum16) LowCardinality() *ColLowCardinality[Enum16] { + return &ColLowCardinality[Enum16]{ + index: c, + } +} + +// Array is helper that creates Array of Enum16. +func (c *ColEnum16) Array() *ColArr[Enum16] { + return &ColArr[Enum16]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Enum16). +func (c *ColEnum16) Nullable() *ColNullable[Enum16] { + return &ColNullable[Enum16]{ + Values: c, + } +} + +// NewArrEnum16 returns new Array(Enum16). +func NewArrEnum16() *ColArr[Enum16] { + return &ColArr[Enum16]{ + Data: new(ColEnum16), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go new file mode 100644 index 0000000..bf3b012 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Enum16 rows from *Reader. +func (c *ColEnum16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Enum16(binary.LittleEndian.Uint16(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Enum16 rows to *Buffer. +func (c ColEnum16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint16( + b.Buf[offset:offset+size], + uint16(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go new file mode 100644 index 0000000..5275701 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Enum16 rows from *Reader. +func (c *ColEnum16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Enum16, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Enum16 rows to *Buffer. 
+func (c ColEnum16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go new file mode 100644 index 0000000..a063eaf --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColEnum8 represents Enum8 column. +type ColEnum8 []Enum8 + +// Compile-time assertions for ColEnum8. +var ( + _ ColInput = ColEnum8{} + _ ColResult = (*ColEnum8)(nil) + _ Column = (*ColEnum8)(nil) +) + +// Rows returns count of rows in column. +func (c ColEnum8) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColEnum8) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Enum8. +func (ColEnum8) Type() ColumnType { + return ColumnTypeEnum8 +} + +// Row returns i-th row of column. +func (c ColEnum8) Row(i int) Enum8 { + return c[i] +} + +// Append Enum8 to column. +func (c *ColEnum8) Append(v Enum8) { + *c = append(*c, v) +} + +// Append Enum8 slice to column. +func (c *ColEnum8) AppendArr(vs []Enum8) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Enum8 . +func (c *ColEnum8) LowCardinality() *ColLowCardinality[Enum8] { + return &ColLowCardinality[Enum8]{ + index: c, + } +} + +// Array is helper that creates Array of Enum8. +func (c *ColEnum8) Array() *ColArr[Enum8] { + return &ColArr[Enum8]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Enum8). +func (c *ColEnum8) Nullable() *ColNullable[Enum8] { + return &ColNullable[Enum8]{ + Values: c, + } +} + +// NewArrEnum8 returns new Array(Enum8). +func NewArrEnum8() *ColArr[Enum8] { + return &ColArr[Enum8]{ + Data: new(ColEnum8), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go new file mode 100644 index 0000000..edf5712 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go @@ -0,0 +1,44 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Enum8 rows from *Reader. +func (c *ColEnum8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + data, err := r.ReadRaw(rows) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + v = append(v, make([]Enum8, rows)...) + for i := range data { + v[i] = Enum8(data[i]) + } + *c = v + return nil +} + +// EncodeColumn encodes Enum8 rows to *Buffer. +func (c ColEnum8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + start := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(v))...) 
+ for i := range v { + b.Buf[i+start] = uint8(v[i]) + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go new file mode 100644 index 0000000..09e6fe2 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go @@ -0,0 +1,39 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Enum8 rows from *Reader. +func (c *ColEnum8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Enum8, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Enum8 rows to *Buffer. +func (c ColEnum8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go new file mode 100644 index 0000000..982cfa1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go @@ -0,0 +1,94 @@ +package proto + +import ( + "strconv" + + "github.com/go-faster/errors" +) + +// ColFixedStr represents FixedString(Size) column. Size is required. +// +// Can be used to store SHA256, MD5 or similar fixed size binary values. +// See https://clickhouse.com/docs/en/sql-reference/data-types/fixedstring/. +type ColFixedStr struct { + Buf []byte + Size int // N +} + +// Compile-time assertions for ColFixedStr. +var ( + _ ColInput = ColFixedStr{} + _ ColResult = (*ColFixedStr)(nil) + _ Column = (*ColFixedStr)(nil) +) + +// Type returns ColumnType of FixedString. +func (c ColFixedStr) Type() ColumnType { + return ColumnTypeFixedString.With(strconv.Itoa(c.Size)) +} + +// SetSize sets Size of FixedString(Size) to n. +// +// Can be called during decode to infer size from result. +func (c *ColFixedStr) SetSize(n int) { + c.Size = n +} + +// Rows returns count of rows in column. +func (c ColFixedStr) Rows() int { + if c.Size == 0 { + return 0 + } + return len(c.Buf) / c.Size +} + +// Row returns value of "i" row. +func (c ColFixedStr) Row(i int) []byte { + return c.Buf[i*c.Size : (i+1)*c.Size] +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr) Reset() { + c.Buf = c.Buf[:0] +} + +// Append value to column. Panics if len(b) != Size. +// +// If Size is not set, will set to len of first value. +func (c *ColFixedStr) Append(b []byte) { + if c.Size == 0 { + // Automatic size set. + c.Size = len(b) + } + if len(b) != c.Size { + panic("invalid size") + } + c.Buf = append(c.Buf, b...) +} + +func (c *ColFixedStr) AppendArr(vs [][]byte) { + for _, v := range vs { + c.Append(v) + } +} + +// EncodeColumn encodes ColFixedStr rows to *Buffer. +func (c ColFixedStr) EncodeColumn(b *Buffer) { + b.Buf = append(b.Buf, c.Buf...) +} + +// DecodeColumn decodes ColFixedStr rows from *Reader. +func (c *ColFixedStr) DecodeColumn(r *Reader, rows int) error { + c.Buf = append(c.Buf[:0], make([]byte, rows*c.Size)...) 
+ if err := r.ReadFull(c.Buf); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// Array returns new Array(FixedString). +func (c *ColFixedStr) Array() *ColArr[[]byte] { + return &ColArr[[]byte]{ + Data: c, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go new file mode 100644 index 0000000..cb76953 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr128 represents FixedStr128 column. +type ColFixedStr128 [][128]byte + +// Compile-time assertions for ColFixedStr128. +var ( + _ ColInput = ColFixedStr128{} + _ ColResult = (*ColFixedStr128)(nil) + _ Column = (*ColFixedStr128)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr128) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr128) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr128. +func (ColFixedStr128) Type() ColumnType { + return ColumnTypeFixedString.With("128") +} + +// Row returns i-th row of column. +func (c ColFixedStr128) Row(i int) [128]byte { + return c[i] +} + +// Append [128]byte to column. +func (c *ColFixedStr128) Append(v [128]byte) { + *c = append(*c, v) +} + +// Append [128]byte slice to column. +func (c *ColFixedStr128) AppendArr(vs [][128]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr128 . +func (c *ColFixedStr128) LowCardinality() *ColLowCardinality[[128]byte] { + return &ColLowCardinality[[128]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [128]byte. +func (c *ColFixedStr128) Array() *ColArr[[128]byte] { + return &ColArr[[128]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([128]byte). +func (c *ColFixedStr128) Nullable() *ColNullable[[128]byte] { + return &ColNullable[[128]byte]{ + Values: c, + } +} + +// NewArrFixedStr128 returns new Array(FixedStr128). +func NewArrFixedStr128() *ColArr[[128]byte] { + return &ColArr[[128]byte]{ + Data: new(ColFixedStr128), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go new file mode 100644 index 0000000..edf7f9c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr128 rows from *Reader. +func (c *ColFixedStr128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[128]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr128 rows to *Buffer. 
+func (c ColFixedStr128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go new file mode 100644 index 0000000..46ee96c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr128 rows from *Reader. +func (c *ColFixedStr128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][128]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr128 rows to *Buffer. +func (c ColFixedStr128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go new file mode 100644 index 0000000..adfc2de --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr16 represents FixedStr16 column. +type ColFixedStr16 [][16]byte + +// Compile-time assertions for ColFixedStr16. +var ( + _ ColInput = ColFixedStr16{} + _ ColResult = (*ColFixedStr16)(nil) + _ Column = (*ColFixedStr16)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr16) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr16) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr16. +func (ColFixedStr16) Type() ColumnType { + return ColumnTypeFixedString.With("16") +} + +// Row returns i-th row of column. +func (c ColFixedStr16) Row(i int) [16]byte { + return c[i] +} + +// Append [16]byte to column. +func (c *ColFixedStr16) Append(v [16]byte) { + *c = append(*c, v) +} + +// Append [16]byte slice to column. +func (c *ColFixedStr16) AppendArr(vs [][16]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr16 . +func (c *ColFixedStr16) LowCardinality() *ColLowCardinality[[16]byte] { + return &ColLowCardinality[[16]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [16]byte. +func (c *ColFixedStr16) Array() *ColArr[[16]byte] { + return &ColArr[[16]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([16]byte). +func (c *ColFixedStr16) Nullable() *ColNullable[[16]byte] { + return &ColNullable[[16]byte]{ + Values: c, + } +} + +// NewArrFixedStr16 returns new Array(FixedStr16). 
+func NewArrFixedStr16() *ColArr[[16]byte] { + return &ColArr[[16]byte]{ + Data: new(ColFixedStr16), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go new file mode 100644 index 0000000..4a9313a --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr16 rows from *Reader. +func (c *ColFixedStr16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[16]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr16 rows to *Buffer. +func (c ColFixedStr16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go new file mode 100644 index 0000000..5d0dbee --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr16 rows from *Reader. +func (c *ColFixedStr16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][16]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr16 rows to *Buffer. +func (c ColFixedStr16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go new file mode 100644 index 0000000..1e2d955 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr256 represents FixedStr256 column. +type ColFixedStr256 [][256]byte + +// Compile-time assertions for ColFixedStr256. +var ( + _ ColInput = ColFixedStr256{} + _ ColResult = (*ColFixedStr256)(nil) + _ Column = (*ColFixedStr256)(nil) +) + +// Rows returns count of rows in column. 
+func (c ColFixedStr256) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr256) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr256. +func (ColFixedStr256) Type() ColumnType { + return ColumnTypeFixedString.With("256") +} + +// Row returns i-th row of column. +func (c ColFixedStr256) Row(i int) [256]byte { + return c[i] +} + +// Append [256]byte to column. +func (c *ColFixedStr256) Append(v [256]byte) { + *c = append(*c, v) +} + +// Append [256]byte slice to column. +func (c *ColFixedStr256) AppendArr(vs [][256]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr256 . +func (c *ColFixedStr256) LowCardinality() *ColLowCardinality[[256]byte] { + return &ColLowCardinality[[256]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [256]byte. +func (c *ColFixedStr256) Array() *ColArr[[256]byte] { + return &ColArr[[256]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([256]byte). +func (c *ColFixedStr256) Nullable() *ColNullable[[256]byte] { + return &ColNullable[[256]byte]{ + Values: c, + } +} + +// NewArrFixedStr256 returns new Array(FixedStr256). +func NewArrFixedStr256() *ColArr[[256]byte] { + return &ColArr[[256]byte]{ + Data: new(ColFixedStr256), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go new file mode 100644 index 0000000..bb961f8 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr256 rows from *Reader. +func (c *ColFixedStr256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 256 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[256]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr256 rows to *Buffer. +func (c ColFixedStr256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 256 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go new file mode 100644 index 0000000..277ac59 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr256 rows from *Reader. +func (c *ColFixedStr256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][256]byte, rows)...) 
+ s := *(*slice)(unsafe.Pointer(c)) + const size = 256 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr256 rows to *Buffer. +func (c ColFixedStr256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 256 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go new file mode 100644 index 0000000..90adba9 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr32 represents FixedStr32 column. +type ColFixedStr32 [][32]byte + +// Compile-time assertions for ColFixedStr32. +var ( + _ ColInput = ColFixedStr32{} + _ ColResult = (*ColFixedStr32)(nil) + _ Column = (*ColFixedStr32)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr32. +func (ColFixedStr32) Type() ColumnType { + return ColumnTypeFixedString.With("32") +} + +// Row returns i-th row of column. +func (c ColFixedStr32) Row(i int) [32]byte { + return c[i] +} + +// Append [32]byte to column. +func (c *ColFixedStr32) Append(v [32]byte) { + *c = append(*c, v) +} + +// Append [32]byte slice to column. +func (c *ColFixedStr32) AppendArr(vs [][32]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr32 . +func (c *ColFixedStr32) LowCardinality() *ColLowCardinality[[32]byte] { + return &ColLowCardinality[[32]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [32]byte. +func (c *ColFixedStr32) Array() *ColArr[[32]byte] { + return &ColArr[[32]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([32]byte). +func (c *ColFixedStr32) Nullable() *ColNullable[[32]byte] { + return &ColNullable[[32]byte]{ + Values: c, + } +} + +// NewArrFixedStr32 returns new Array(FixedStr32). +func NewArrFixedStr32() *ColArr[[32]byte] { + return &ColArr[[32]byte]{ + Data: new(ColFixedStr32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go new file mode 100644 index 0000000..cdaf62d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr32 rows from *Reader. +func (c *ColFixedStr32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. 
+ _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[32]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr32 rows to *Buffer. +func (c ColFixedStr32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go new file mode 100644 index 0000000..3777e5e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr32 rows from *Reader. +func (c *ColFixedStr32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][32]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr32 rows to *Buffer. +func (c ColFixedStr32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go new file mode 100644 index 0000000..09837fa --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr512 represents FixedStr512 column. +type ColFixedStr512 [][512]byte + +// Compile-time assertions for ColFixedStr512. +var ( + _ ColInput = ColFixedStr512{} + _ ColResult = (*ColFixedStr512)(nil) + _ Column = (*ColFixedStr512)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr512) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr512) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr512. +func (ColFixedStr512) Type() ColumnType { + return ColumnTypeFixedString.With("512") +} + +// Row returns i-th row of column. +func (c ColFixedStr512) Row(i int) [512]byte { + return c[i] +} + +// Append [512]byte to column. +func (c *ColFixedStr512) Append(v [512]byte) { + *c = append(*c, v) +} + +// Append [512]byte slice to column. +func (c *ColFixedStr512) AppendArr(vs [][512]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr512 . +func (c *ColFixedStr512) LowCardinality() *ColLowCardinality[[512]byte] { + return &ColLowCardinality[[512]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [512]byte. +func (c *ColFixedStr512) Array() *ColArr[[512]byte] { + return &ColArr[[512]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([512]byte). 
+func (c *ColFixedStr512) Nullable() *ColNullable[[512]byte] { + return &ColNullable[[512]byte]{ + Values: c, + } +} + +// NewArrFixedStr512 returns new Array(FixedStr512). +func NewArrFixedStr512() *ColArr[[512]byte] { + return &ColArr[[512]byte]{ + Data: new(ColFixedStr512), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go new file mode 100644 index 0000000..aa8ea31 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr512 rows from *Reader. +func (c *ColFixedStr512) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 512 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[512]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr512 rows to *Buffer. +func (c ColFixedStr512) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 512 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go new file mode 100644 index 0000000..970ca0f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr512 rows from *Reader. +func (c *ColFixedStr512) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][512]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 512 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr512 rows to *Buffer. +func (c ColFixedStr512) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 512 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go new file mode 100644 index 0000000..38849cc --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr64 represents FixedStr64 column. +type ColFixedStr64 [][64]byte + +// Compile-time assertions for ColFixedStr64. 
+var ( + _ ColInput = ColFixedStr64{} + _ ColResult = (*ColFixedStr64)(nil) + _ Column = (*ColFixedStr64)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr64. +func (ColFixedStr64) Type() ColumnType { + return ColumnTypeFixedString.With("64") +} + +// Row returns i-th row of column. +func (c ColFixedStr64) Row(i int) [64]byte { + return c[i] +} + +// Append [64]byte to column. +func (c *ColFixedStr64) Append(v [64]byte) { + *c = append(*c, v) +} + +// Append [64]byte slice to column. +func (c *ColFixedStr64) AppendArr(vs [][64]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr64 . +func (c *ColFixedStr64) LowCardinality() *ColLowCardinality[[64]byte] { + return &ColLowCardinality[[64]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [64]byte. +func (c *ColFixedStr64) Array() *ColArr[[64]byte] { + return &ColArr[[64]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([64]byte). +func (c *ColFixedStr64) Nullable() *ColNullable[[64]byte] { + return &ColNullable[[64]byte]{ + Values: c, + } +} + +// NewArrFixedStr64 returns new Array(FixedStr64). +func NewArrFixedStr64() *ColArr[[64]byte] { + return &ColArr[[64]byte]{ + Data: new(ColFixedStr64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go new file mode 100644 index 0000000..89c1f24 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr64 rows from *Reader. +func (c *ColFixedStr64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[64]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr64 rows to *Buffer. +func (c ColFixedStr64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go new file mode 100644 index 0000000..62ec09e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr64 rows from *Reader. 
+func (c *ColFixedStr64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][64]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr64 rows to *Buffer. +func (c ColFixedStr64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go new file mode 100644 index 0000000..a58723e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr8 represents FixedStr8 column. +type ColFixedStr8 [][8]byte + +// Compile-time assertions for ColFixedStr8. +var ( + _ ColInput = ColFixedStr8{} + _ ColResult = (*ColFixedStr8)(nil) + _ Column = (*ColFixedStr8)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr8) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr8) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr8. +func (ColFixedStr8) Type() ColumnType { + return ColumnTypeFixedString.With("8") +} + +// Row returns i-th row of column. +func (c ColFixedStr8) Row(i int) [8]byte { + return c[i] +} + +// Append [8]byte to column. +func (c *ColFixedStr8) Append(v [8]byte) { + *c = append(*c, v) +} + +// Append [8]byte slice to column. +func (c *ColFixedStr8) AppendArr(vs [][8]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr8 . +func (c *ColFixedStr8) LowCardinality() *ColLowCardinality[[8]byte] { + return &ColLowCardinality[[8]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [8]byte. +func (c *ColFixedStr8) Array() *ColArr[[8]byte] { + return &ColArr[[8]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([8]byte). +func (c *ColFixedStr8) Nullable() *ColNullable[[8]byte] { + return &ColNullable[[8]byte]{ + Values: c, + } +} + +// NewArrFixedStr8 returns new Array(FixedStr8). +func NewArrFixedStr8() *ColArr[[8]byte] { + return &ColArr[[8]byte]{ + Data: new(ColFixedStr8), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go new file mode 100644 index 0000000..086ea6f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr8 rows from *Reader. +func (c *ColFixedStr8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. 
+ // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[8]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr8 rows to *Buffer. +func (c ColFixedStr8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go new file mode 100644 index 0000000..9991c06 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr8 rows from *Reader. +func (c *ColFixedStr8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][8]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr8 rows to *Buffer. +func (c ColFixedStr8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go new file mode 100644 index 0000000..7031f11 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFloat32 represents Float32 column. +type ColFloat32 []float32 + +// Compile-time assertions for ColFloat32. +var ( + _ ColInput = ColFloat32{} + _ ColResult = (*ColFloat32)(nil) + _ Column = (*ColFloat32)(nil) +) + +// Rows returns count of rows in column. +func (c ColFloat32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFloat32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Float32. +func (ColFloat32) Type() ColumnType { + return ColumnTypeFloat32 +} + +// Row returns i-th row of column. +func (c ColFloat32) Row(i int) float32 { + return c[i] +} + +// Append float32 to column. +func (c *ColFloat32) Append(v float32) { + *c = append(*c, v) +} + +// Append float32 slice to column. +func (c *ColFloat32) AppendArr(vs []float32) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Float32 . +func (c *ColFloat32) LowCardinality() *ColLowCardinality[float32] { + return &ColLowCardinality[float32]{ + index: c, + } +} + +// Array is helper that creates Array of float32. +func (c *ColFloat32) Array() *ColArr[float32] { + return &ColArr[float32]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(float32). 
+func (c *ColFloat32) Nullable() *ColNullable[float32] { + return &ColNullable[float32]{ + Values: c, + } +} + +// NewArrFloat32 returns new Array(Float32). +func NewArrFloat32() *ColArr[float32] { + return &ColArr[float32]{ + Data: new(ColFloat32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go new file mode 100644 index 0000000..f400aef --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go @@ -0,0 +1,56 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + "math" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Float32 rows from *Reader. +func (c *ColFloat32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + math.Float32frombits(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Float32 rows to *Buffer. +func (c ColFloat32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + math.Float32bits(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go new file mode 100644 index 0000000..2ded35f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Float32 rows from *Reader. +func (c *ColFloat32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]float32, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Float32 rows to *Buffer. +func (c ColFloat32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go new file mode 100644 index 0000000..c210eb8 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFloat64 represents Float64 column. +type ColFloat64 []float64 + +// Compile-time assertions for ColFloat64. 
+var ( + _ ColInput = ColFloat64{} + _ ColResult = (*ColFloat64)(nil) + _ Column = (*ColFloat64)(nil) +) + +// Rows returns count of rows in column. +func (c ColFloat64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFloat64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Float64. +func (ColFloat64) Type() ColumnType { + return ColumnTypeFloat64 +} + +// Row returns i-th row of column. +func (c ColFloat64) Row(i int) float64 { + return c[i] +} + +// Append float64 to column. +func (c *ColFloat64) Append(v float64) { + *c = append(*c, v) +} + +// Append float64 slice to column. +func (c *ColFloat64) AppendArr(vs []float64) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Float64 . +func (c *ColFloat64) LowCardinality() *ColLowCardinality[float64] { + return &ColLowCardinality[float64]{ + index: c, + } +} + +// Array is helper that creates Array of float64. +func (c *ColFloat64) Array() *ColArr[float64] { + return &ColArr[float64]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(float64). +func (c *ColFloat64) Nullable() *ColNullable[float64] { + return &ColNullable[float64]{ + Values: c, + } +} + +// NewArrFloat64 returns new Array(Float64). +func NewArrFloat64() *ColArr[float64] { + return &ColArr[float64]{ + Data: new(ColFloat64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go new file mode 100644 index 0000000..68281ae --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go @@ -0,0 +1,56 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + "math" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Float64 rows from *Reader. +func (c *ColFloat64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + math.Float64frombits(binary.LittleEndian.Uint64(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Float64 rows to *Buffer. +func (c ColFloat64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint64( + b.Buf[offset:offset+size], + math.Float64bits(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go new file mode 100644 index 0000000..f16fd39 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Float64 rows from *Reader. 
+func (c *ColFloat64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]float64, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Float64 rows to *Buffer. +func (c ColFloat64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go new file mode 100644 index 0000000..5e982c4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt128 represents Int128 column. +type ColInt128 []Int128 + +// Compile-time assertions for ColInt128. +var ( + _ ColInput = ColInt128{} + _ ColResult = (*ColInt128)(nil) + _ Column = (*ColInt128)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt128) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColInt128) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int128. +func (ColInt128) Type() ColumnType { + return ColumnTypeInt128 +} + +// Row returns i-th row of column. +func (c ColInt128) Row(i int) Int128 { + return c[i] +} + +// Append Int128 to column. +func (c *ColInt128) Append(v Int128) { + *c = append(*c, v) +} + +// Append Int128 slice to column. +func (c *ColInt128) AppendArr(vs []Int128) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int128 . +func (c *ColInt128) LowCardinality() *ColLowCardinality[Int128] { + return &ColLowCardinality[Int128]{ + index: c, + } +} + +// Array is helper that creates Array of Int128. +func (c *ColInt128) Array() *ColArr[Int128] { + return &ColArr[Int128]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Int128). +func (c *ColInt128) Nullable() *ColNullable[Int128] { + return &ColNullable[Int128]{ + Values: c, + } +} + +// NewArrInt128 returns new Array(Int128). +func NewArrInt128() *ColArr[Int128] { + return &ColArr[Int128]{ + Data: new(ColInt128), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go new file mode 100644 index 0000000..5902d3f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int128 rows from *Reader. +func (c *ColInt128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. 
+ _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Int128(binUInt128(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Int128 rows to *Buffer. +func (c ColInt128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binPutUInt128( + b.Buf[offset:offset+size], + UInt128(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go new file mode 100644 index 0000000..c5862ff --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int128 rows from *Reader. +func (c *ColInt128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Int128, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int128 rows to *Buffer. +func (c ColInt128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go new file mode 100644 index 0000000..212801d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt16 represents Int16 column. +type ColInt16 []int16 + +// Compile-time assertions for ColInt16. +var ( + _ ColInput = ColInt16{} + _ ColResult = (*ColInt16)(nil) + _ Column = (*ColInt16)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt16) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColInt16) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int16. +func (ColInt16) Type() ColumnType { + return ColumnTypeInt16 +} + +// Row returns i-th row of column. +func (c ColInt16) Row(i int) int16 { + return c[i] +} + +// Append int16 to column. +func (c *ColInt16) Append(v int16) { + *c = append(*c, v) +} + +// Append int16 slice to column. +func (c *ColInt16) AppendArr(vs []int16) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int16 . +func (c *ColInt16) LowCardinality() *ColLowCardinality[int16] { + return &ColLowCardinality[int16]{ + index: c, + } +} + +// Array is helper that creates Array of int16. +func (c *ColInt16) Array() *ColArr[int16] { + return &ColArr[int16]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(int16). +func (c *ColInt16) Nullable() *ColNullable[int16] { + return &ColNullable[int16]{ + Values: c, + } +} + +// NewArrInt16 returns new Array(Int16). 
+func NewArrInt16() *ColArr[int16] { + return &ColArr[int16]{ + Data: new(ColInt16), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go new file mode 100644 index 0000000..75523a4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int16 rows from *Reader. +func (c *ColInt16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + int16(binary.LittleEndian.Uint16(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Int16 rows to *Buffer. +func (c ColInt16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint16( + b.Buf[offset:offset+size], + uint16(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go new file mode 100644 index 0000000..6ba5e50 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int16 rows from *Reader. +func (c *ColInt16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]int16, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int16 rows to *Buffer. +func (c ColInt16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go new file mode 100644 index 0000000..5d7454b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt256 represents Int256 column. +type ColInt256 []Int256 + +// Compile-time assertions for ColInt256. +var ( + _ ColInput = ColInt256{} + _ ColResult = (*ColInt256)(nil) + _ Column = (*ColInt256)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt256) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
+func (c *ColInt256) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int256. +func (ColInt256) Type() ColumnType { + return ColumnTypeInt256 +} + +// Row returns i-th row of column. +func (c ColInt256) Row(i int) Int256 { + return c[i] +} + +// Append Int256 to column. +func (c *ColInt256) Append(v Int256) { + *c = append(*c, v) +} + +// Append Int256 slice to column. +func (c *ColInt256) AppendArr(vs []Int256) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int256 . +func (c *ColInt256) LowCardinality() *ColLowCardinality[Int256] { + return &ColLowCardinality[Int256]{ + index: c, + } +} + +// Array is helper that creates Array of Int256. +func (c *ColInt256) Array() *ColArr[Int256] { + return &ColArr[Int256]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(Int256). +func (c *ColInt256) Nullable() *ColNullable[Int256] { + return &ColNullable[Int256]{ + Values: c, + } +} + +// NewArrInt256 returns new Array(Int256). +func NewArrInt256() *ColArr[Int256] { + return &ColArr[Int256]{ + Data: new(ColInt256), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go new file mode 100644 index 0000000..0b9f8f1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int256 rows from *Reader. +func (c *ColInt256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 256 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + Int256(binUInt256(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Int256 rows to *Buffer. +func (c ColInt256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binPutUInt256( + b.Buf[offset:offset+size], + UInt256(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go new file mode 100644 index 0000000..2433bc9 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int256 rows from *Reader. +func (c *ColInt256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]Int256, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 256 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int256 rows to *Buffer. 
+func (c ColInt256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 256 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go new file mode 100644 index 0000000..46b0958 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt32 represents Int32 column. +type ColInt32 []int32 + +// Compile-time assertions for ColInt32. +var ( + _ ColInput = ColInt32{} + _ ColResult = (*ColInt32)(nil) + _ Column = (*ColInt32)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColInt32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int32. +func (ColInt32) Type() ColumnType { + return ColumnTypeInt32 +} + +// Row returns i-th row of column. +func (c ColInt32) Row(i int) int32 { + return c[i] +} + +// Append int32 to column. +func (c *ColInt32) Append(v int32) { + *c = append(*c, v) +} + +// Append int32 slice to column. +func (c *ColInt32) AppendArr(vs []int32) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int32 . +func (c *ColInt32) LowCardinality() *ColLowCardinality[int32] { + return &ColLowCardinality[int32]{ + index: c, + } +} + +// Array is helper that creates Array of int32. +func (c *ColInt32) Array() *ColArr[int32] { + return &ColArr[int32]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(int32). +func (c *ColInt32) Nullable() *ColNullable[int32] { + return &ColNullable[int32]{ + Values: c, + } +} + +// NewArrInt32 returns new Array(Int32). +func NewArrInt32() *ColArr[int32] { + return &ColArr[int32]{ + Data: new(ColInt32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go new file mode 100644 index 0000000..52f78c1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int32 rows from *Reader. +func (c *ColInt32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + int32(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Int32 rows to *Buffer. +func (c ColInt32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + uint32(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go new file mode 100644 index 0000000..b2e1025 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int32 rows from *Reader. +func (c *ColInt32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]int32, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int32 rows to *Buffer. +func (c ColInt32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go new file mode 100644 index 0000000..4c8875c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt64 represents Int64 column. +type ColInt64 []int64 + +// Compile-time assertions for ColInt64. +var ( + _ ColInput = ColInt64{} + _ ColResult = (*ColInt64)(nil) + _ Column = (*ColInt64)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColInt64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int64. +func (ColInt64) Type() ColumnType { + return ColumnTypeInt64 +} + +// Row returns i-th row of column. +func (c ColInt64) Row(i int) int64 { + return c[i] +} + +// Append int64 to column. +func (c *ColInt64) Append(v int64) { + *c = append(*c, v) +} + +// Append int64 slice to column. +func (c *ColInt64) AppendArr(vs []int64) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int64 . +func (c *ColInt64) LowCardinality() *ColLowCardinality[int64] { + return &ColLowCardinality[int64]{ + index: c, + } +} + +// Array is helper that creates Array of int64. +func (c *ColInt64) Array() *ColArr[int64] { + return &ColArr[int64]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(int64). +func (c *ColInt64) Nullable() *ColNullable[int64] { + return &ColNullable[int64]{ + Values: c, + } +} + +// NewArrInt64 returns new Array(Int64). 
+func NewArrInt64() *ColArr[int64] { + return &ColArr[int64]{ + Data: new(ColInt64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go new file mode 100644 index 0000000..400367d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int64 rows from *Reader. +func (c *ColInt64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + int64(binary.LittleEndian.Uint64(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes Int64 rows to *Buffer. +func (c ColInt64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint64( + b.Buf[offset:offset+size], + uint64(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go new file mode 100644 index 0000000..5c6f265 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int64 rows from *Reader. +func (c *ColInt64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]int64, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int64 rows to *Buffer. +func (c ColInt64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go new file mode 100644 index 0000000..98a71a2 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColInt8 represents Int8 column. +type ColInt8 []int8 + +// Compile-time assertions for ColInt8. +var ( + _ ColInput = ColInt8{} + _ ColResult = (*ColInt8)(nil) + _ Column = (*ColInt8)(nil) +) + +// Rows returns count of rows in column. +func (c ColInt8) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
+func (c *ColInt8) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of Int8. +func (ColInt8) Type() ColumnType { + return ColumnTypeInt8 +} + +// Row returns i-th row of column. +func (c ColInt8) Row(i int) int8 { + return c[i] +} + +// Append int8 to column. +func (c *ColInt8) Append(v int8) { + *c = append(*c, v) +} + +// Append int8 slice to column. +func (c *ColInt8) AppendArr(vs []int8) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for Int8 . +func (c *ColInt8) LowCardinality() *ColLowCardinality[int8] { + return &ColLowCardinality[int8]{ + index: c, + } +} + +// Array is helper that creates Array of int8. +func (c *ColInt8) Array() *ColArr[int8] { + return &ColArr[int8]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(int8). +func (c *ColInt8) Nullable() *ColNullable[int8] { + return &ColNullable[int8]{ + Values: c, + } +} + +// NewArrInt8 returns new Array(Int8). +func NewArrInt8() *ColArr[int8] { + return &ColArr[int8]{ + Data: new(ColInt8), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go new file mode 100644 index 0000000..a79459d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go @@ -0,0 +1,44 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes Int8 rows from *Reader. +func (c *ColInt8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + data, err := r.ReadRaw(rows) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + v = append(v, make([]int8, rows)...) + for i := range data { + v[i] = int8(data[i]) + } + *c = v + return nil +} + +// EncodeColumn encodes Int8 rows to *Buffer. +func (c ColInt8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + start := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(v))...) + for i := range v { + b.Buf[i+start] = uint8(v[i]) + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go new file mode 100644 index 0000000..1c62c7d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go @@ -0,0 +1,39 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes Int8 rows from *Reader. +func (c *ColInt8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]int8, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes Int8 rows to *Buffer. +func (c ColInt8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, len(v))...) 
+ s := *(*slice)(unsafe.Pointer(&v)) + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go new file mode 100644 index 0000000..57bb2e3 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go @@ -0,0 +1,112 @@ +package proto + +import ( + "fmt" + "strings" + "time" + + "github.com/go-faster/errors" +) + +//go:generate go run github.com/dmarkham/enumer -type IntervalScale -output interval_enum.go + +type IntervalScale byte + +const ( + IntervalSecond IntervalScale = iota + IntervalMinute + IntervalHour + IntervalDay + IntervalWeek + IntervalMonth + IntervalQuarter + IntervalYear +) + +type Interval struct { + Scale IntervalScale + Value int64 +} + +// Add Interval to time.Time. +func (i Interval) Add(t time.Time) time.Time { + switch i.Scale { + case IntervalSecond: + return t.Add(time.Second * time.Duration(i.Value)) + case IntervalMinute: + return t.Add(time.Minute * time.Duration(i.Value)) + case IntervalHour: + return t.Add(time.Hour * time.Duration(i.Value)) + case IntervalDay: + return t.AddDate(0, 0, int(i.Value)) + case IntervalWeek: + return t.AddDate(0, 0, int(i.Value)*7) + case IntervalMonth: + return t.AddDate(0, int(i.Value), 0) + case IntervalQuarter: + return t.AddDate(0, int(i.Value)*4, 0) + case IntervalYear: + return t.AddDate(int(i.Value), 0, 0) + default: + panic(fmt.Sprintf("unknown interval scale %s", i.Scale)) + } +} + +func (i Interval) String() string { + var out strings.Builder + out.WriteString(fmt.Sprintf("%d", i.Value)) + out.WriteRune(' ') + out.WriteString(strings.ToLower(strings.TrimPrefix(i.Scale.String(), ColumnTypeInterval.String()))) + if i.Value > 1 || i.Value < 1 { + out.WriteRune('s') + } + return out.String() +} + +type ColInterval struct { + Scale IntervalScale + Values ColInt64 +} + +func (c *ColInterval) Infer(t ColumnType) error { + scale, err := IntervalScaleString(t.String()) + if err != nil { + return errors.Wrap(err, "scale") + } + c.Scale = scale + return nil +} + +func (c *ColInterval) Append(v Interval) { + if v.Scale != c.Scale { + panic(fmt.Sprintf("append: cant append %s to %s", v.Scale, c.Scale)) + } + c.Values.Append(v.Value) +} + +func (c ColInterval) Row(i int) Interval { + return Interval{ + Scale: c.Scale, + Value: c.Values.Row(i), + } +} + +func (c ColInterval) Type() ColumnType { + return ColumnType(c.Scale.String()) +} + +func (c ColInterval) Rows() int { + return len(c.Values) +} + +func (c *ColInterval) DecodeColumn(r *Reader, rows int) error { + return c.Values.DecodeColumn(r, rows) +} + +func (c *ColInterval) Reset() { + c.Values.Reset() +} + +func (c ColInterval) EncodeColumn(b *Buffer) { + c.Values.EncodeColumn(b) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go new file mode 100644 index 0000000..4c7a0bc --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColIPv4 represents IPv4 column. +type ColIPv4 []IPv4 + +// Compile-time assertions for ColIPv4. +var ( + _ ColInput = ColIPv4{} + _ ColResult = (*ColIPv4)(nil) + _ Column = (*ColIPv4)(nil) +) + +// Rows returns count of rows in column. +func (c ColIPv4) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
+func (c *ColIPv4) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of IPv4. +func (ColIPv4) Type() ColumnType { + return ColumnTypeIPv4 +} + +// Row returns i-th row of column. +func (c ColIPv4) Row(i int) IPv4 { + return c[i] +} + +// Append IPv4 to column. +func (c *ColIPv4) Append(v IPv4) { + *c = append(*c, v) +} + +// Append IPv4 slice to column. +func (c *ColIPv4) AppendArr(vs []IPv4) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for IPv4 . +func (c *ColIPv4) LowCardinality() *ColLowCardinality[IPv4] { + return &ColLowCardinality[IPv4]{ + index: c, + } +} + +// Array is helper that creates Array of IPv4. +func (c *ColIPv4) Array() *ColArr[IPv4] { + return &ColArr[IPv4]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(IPv4). +func (c *ColIPv4) Nullable() *ColNullable[IPv4] { + return &ColNullable[IPv4]{ + Values: c, + } +} + +// NewArrIPv4 returns new Array(IPv4). +func NewArrIPv4() *ColArr[IPv4] { + return &ColArr[IPv4]{ + Data: new(ColIPv4), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go new file mode 100644 index 0000000..8b0b790 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes IPv4 rows from *Reader. +func (c *ColIPv4) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + IPv4(binary.LittleEndian.Uint32(data[i:i+size])), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes IPv4 rows to *Buffer. +func (c ColIPv4) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + uint32(vv), + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go new file mode 100644 index 0000000..5fc0b7c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes IPv4 rows from *Reader. +func (c *ColIPv4) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]IPv4, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes IPv4 rows to *Buffer. 
+func (c ColIPv4) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go new file mode 100644 index 0000000..5907bd7 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColIPv6 represents IPv6 column. +type ColIPv6 []IPv6 + +// Compile-time assertions for ColIPv6. +var ( + _ ColInput = ColIPv6{} + _ ColResult = (*ColIPv6)(nil) + _ Column = (*ColIPv6)(nil) +) + +// Rows returns count of rows in column. +func (c ColIPv6) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColIPv6) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of IPv6. +func (ColIPv6) Type() ColumnType { + return ColumnTypeIPv6 +} + +// Row returns i-th row of column. +func (c ColIPv6) Row(i int) IPv6 { + return c[i] +} + +// Append IPv6 to column. +func (c *ColIPv6) Append(v IPv6) { + *c = append(*c, v) +} + +// Append IPv6 slice to column. +func (c *ColIPv6) AppendArr(vs []IPv6) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for IPv6 . +func (c *ColIPv6) LowCardinality() *ColLowCardinality[IPv6] { + return &ColLowCardinality[IPv6]{ + index: c, + } +} + +// Array is helper that creates Array of IPv6. +func (c *ColIPv6) Array() *ColArr[IPv6] { + return &ColArr[IPv6]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(IPv6). +func (c *ColIPv6) Nullable() *ColNullable[IPv6] { + return &ColNullable[IPv6]{ + Values: c, + } +} + +// NewArrIPv6 returns new Array(IPv6). +func NewArrIPv6() *ColArr[IPv6] { + return &ColArr[IPv6]{ + Data: new(ColIPv6), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go new file mode 100644 index 0000000..9a5870d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes IPv6 rows from *Reader. +func (c *ColIPv6) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binIPv6(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes IPv6 rows to *Buffer. +func (c ColIPv6) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ for _, vv := range v { + binPutIPv6( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go new file mode 100644 index 0000000..5650b49 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes IPv6 rows from *Reader. +func (c *ColIPv6) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]IPv6, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes IPv6 rows to *Buffer. +func (c ColIPv6) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go new file mode 100644 index 0000000..ffed580 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go @@ -0,0 +1,347 @@ +package proto + +import ( + "math" + + "github.com/go-faster/errors" +) + +// Compile-time assertions for ColLowCardinality. +var ( + _ ColInput = (*ColLowCardinality[string])(nil) + _ ColResult = (*ColLowCardinality[string])(nil) + _ Column = (*ColLowCardinality[string])(nil) +) + +//go:generate go run github.com/dmarkham/enumer -type CardinalityKey -trimprefix Key -output col_low_cardinality_enum.go + +// CardinalityKey is integer type of ColLowCardinality.Keys column. +type CardinalityKey byte + +// Possible integer types for ColLowCardinality.Keys. +const ( + KeyUInt8 CardinalityKey = 0 + KeyUInt16 CardinalityKey = 1 + KeyUInt32 CardinalityKey = 2 + KeyUInt64 CardinalityKey = 3 +) + +// Constants for low cardinality metadata value that is represented as int64 +// consisted of bitflags and key type. +// +// https://github.com/ClickHouse/clickhouse-cpp/blob/b10d71eed0532405dfb4dd03aabce869ba68f581/clickhouse/columns/lowcardinality.cpp +// +// NB: shared dictionaries and on-the-fly dictionary update is not supported, +// because it is not currently used in client protocol. +const ( + cardinalityKeyMask = 0b0000_1111_1111 // last byte + + // Need to read dictionary if it wasn't. + cardinalityNeedGlobalDictionaryBit = 1 << 8 + // Need to read additional keys. + // Additional keys are stored before indexes as value N and N keys + // after them. + cardinalityHasAdditionalKeysBit = 1 << 9 + // Need to update dictionary. It means that previous granule has different dictionary. + cardinalityNeedUpdateDictionary = 1 << 10 + + // cardinalityUpdateAll sets both flags (update index, has additional keys) + cardinalityUpdateAll = cardinalityHasAdditionalKeysBit | cardinalityNeedUpdateDictionary +) + +type keySerializationVersion byte + +// sharedDictionariesWithAdditionalKeys is default key serialization. 
+const sharedDictionariesWithAdditionalKeys keySerializationVersion = 1 + +// ColLowCardinality is generic LowCardinality(T) column. +// +// ColLowCardinality contains index and keys columns. +// +// Index (i.e. dictionary) column contains unique values, Keys column contains +// sequence of indexes in Index column that represent actual values. +// +// For example, ["Eko", "Eko", "Amadela", "Amadela", "Amadela", "Amadela"] can +// be encoded as: +// +// Index: ["Eko", "Amadela"] (String) +// Keys: [0, 0, 1, 1, 1, 1] (UInt8) +// +// The CardinalityKey is chosen depending on Index size, i.e. maximum value +// of chosen type should be able to represent any index of Index element. +type ColLowCardinality[T comparable] struct { + Values []T + + index ColumnOf[T] + key CardinalityKey + + // Keeping all key column variants as fields to reuse + // memory more efficiently. + + // Values[T], kv and keys columns adds memory overhead, but simplifies + // implementation. + // TODO(ernado): revisit tradeoffs + + keys8 ColUInt8 + keys16 ColUInt16 + keys32 ColUInt32 + keys64 ColUInt64 + + kv map[T]int + keys []int +} + +// DecodeState implements StateDecoder, ensuring state for index column. +func (c *ColLowCardinality[T]) DecodeState(r *Reader) error { + keySerialization, err := r.Int64() + if err != nil { + return errors.Wrap(err, "version") + } + if keySerialization != int64(sharedDictionariesWithAdditionalKeys) { + return errors.Errorf("got version %d, expected %d", + keySerialization, sharedDictionariesWithAdditionalKeys, + ) + } + if s, ok := c.index.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "index state") + } + } + return nil +} + +// EncodeState implements StateEncoder, ensuring state for index column. +func (c ColLowCardinality[T]) EncodeState(b *Buffer) { + // Writing key serialization version. + b.PutInt64(int64(sharedDictionariesWithAdditionalKeys)) + if s, ok := c.index.(StateEncoder); ok { + s.EncodeState(b) + } +} + +func (c *ColLowCardinality[T]) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + // Skipping entirely of no rows. 
+ return nil + } + meta, err := r.Int64() + if err != nil { + return errors.Wrap(err, "meta") + } + if (meta & cardinalityNeedGlobalDictionaryBit) == 1 { + return errors.New("global dictionary is not supported") + } + if (meta & cardinalityHasAdditionalKeysBit) == 0 { + return errors.New("additional keys bit is missing") + } + + key := CardinalityKey(meta & cardinalityKeyMask) + if !key.IsACardinalityKey() { + return errors.Errorf("invalid low cardinality keys type %d", key) + } + c.key = key + + indexRows, err := r.Int64() + if err != nil { + return errors.Wrap(err, "index size") + } + if err := checkRows(int(indexRows)); err != nil { + return errors.Wrap(err, "index size") + } + if err := c.index.DecodeColumn(r, int(indexRows)); err != nil { + return errors.Wrap(err, "index column") + } + + keyRows, err := r.Int64() + if err != nil { + return errors.Wrap(err, "keys size") + } + if err := checkRows(int(keyRows)); err != nil { + return errors.Wrap(err, "index size") + } + switch c.key { + case KeyUInt8: + if err := c.keys8.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "keys") + } + c.keys = fillValues(c.keys, c.keys8) + case KeyUInt16: + if err := c.keys16.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "keys") + } + c.keys = fillValues(c.keys, c.keys16) + case KeyUInt32: + if err := c.keys32.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "keys") + } + c.keys = fillValues(c.keys, c.keys32) + case KeyUInt64: + if err := c.keys64.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "keys") + } + c.keys = fillValues(c.keys, c.keys64) + default: + return errors.Errorf("invalid key format %s", c.key) + } + + c.Values = c.Values[:0] + for _, idx := range c.keys { + if int64(idx) >= indexRows || idx < 0 { + return errors.Errorf("key index out of range [%d] with length %d", idx, indexRows) + } + c.Values = append(c.Values, c.index.Row(idx)) + } + + return nil +} + +func (c ColLowCardinality[T]) Type() ColumnType { + return ColumnTypeLowCardinality.Sub(c.index.Type()) +} + +func (c *ColLowCardinality[T]) EncodeColumn(b *Buffer) { + // Using pointer receiver as Prepare() is expected to be called before + // encoding. + + if c.Rows() == 0 { + // Skipping encoding entirely. + return + } + + // Meta encodes whether reader should update + // low cardinality metadata and keys column type. + meta := cardinalityUpdateAll | int64(c.key) + b.PutInt64(meta) + + // Writing index (dictionary). + b.PutInt64(int64(c.index.Rows())) + c.index.EncodeColumn(b) + + b.PutInt64(int64(c.Rows())) + switch c.key { + case KeyUInt8: + c.keys8.EncodeColumn(b) + case KeyUInt16: + c.keys16.EncodeColumn(b) + case KeyUInt32: + c.keys32.EncodeColumn(b) + case KeyUInt64: + c.keys64.EncodeColumn(b) + } +} + +func (c *ColLowCardinality[T]) Reset() { + for k := range c.kv { + delete(c.kv, k) + } + c.keys = c.keys[:0] + + c.keys8 = c.keys8[:0] + c.keys16 = c.keys16[:0] + c.keys32 = c.keys32[:0] + c.keys64 = c.keys64[:0] + c.Values = c.Values[:0] + + c.index.Reset() +} + +type cardinalityKeyValue interface { + ~uint8 | ~uint16 | ~uint32 | ~uint64 +} + +func fillKeys[K cardinalityKeyValue](values []int, keys []K) []K { + keys = keys[:0] + for _, v := range values { + keys = append(keys, K(v)) + } + return keys +} + +func fillValues[K cardinalityKeyValue](values []int, keys []K) []int { + for _, v := range keys { + values = append(values, int(v)) + } + return values +} + +// Append value to column. 
+func (c *ColLowCardinality[T]) Append(v T) { + c.Values = append(c.Values, v) +} + +// AppendArr appends slice to column. +func (c *ColLowCardinality[T]) AppendArr(v []T) { + c.Values = append(c.Values, v...) +} + +// Row returns i-th row. +func (c ColLowCardinality[T]) Row(i int) T { + return c.Values[i] +} + +// Rows returns rows count. +func (c ColLowCardinality[T]) Rows() int { + return len(c.Values) +} + +// Prepare column for ingestion. +func (c *ColLowCardinality[T]) Prepare() error { + // Select minimum possible size for key. + if n := len(c.Values); n < math.MaxUint8 { + c.key = KeyUInt8 + } else if n < math.MaxUint16 { + c.key = KeyUInt16 + } else if uint32(n) < math.MaxUint32 { + c.key = KeyUInt32 + } else { + c.key = KeyUInt64 + } + + // Allocate keys slice. + c.keys = append(c.keys[:0], make([]int, len(c.Values))...) + if c.kv == nil { + c.kv = map[T]int{} + c.index.Reset() + } + + // Fill keys with value indexes. + var last int + for i, v := range c.Values { + idx, ok := c.kv[v] + if !ok { + c.index.Append(v) + c.kv[v] = last + idx = last + last++ + } + c.keys[i] = idx + } + + // Fill key column with key indexes. + switch c.key { + case KeyUInt8: + c.keys8 = fillKeys(c.keys, c.keys8) + case KeyUInt16: + c.keys16 = fillKeys(c.keys, c.keys16) + case KeyUInt32: + c.keys32 = fillKeys(c.keys, c.keys32) + case KeyUInt64: + c.keys64 = fillKeys(c.keys, c.keys64) + } + + return nil +} + +// Array is helper that creates Array(ColLowCardinality(T)). +func (c *ColLowCardinality[T]) Array() *ColArr[T] { + return &ColArr[T]{ + Data: c, + } +} + +// NewLowCardinality creates new LowCardinality column from another column for T. +func NewLowCardinality[T comparable](c ColumnOf[T]) *ColLowCardinality[T] { + return &ColLowCardinality[T]{ + index: c, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_enum.go new file mode 100644 index 0000000..ade86f2 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_enum.go @@ -0,0 +1,86 @@ +// Code generated by "enumer -type CardinalityKey -trimprefix Key -output col_low_cardinality_enum.go"; DO NOT EDIT. + +package proto + +import ( + "fmt" + "strings" +) + +const _CardinalityKeyName = "UInt8UInt16UInt32UInt64" + +var _CardinalityKeyIndex = [...]uint8{0, 5, 11, 17, 23} + +const _CardinalityKeyLowerName = "uint8uint16uint32uint64" + +func (i CardinalityKey) String() string { + if i >= CardinalityKey(len(_CardinalityKeyIndex)-1) { + return fmt.Sprintf("CardinalityKey(%d)", i) + } + return _CardinalityKeyName[_CardinalityKeyIndex[i]:_CardinalityKeyIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. 
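The dictionary encoding described in the ColLowCardinality comment can be exercised roughly as follows. NewLowCardinality, ColStr and Buffer are the vendored types above; the sample strings repeat the Eko/Amadela example, and the type string in the comment is an assumption about how ColumnType.Sub renders.

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	col := proto.NewLowCardinality[string](new(proto.ColStr))
	col.AppendArr([]string{"Eko", "Eko", "Amadela", "Amadela", "Amadela", "Amadela"})

	// Prepare builds the dictionary (index) and key columns from Values and
	// picks the narrowest key width (UInt8 here, since there are few values).
	if err := col.Prepare(); err != nil {
		panic(err)
	}

	var buf proto.Buffer
	col.EncodeColumn(&buf)

	fmt.Println(col.Type())   // e.g. LowCardinality(String)
	fmt.Println(col.Rows())   // 6
	fmt.Println(col.Row(2))   // Amadela
	fmt.Println(len(buf.Buf)) // encoded size in bytes
}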
+func _CardinalityKeyNoOp() { + var x [1]struct{} + _ = x[KeyUInt8-(0)] + _ = x[KeyUInt16-(1)] + _ = x[KeyUInt32-(2)] + _ = x[KeyUInt64-(3)] +} + +var _CardinalityKeyValues = []CardinalityKey{KeyUInt8, KeyUInt16, KeyUInt32, KeyUInt64} + +var _CardinalityKeyNameToValueMap = map[string]CardinalityKey{ + _CardinalityKeyName[0:5]: KeyUInt8, + _CardinalityKeyLowerName[0:5]: KeyUInt8, + _CardinalityKeyName[5:11]: KeyUInt16, + _CardinalityKeyLowerName[5:11]: KeyUInt16, + _CardinalityKeyName[11:17]: KeyUInt32, + _CardinalityKeyLowerName[11:17]: KeyUInt32, + _CardinalityKeyName[17:23]: KeyUInt64, + _CardinalityKeyLowerName[17:23]: KeyUInt64, +} + +var _CardinalityKeyNames = []string{ + _CardinalityKeyName[0:5], + _CardinalityKeyName[5:11], + _CardinalityKeyName[11:17], + _CardinalityKeyName[17:23], +} + +// CardinalityKeyString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func CardinalityKeyString(s string) (CardinalityKey, error) { + if val, ok := _CardinalityKeyNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _CardinalityKeyNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to CardinalityKey values", s) +} + +// CardinalityKeyValues returns all values of the enum +func CardinalityKeyValues() []CardinalityKey { + return _CardinalityKeyValues +} + +// CardinalityKeyStrings returns a slice of all String values of the enum +func CardinalityKeyStrings() []string { + strs := make([]string, len(_CardinalityKeyNames)) + copy(strs, _CardinalityKeyNames) + return strs +} + +// IsACardinalityKey returns "true" if the value is listed in the enum definition. "false" otherwise +func (i CardinalityKey) IsACardinalityKey() bool { + for _, v := range _CardinalityKeyValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go new file mode 100644 index 0000000..665dc20 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go @@ -0,0 +1,157 @@ +package proto + +import "github.com/go-faster/errors" + +// ColLowCardinalityRaw is non-generic version of ColLowCardinality. +type ColLowCardinalityRaw struct { + Index Column // dictionary + Key CardinalityKey + + // Keeping all key column variants as fields to reuse + // memory more efficiently. + + Keys8 ColUInt8 + Keys16 ColUInt16 + Keys32 ColUInt32 + Keys64 ColUInt64 +} + +func (c *ColLowCardinalityRaw) DecodeState(r *Reader) error { + keySerialization, err := r.Int64() + if err != nil { + return errors.Wrap(err, "version") + } + if keySerialization != int64(sharedDictionariesWithAdditionalKeys) { + return errors.Errorf("got version %d, expected %d", + keySerialization, sharedDictionariesWithAdditionalKeys, + ) + } + if s, ok := c.Index.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "state") + } + } + return nil +} + +func (c ColLowCardinalityRaw) EncodeState(b *Buffer) { + // Writing key serialization version. 
+ b.PutInt64(int64(sharedDictionariesWithAdditionalKeys)) + if s, ok := c.Index.(StateEncoder); ok { + s.EncodeState(b) + } +} + +func (c *ColLowCardinalityRaw) AppendKey(i int) { + switch c.Key { + case KeyUInt8: + c.Keys8 = append(c.Keys8, uint8(i)) + case KeyUInt16: + c.Keys16 = append(c.Keys16, uint16(i)) + case KeyUInt32: + c.Keys32 = append(c.Keys32, uint32(i)) + case KeyUInt64: + c.Keys64 = append(c.Keys64, uint64(i)) + default: + panic("invalid key type") + } +} + +func (c *ColLowCardinalityRaw) Keys() Column { + switch c.Key { + case KeyUInt8: + return &c.Keys8 + case KeyUInt16: + return &c.Keys16 + case KeyUInt32: + return &c.Keys32 + case KeyUInt64: + return &c.Keys64 + default: + panic("invalid key type") + } +} + +func (c ColLowCardinalityRaw) Type() ColumnType { + return ColumnTypeLowCardinality.Sub(c.Index.Type()) +} + +func (c ColLowCardinalityRaw) Rows() int { + return c.Keys().Rows() +} + +func (c *ColLowCardinalityRaw) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + // Skipping entirely of no rows. + return nil + } + meta, err := r.Int64() + if err != nil { + return errors.Wrap(err, "meta") + } + if (meta & cardinalityNeedGlobalDictionaryBit) == 1 { + return errors.New("global dictionary is not supported") + } + if (meta & cardinalityHasAdditionalKeysBit) == 0 { + return errors.New("additional keys bit is missing") + } + + key := CardinalityKey(meta & cardinalityKeyMask) + if !key.IsACardinalityKey() { + return errors.Errorf("invalid low cardinality keys type %d", key) + } + c.Key = key + + indexRows, err := r.Int64() + if err != nil { + return errors.Wrap(err, "index size") + } + if err := checkRows(int(indexRows)); err != nil { + return errors.Wrap(err, "index size") + } + if err := c.Index.DecodeColumn(r, int(indexRows)); err != nil { + return errors.Wrap(err, "index column") + } + + keyRows, err := r.Int64() + if err != nil { + return errors.Wrap(err, "keys size") + } + if err := checkRows(int(keyRows)); err != nil { + return errors.Wrap(err, "index size") + } + if err := c.Keys().DecodeColumn(r, int(keyRows)); err != nil { + return errors.Wrap(err, "keys column") + } + + return nil +} + +func (c *ColLowCardinalityRaw) Reset() { + c.Index.Reset() + c.Keys8.Reset() + c.Keys16.Reset() + c.Keys32.Reset() + c.Keys64.Reset() +} + +func (c ColLowCardinalityRaw) EncodeColumn(b *Buffer) { + if c.Rows() == 0 { + // Skipping encoding entirely. + return + } + + // Meta encodes whether reader should update + // low cardinality metadata and keys column type. + meta := cardinalityUpdateAll | int64(c.Key) + b.PutInt64(meta) + + // Writing index (dictionary). + b.PutInt64(int64(c.Index.Rows())) + c.Index.EncodeColumn(b) + + // Sequence of values as indexes in dictionary. + k := c.Keys() + b.PutInt64(int64(k.Rows())) + k.EncodeColumn(b) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go new file mode 100644 index 0000000..90925fb --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go @@ -0,0 +1,201 @@ +package proto + +import ( + "strings" + + "github.com/go-faster/errors" +) + +// Compile-time assertions for ColMap. 
+var ( + _ ColInput = (*ColMap[string, string])(nil) + _ ColResult = (*ColMap[string, string])(nil) + _ Column = (*ColMap[string, string])(nil) + _ ColumnOf[map[string]int] = (*ColMap[string, int])(nil) + _ StateEncoder = (*ColMap[string, string])(nil) + _ StateDecoder = (*ColMap[string, string])(nil) + + _ = ColMap[int64, string]{ + Keys: new(ColInt64), + Values: new(ColStr), + } +) + +// NewMap constructs Map(K, V). +func NewMap[K comparable, V any](k ColumnOf[K], v ColumnOf[V]) *ColMap[K, V] { + return &ColMap[K, V]{ + Keys: k, + Values: v, + } +} + +// ColMap implements Map(K, V) as ColumnOf[map[K]V]. +type ColMap[K comparable, V any] struct { + Offsets ColUInt64 + Keys ColumnOf[K] + Values ColumnOf[V] +} + +func (c ColMap[K, V]) Type() ColumnType { + return ColumnTypeMap.Sub(c.Keys.Type(), c.Values.Type()) +} + +func (c ColMap[K, V]) Rows() int { + return c.Offsets.Rows() +} + +func (c *ColMap[K, V]) DecodeState(r *Reader) error { + if s, ok := c.Keys.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "keys state") + } + } + if s, ok := c.Values.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "values state") + } + } + return nil +} + +func (c ColMap[K, V]) EncodeState(b *Buffer) { + if s, ok := c.Keys.(StateEncoder); ok { + s.EncodeState(b) + } + if s, ok := c.Values.(StateEncoder); ok { + s.EncodeState(b) + } +} + +func (c ColMap[K, V]) Row(i int) map[K]V { + m := make(map[K]V) + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + for idx := start; idx < end; idx++ { + m[c.Keys.Row(idx)] = c.Values.Row(idx) + } + return m +} + +// RowKV returns a slice of KV[K, V] for a given row. +func (c ColMap[K, V]) RowKV(i int) []KV[K, V] { + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + v := make([]KV[K, V], 0, end-start) + for idx := start; idx < end; idx++ { + v = append(v, KV[K, V]{ + Key: c.Keys.Row(idx), + Value: c.Values.Row(idx), + }) + } + return v +} + +// KV is a key-value pair. +type KV[K comparable, V any] struct { + Key K + Value V +} + +// AppendKV is a convenience method for appending a slice of KV[K, V]. +func (c *ColMap[K, V]) AppendKV(kv []KV[K, V]) { + for _, v := range kv { + c.Keys.Append(v.Key) + c.Values.Append(v.Value) + } + c.Offsets.Append(uint64(c.Keys.Rows())) +} + +func (c *ColMap[K, V]) Append(m map[K]V) { + for k, v := range m { + c.Keys.Append(k) + c.Values.Append(v) + } + c.Offsets.Append(uint64(c.Keys.Rows())) +} + +func (c *ColMap[K, V]) AppendArr(v []map[K]V) { + for _, m := range v { + c.Append(m) + } +} + +func (c *ColMap[K, V]) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + if err := c.Offsets.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "offsets") + } + + count := int(c.Offsets[rows-1]) + if err := checkRows(count); err != nil { + return errors.Wrap(err, "keys count") + } + if err := c.Keys.DecodeColumn(r, count); err != nil { + return errors.Wrap(err, "keys") + } + if err := c.Values.DecodeColumn(r, count); err != nil { + return errors.Wrap(err, "values") + } + + return nil +} + +func (c *ColMap[K, V]) Reset() { + c.Offsets.Reset() + c.Keys.Reset() + c.Values.Reset() +} + +func (c ColMap[K, V]) EncodeColumn(b *Buffer) { + if c.Rows() == 0 { + return + } + + c.Offsets.EncodeColumn(b) + c.Keys.EncodeColumn(b) + c.Values.EncodeColumn(b) +} + +// Prepare ensures Preparable column propagation. 
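A short sketch of the offsets layout used by ColMap above: each Append writes the key/value pairs flat into Keys/Values and records the running pair count in Offsets, so Row(i) slices between Offsets[i-1] and Offsets[i]. The sample data is arbitrary, and because Go map iteration order is unspecified, the flat pair order is too; AppendKV is the order-preserving alternative.

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	col := proto.NewMap[string, string](new(proto.ColStr), new(proto.ColStr))

	col.Append(map[string]string{"env": "prod", "dc": "eu-1"})
	col.Append(map[string]string{"env": "dev"})

	fmt.Println(col.Type())  // e.g. Map(String, String)
	fmt.Println(col.Rows())  // 2
	fmt.Println(col.Offsets) // [2 3]: cumulative pair counts per row
	fmt.Println(col.Row(1))  // map[env:dev]

	// AppendKV keeps insertion order, unlike ranging over a Go map.
	col.AppendKV([]proto.KV[string, string]{{Key: "env", Value: "staging"}})
}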
+func (c ColMap[K, V]) Prepare() error { + if v, ok := c.Keys.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "prepare data") + } + } + if v, ok := c.Values.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "prepare data") + } + } + return nil +} + +// Infer ensures Inferable column propagation. +func (c *ColMap[K, V]) Infer(t ColumnType) error { + elems := strings.Split(string(t.Elem()), ",") + if len(elems) != 2 { + return errors.New("invalid map type") + } + if v, ok := c.Keys.(Inferable); ok { + ct := ColumnType(strings.TrimSpace(elems[0])) + if err := v.Infer(ct); err != nil { + return errors.Wrap(err, "infer data") + } + } + if v, ok := c.Values.(Inferable); ok { + ct := ColumnType(strings.TrimSpace(elems[1])) + if err := v.Infer(ct); err != nil { + return errors.Wrap(err, "infer data") + } + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go new file mode 100644 index 0000000..1a82509 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go @@ -0,0 +1,73 @@ +package proto + +import ( + "fmt" + + "github.com/go-faster/errors" +) + +// Nothing represents NULL value. +type Nothing struct{} + +// ColNothing represents column of null values. +// Value is row count. +// +// https://clickhouse.com/docs/ru/sql-reference/data-types/special-data-types/nothing +type ColNothing int + +func (c *ColNothing) Append(_ Nothing) { + *c++ +} + +func (c *ColNothing) AppendArr(vs []Nothing) { + *c = ColNothing(int(*c) + len(vs)) +} + +func (c ColNothing) Row(i int) Nothing { + if i >= int(c) { + panic(fmt.Sprintf("[%d] of [%d]Nothing", i, c)) + } + return Nothing{} +} + +func (c ColNothing) Type() ColumnType { + return ColumnTypeNothing +} + +func (c ColNothing) Rows() int { + return int(c) +} + +func (c *ColNothing) DecodeColumn(r *Reader, rows int) error { + *c = ColNothing(rows) + if rows == 0 { + return nil + } + if _, err := r.ReadRaw(rows); err != nil { + return errors.Wrap(err, "read") + } + return nil +} + +func (c *ColNothing) Reset() { + *c = 0 +} + +func (c *ColNothing) Nullable() *ColNullable[Nothing] { + return &ColNullable[Nothing]{ + Values: c, + } +} + +func (c *ColNothing) Array() *ColArr[Nothing] { + return &ColArr[Nothing]{ + Data: c, + } +} + +func (c ColNothing) EncodeColumn(b *Buffer) { + if c == 0 { + return + } + b.PutRaw(make([]byte, c)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go new file mode 100644 index 0000000..516245f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go @@ -0,0 +1,135 @@ +package proto + +import "github.com/go-faster/errors" + +// Compile-time assertions for ColNullable. +var ( + _ ColInput = (*ColNullable[string])(nil) + _ ColResult = (*ColNullable[string])(nil) + _ Column = (*ColNullable[string])(nil) + _ ColumnOf[Nullable[string]] = (*ColNullable[string])(nil) + _ StateEncoder = (*ColNullable[string])(nil) + _ StateDecoder = (*ColNullable[string])(nil) + + _ = ColNullable[string]{ + Values: new(ColStr), + } +) + +// Nullable is T value that can be null. +type Nullable[T any] struct { + Set bool + Value T +} + +// NewNullable returns set value of Nullable[T] to v. +func NewNullable[T any](v T) Nullable[T] { + return Nullable[T]{Set: true, Value: v} +} + +// Null returns null value for Nullable[T]. 
+func Null[T any]() Nullable[T] { + return Nullable[T]{} +} + +func (n Nullable[T]) IsSet() bool { return n.Set } + +func (n Nullable[T]) Or(v T) T { + if !n.Set { + return v + } + return n.Value +} + +// NewColNullable returns new Nullable(T) from v column. +func NewColNullable[T any](v ColumnOf[T]) *ColNullable[T] { + return &ColNullable[T]{ + Values: v, + } +} + +// ColNullable represents Nullable(T) column. +// +// Nulls is nullable "mask" on Values column. +// For example, to encode [null, "", "hello", null, "world"] +// +// Values: ["", "", "hello", "", "world"] (len: 5) +// Nulls: [ 1, 0, 0, 1, 0] (len: 5) +// +// Values and Nulls row counts are always equal. +type ColNullable[T any] struct { + Nulls ColUInt8 + Values ColumnOf[T] +} + +func (c *ColNullable[T]) DecodeState(r *Reader) error { + if s, ok := c.Values.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "values state") + } + } + return nil +} + +func (c ColNullable[T]) EncodeState(b *Buffer) { + if s, ok := c.Values.(StateEncoder); ok { + s.EncodeState(b) + } +} + +func (c ColNullable[T]) Type() ColumnType { + return ColumnTypeNullable.Sub(c.Values.Type()) +} + +func (c *ColNullable[T]) DecodeColumn(r *Reader, rows int) error { + if err := c.Nulls.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "nulls") + } + if err := c.Values.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "values") + } + return nil +} + +func (c ColNullable[T]) Rows() int { + return c.Nulls.Rows() +} + +func (c *ColNullable[T]) Append(v Nullable[T]) { + null := boolTrue + if v.Set { + null = boolFalse + } + c.Nulls.Append(null) + c.Values.Append(v.Value) +} + +func (c *ColNullable[T]) AppendArr(v []Nullable[T]) { + for _, vv := range v { + c.Append(vv) + } +} + +func (c ColNullable[T]) Row(i int) Nullable[T] { + return Nullable[T]{ + Value: c.Values.Row(i), + Set: c.Nulls.Row(i) == boolFalse, + } +} + +func (c *ColNullable[T]) Reset() { + c.Nulls.Reset() + c.Values.Reset() +} + +func (c ColNullable[T]) EncodeColumn(b *Buffer) { + c.Nulls.EncodeColumn(b) + c.Values.EncodeColumn(b) +} + +func (c ColNullable[T]) IsElemNull(i int) bool { + if i < c.Rows() { + return c.Nulls[i] == boolTrue + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_point.go b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go new file mode 100644 index 0000000..0e1549f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go @@ -0,0 +1,63 @@ +package proto + +import "github.com/go-faster/errors" + +type Point struct { + X, Y float64 +} + +// Compile-time assertions for ColPoint. 
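The null mask documented for ColNullable above can be seen directly in a small sketch: Nulls holds 1 for null rows and 0 for set rows, while Values always carries a placeholder so both columns stay the same length. The sample values are arbitrary; NewColNullable, NewNullable and Null are the helpers defined above.

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	col := proto.NewColNullable[string](new(proto.ColStr))

	col.Append(proto.Null[string]())       // row 0: NULL
	col.Append(proto.NewNullable("hello")) // row 1: "hello"
	col.AppendArr([]proto.Nullable[string]{
		proto.NewNullable("world"),
		proto.Null[string](),
	})

	fmt.Println(col.Rows())           // 4
	fmt.Println(col.IsElemNull(0))    // true
	fmt.Println(col.Row(1).Value)     // hello
	fmt.Println(col.Row(3).Or("n/a")) // n/a: Or falls back when the row is unset
	fmt.Println(col.Nulls)            // [1 0 0 1]
}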
+var ( + _ ColInput = ColPoint{} + _ ColResult = (*ColPoint)(nil) + _ Column = (*ColPoint)(nil) + _ ColumnOf[Point] = (*ColPoint)(nil) +) + +type ColPoint struct { + X, Y ColFloat64 +} + +func (c *ColPoint) Append(v Point) { + c.X.Append(v.X) + c.Y.Append(v.Y) +} + +func (c *ColPoint) AppendArr(v []Point) { + for _, vv := range v { + c.Append(vv) + } +} + +func (c ColPoint) Row(i int) Point { + return Point{ + X: c.X.Row(i), + Y: c.Y.Row(i), + } +} + +func (c ColPoint) Type() ColumnType { return ColumnTypePoint } +func (c ColPoint) Rows() int { return c.X.Rows() } + +func (c *ColPoint) DecodeColumn(r *Reader, rows int) error { + if err := c.X.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "x") + } + if err := c.Y.DecodeColumn(r, rows); err != nil { + return errors.Wrap(err, "y") + } + return nil +} + +func (c *ColPoint) Reset() { + c.X.Reset() + c.Y.Reset() +} + +func (c ColPoint) EncodeColumn(b *Buffer) { + if b == nil { + return + } + c.X.EncodeColumn(b) + c.Y.EncodeColumn(b) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_raw.go b/vendor/github.com/ClickHouse/ch-go/proto/col_raw.go new file mode 100644 index 0000000..ba655f4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_raw.go @@ -0,0 +1,35 @@ +package proto + +import "github.com/go-faster/errors" + +// ColRaw is Column that performs zero decoding or encoding. +// T, Size are required. +// +// TODO: support strings and T, Size inference. +// +// Useful for copying from one source to another. +type ColRaw struct { + T ColumnType // type of column + Size int // size of single value + + Data []byte // raw value of column + Count int // count of rows +} + +func (c ColRaw) Type() ColumnType { return c.T } +func (c ColRaw) Rows() int { return c.Count } +func (c ColRaw) EncodeColumn(b *Buffer) { b.Buf = append(b.Buf, c.Data...) } + +func (c *ColRaw) DecodeColumn(r *Reader, rows int) error { + c.Count = rows + c.Data = append(c.Data[:0], make([]byte, c.Size*rows)...) + if err := r.ReadFull(c.Data); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +func (c *ColRaw) Reset() { + c.Count = 0 + c.Data = c.Data[:0] +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go new file mode 100644 index 0000000..325a17b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go @@ -0,0 +1,84 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +package proto + +import ( + "strconv" + "unsafe" + + "github.com/go-faster/errors" +) + +// ColRawOf is generic raw column. +type ColRawOf[X comparable] []X + +func (c *ColRawOf[X]) AppendArr(v []X) { + for _, x := range v { + c.Append(x) + } +} + +func (c ColRawOf[X]) Size() int { + var x X + return int(unsafe.Sizeof(x)) // #nosec G103 +} + +// Type returns ColumnType of ColRawOf. +func (c ColRawOf[X]) Type() ColumnType { + return ColumnTypeFixedString.With(strconv.Itoa(c.Size())) +} + +// Rows returns count of rows in column. +func (c ColRawOf[X]) Rows() int { + return len(c) +} + +// Row returns value of "i" row. +func (c ColRawOf[X]) Row(i int) X { + return c[i] +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColRawOf[X]) Reset() { + *c = (*c)[:0] +} + +// Append value to column. +func (c *ColRawOf[X]) Append(v X) { + *c = append(*c, v) +} + +// EncodeColumn encodes ColRawOf rows to *Buffer. 
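ColRawOf above maps any fixed-size comparable Go value onto FixedString(sizeof) and copies it to the wire verbatim; note it is only compiled on the unsafe-enabled builds (amd64/arm64/riscv64 without purego). A small sketch, where the 16-byte element type is an arbitrary stand-in for a UUID-like payload and the printed type string is an assumption about ColumnType.With:

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColRawOf[[16]byte]
	col.Append([16]byte{1, 2, 3})
	col.AppendArr([][16]byte{{4}, {5}})

	fmt.Println(col.Size())    // 16
	fmt.Println(col.Type())    // e.g. FixedString(16)
	fmt.Println(col.Rows())    // 3
	fmt.Println(col.Row(2)[0]) // 5
}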
+func (c ColRawOf[X]) EncodeColumn(b *Buffer) { + if len(c) == 0 { + return + } + offset := len(b.Buf) + var x X + size := unsafe.Sizeof(x) // #nosec G103 + b.Buf = append(b.Buf, make([]byte, int(size)*len(c))...) + s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103 + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + dst := b.Buf[offset:] + copy(dst, src) +} + +// DecodeColumn decodes ColRawOf rows from *Reader. +func (c *ColRawOf[X]) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]X, rows)...) + s := *(*slice)(unsafe.Pointer(c)) // #nosec G103 + var x X + size := unsafe.Sizeof(x) // #nosec G103 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go new file mode 100644 index 0000000..8f48ad7 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go @@ -0,0 +1,205 @@ +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +type Position struct { + Start int + End int +} + +// ColStr represents String column. +// +// Use ColBytes for []bytes ColumnOf implementation. +type ColStr struct { + Buf []byte + Pos []Position +} + +// Append string to column. +func (c *ColStr) Append(v string) { + start := len(c.Buf) + c.Buf = append(c.Buf, v...) + end := len(c.Buf) + c.Pos = append(c.Pos, Position{Start: start, End: end}) +} + +// AppendBytes append byte slice as string to column. +func (c *ColStr) AppendBytes(v []byte) { + start := len(c.Buf) + c.Buf = append(c.Buf, v...) + end := len(c.Buf) + c.Pos = append(c.Pos, Position{Start: start, End: end}) +} + +func (c *ColStr) AppendArr(v []string) { + for _, e := range v { + c.Append(e) + } +} + +// Compile-time assertions for ColStr. +var ( + _ ColInput = ColStr{} + _ ColResult = (*ColStr)(nil) + _ Column = (*ColStr)(nil) + _ ColumnOf[string] = (*ColStr)(nil) + _ Arrayable[string] = (*ColStr)(nil) +) + +// Type returns ColumnType of String. +func (ColStr) Type() ColumnType { + return ColumnTypeString +} + +// Rows returns count of rows in column. +func (c ColStr) Rows() int { + return len(c.Pos) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColStr) Reset() { + c.Buf = c.Buf[:0] + c.Pos = c.Pos[:0] +} + +// EncodeColumn encodes String rows to *Buffer. +func (c ColStr) EncodeColumn(b *Buffer) { + buf := make([]byte, binary.MaxVarintLen64) + for _, p := range c.Pos { + n := binary.PutUvarint(buf, uint64(p.End-p.Start)) + b.Buf = append(b.Buf, buf[:n]...) + b.Buf = append(b.Buf, c.Buf[p.Start:p.End]...) + } +} + +// ForEach calls f on each string from column. +func (c ColStr) ForEach(f func(i int, s string) error) error { + return c.ForEachBytes(func(i int, b []byte) error { + return f(i, string(b)) + }) +} + +// First returns first row of column. +func (c ColStr) First() string { + return c.Row(0) +} + +// Row returns row with number i. +func (c ColStr) Row(i int) string { + p := c.Pos[i] + return string(c.Buf[p.Start:p.End]) +} + +// RowBytes returns row with number i as byte slice. +func (c ColStr) RowBytes(i int) []byte { + p := c.Pos[i] + return c.Buf[p.Start:p.End] +} + +// ForEachBytes calls f on each string from column as byte slice. 
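ColStr's flat layout above is easiest to see in a tiny sketch: all row bytes share one Buf, Pos records each row's [Start, End) span, Row copies the bytes into a string, and ForEach walks the spans in order. The sample strings are arbitrary.

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColStr
	col.AppendArr([]string{"foo", "barbaz"})

	fmt.Println(col.Rows())      // 2
	fmt.Println(col.Pos)         // [{0 3} {3 9}]
	fmt.Println(string(col.Buf)) // foobarbaz
	fmt.Println(col.Row(1))      // barbaz

	_ = col.ForEach(func(i int, s string) error {
		fmt.Println(i, s) // 0 foo, then 1 barbaz
		return nil
	})
}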
+func (c ColStr) ForEachBytes(f func(i int, b []byte) error) error { + for i, p := range c.Pos { + if err := f(i, c.Buf[p.Start:p.End]); err != nil { + return err + } + } + return nil +} + +// DecodeColumn decodes String rows from *Reader. +func (c *ColStr) DecodeColumn(r *Reader, rows int) error { + var p Position + size := len(c.Pos) + if cap(c.Pos) < size+rows { + c.Pos = append(c.Pos, make([]Position, size+rows-cap(c.Pos))...) + } + c.Pos = c.Pos[:0] + c.Buf = c.Buf[:cap(c.Buf)] + for i := 0; i < rows; i++ { + n, err := r.StrLen() + if err != nil { + return errors.Wrapf(err, "row %d: read length", i) + } + + p.Start = p.End + p.End += n + + if len(c.Buf) < p.End { + var an int + if n < 128 { + // small size, do batch buffer alloc + an = n * (rows - i) + } else { + an = n + } + c.Buf = append(c.Buf, make([]byte, an)...) + } + if err := r.ReadFull(c.Buf[p.Start:p.End]); err != nil { + return errors.Wrapf(err, "row %d: read full", i) + } + c.Pos = append(c.Pos, p) + } + c.Buf = c.Buf[:p.End] + return nil +} + +// LowCardinality returns LowCardinality(String). +func (c *ColStr) LowCardinality() *ColLowCardinality[string] { + return &ColLowCardinality[string]{ + index: c, + } +} + +// Array is helper that creates Array(String). +func (c *ColStr) Array() *ColArr[string] { + return &ColArr[string]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(String). +func (c *ColStr) Nullable() *ColNullable[string] { + return &ColNullable[string]{ + Values: c, + } +} + +// ColBytes is ColStr wrapper to be ColumnOf for []byte. +type ColBytes struct { + ColStr +} + +// Row returns row with number i. +func (c ColBytes) Row(i int) []byte { + return c.RowBytes(i) +} + +// Append byte slice to column. +func (c *ColBytes) Append(v []byte) { + c.AppendBytes(v) +} + +// AppendArr append slice of byte slices to column. +func (c *ColBytes) AppendArr(v [][]byte) { + for _, s := range v { + c.Append(s) + } +} + +// Array is helper that creates Array(String). +func (c *ColBytes) Array() *ColArr[[]byte] { + return &ColArr[[]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(String). +func (c *ColBytes) Nullable() *ColNullable[[]byte] { + return &ColNullable[[]byte]{ + Values: c, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go new file mode 100644 index 0000000..ac7ad6c --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go @@ -0,0 +1,169 @@ +package proto + +import "github.com/go-faster/errors" + +// ColTuple is Tuple column. +// +// Basically it is just a group of columns. +type ColTuple []Column + +// Compile-time assertions for ColTuple. +var ( + _ ColInput = ColTuple(nil) + _ ColResult = ColTuple(nil) + _ Column = ColTuple(nil) + _ StateEncoder = ColTuple(nil) + _ StateDecoder = ColTuple(nil) + _ Inferable = ColTuple(nil) + _ Preparable = ColTuple(nil) +) + +func (c ColTuple) DecodeState(r *Reader) error { + for i, v := range c { + if s, ok := v.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrapf(err, "[%d]", i) + } + } + } + return nil +} + +// ColNamed is named column. +// Used in named tuples. 
+type ColNamed[T any] struct { + ColumnOf[T] + Name string +} + +func (c *ColNamed[T]) Infer(t ColumnType) error { + if v, ok := c.ColumnOf.(Inferable); ok { + if err := v.Infer(t); err != nil { + return errors.Wrap(err, "named") + } + } + return nil +} + +func (c *ColNamed[T]) Prepare() error { + if v, ok := c.ColumnOf.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "named") + } + } + return nil +} + +func (c ColNamed[T]) DecodeState(r *Reader) error { + if v, ok := c.ColumnOf.(StateDecoder); ok { + if err := v.DecodeState(r); err != nil { + return errors.Wrap(err, "named") + } + } + return nil +} + +func (c ColNamed[T]) EncodeState(b *Buffer) { + if v, ok := c.ColumnOf.(StateEncoder); ok { + v.EncodeState(b) + } +} + +// Compile-time assertions for ColNamed. +var ( + _ ColInput = Named[string]((*ColStr)(nil), "name") + _ ColResult = Named[string]((*ColStr)(nil), "name") + _ Column = Named[string]((*ColStr)(nil), "name") + _ StateEncoder = Named[string]((*ColStr)(nil), "name") + _ StateDecoder = Named[string]((*ColStr)(nil), "name") + _ Inferable = Named[string]((*ColStr)(nil), "name") + _ Preparable = Named[string]((*ColStr)(nil), "name") +) + +func Named[T any](data ColumnOf[T], name string) *ColNamed[T] { + return &ColNamed[T]{ + ColumnOf: data, + Name: name, + } +} + +func (c ColNamed[T]) ColumnName() string { + return c.Name +} + +func (c ColNamed[T]) Type() ColumnType { + return ColumnType(c.Name + " " + c.ColumnOf.Type().String()) +} + +func (c ColTuple) Prepare() error { + for _, v := range c { + if s, ok := v.(Preparable); ok { + if err := s.Prepare(); err != nil { + return errors.Wrap(err, "prepare") + } + } + } + return nil +} + +func (c ColTuple) Infer(t ColumnType) error { + for _, v := range c { + if s, ok := v.(Inferable); ok { + if err := s.Infer(t); err != nil { + return errors.Wrap(err, "infer") + } + } + } + return nil +} + +func (c ColTuple) EncodeState(b *Buffer) { + for _, v := range c { + if s, ok := v.(StateEncoder); ok { + s.EncodeState(b) + } + } +} + +func (c ColTuple) Type() ColumnType { + var types []ColumnType + for _, v := range c { + types = append(types, v.Type()) + } + return ColumnTypeTuple.Sub(types...) +} + +func (c ColTuple) First() Column { + if len(c) == 0 { + return nil + } + return c[0] +} + +func (c ColTuple) Rows() int { + if f := c.First(); f != nil { + return f.Rows() + } + return 0 +} + +func (c ColTuple) DecodeColumn(r *Reader, rows int) error { + for i, v := range c { + if err := v.DecodeColumn(r, rows); err != nil { + return errors.Wrapf(err, "[%d]", i) + } + } + return nil +} + +func (c ColTuple) Reset() { + for _, v := range c { + v.Reset() + } +} + +func (c ColTuple) EncodeColumn(b *Buffer) { + for _, v := range c { + v.EncodeColumn(b) + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go new file mode 100644 index 0000000..e34f07e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt128 represents UInt128 column. +type ColUInt128 []UInt128 + +// Compile-time assertions for ColUInt128. +var ( + _ ColInput = ColUInt128{} + _ ColResult = (*ColUInt128)(nil) + _ Column = (*ColUInt128)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt128) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
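A named tuple built from the ColTuple and Named helpers above looks roughly like this: each element stays an independent column, and the tuple fans Encode/Decode calls out to them in order. ColInt64 is the generated Int64 column from this package; the field names, sample row and the exact Type() rendering are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	id := new(proto.ColInt64)
	name := new(proto.ColStr)

	tuple := proto.ColTuple{
		proto.Named[int64](id, "id"),
		proto.Named[string](name, "name"),
	}

	// Rows must stay aligned across all elements of the tuple.
	id.Append(1)
	name.Append("alice")

	fmt.Println(tuple.Type()) // e.g. Tuple(id Int64, name String)
	fmt.Println(tuple.Rows()) // 1

	var buf proto.Buffer
	tuple.EncodeColumn(&buf)
	fmt.Println(len(buf.Buf)) // bytes written by both element columns
}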
+func (c *ColUInt128) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt128. +func (ColUInt128) Type() ColumnType { + return ColumnTypeUInt128 +} + +// Row returns i-th row of column. +func (c ColUInt128) Row(i int) UInt128 { + return c[i] +} + +// Append UInt128 to column. +func (c *ColUInt128) Append(v UInt128) { + *c = append(*c, v) +} + +// Append UInt128 slice to column. +func (c *ColUInt128) AppendArr(vs []UInt128) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt128 . +func (c *ColUInt128) LowCardinality() *ColLowCardinality[UInt128] { + return &ColLowCardinality[UInt128]{ + index: c, + } +} + +// Array is helper that creates Array of UInt128. +func (c *ColUInt128) Array() *ColArr[UInt128] { + return &ColArr[UInt128]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(UInt128). +func (c *ColUInt128) Nullable() *ColNullable[UInt128] { + return &ColNullable[UInt128]{ + Values: c, + } +} + +// NewArrUInt128 returns new Array(UInt128). +func NewArrUInt128() *ColArr[UInt128] { + return &ColArr[UInt128]{ + Data: new(ColUInt128), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go new file mode 100644 index 0000000..bbe55dc --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt128 rows from *Reader. +func (c *ColUInt128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binUInt128(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes UInt128 rows to *Buffer. +func (c ColUInt128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binPutUInt128( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go new file mode 100644 index 0000000..5989b5f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes UInt128 rows from *Reader. +func (c *ColUInt128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]UInt128, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes UInt128 rows to *Buffer. 
+func (c ColUInt128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go new file mode 100644 index 0000000..7bc8ba6 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt16 represents UInt16 column. +type ColUInt16 []uint16 + +// Compile-time assertions for ColUInt16. +var ( + _ ColInput = ColUInt16{} + _ ColResult = (*ColUInt16)(nil) + _ Column = (*ColUInt16)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt16) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColUInt16) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt16. +func (ColUInt16) Type() ColumnType { + return ColumnTypeUInt16 +} + +// Row returns i-th row of column. +func (c ColUInt16) Row(i int) uint16 { + return c[i] +} + +// Append uint16 to column. +func (c *ColUInt16) Append(v uint16) { + *c = append(*c, v) +} + +// Append uint16 slice to column. +func (c *ColUInt16) AppendArr(vs []uint16) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt16 . +func (c *ColUInt16) LowCardinality() *ColLowCardinality[uint16] { + return &ColLowCardinality[uint16]{ + index: c, + } +} + +// Array is helper that creates Array of uint16. +func (c *ColUInt16) Array() *ColArr[uint16] { + return &ColArr[uint16]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(uint16). +func (c *ColUInt16) Nullable() *ColNullable[uint16] { + return &ColNullable[uint16]{ + Values: c, + } +} + +// NewArrUInt16 returns new Array(UInt16). +func NewArrUInt16() *ColArr[uint16] { + return &ColArr[uint16]{ + Data: new(ColUInt16), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go new file mode 100644 index 0000000..219f3a6 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt16 rows from *Reader. +func (c *ColUInt16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binary.LittleEndian.Uint16(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes UInt16 rows to *Buffer. +func (c ColUInt16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ for _, vv := range v { + binary.LittleEndian.PutUint16( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go new file mode 100644 index 0000000..d98d953 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes UInt16 rows from *Reader. +func (c *ColUInt16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]uint16, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes UInt16 rows to *Buffer. +func (c ColUInt16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go new file mode 100644 index 0000000..b68a119 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt256 represents UInt256 column. +type ColUInt256 []UInt256 + +// Compile-time assertions for ColUInt256. +var ( + _ ColInput = ColUInt256{} + _ ColResult = (*ColUInt256)(nil) + _ Column = (*ColUInt256)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt256) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColUInt256) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt256. +func (ColUInt256) Type() ColumnType { + return ColumnTypeUInt256 +} + +// Row returns i-th row of column. +func (c ColUInt256) Row(i int) UInt256 { + return c[i] +} + +// Append UInt256 to column. +func (c *ColUInt256) Append(v UInt256) { + *c = append(*c, v) +} + +// Append UInt256 slice to column. +func (c *ColUInt256) AppendArr(vs []UInt256) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt256 . +func (c *ColUInt256) LowCardinality() *ColLowCardinality[UInt256] { + return &ColLowCardinality[UInt256]{ + index: c, + } +} + +// Array is helper that creates Array of UInt256. +func (c *ColUInt256) Array() *ColArr[UInt256] { + return &ColArr[UInt256]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(UInt256). +func (c *ColUInt256) Nullable() *ColNullable[UInt256] { + return &ColNullable[UInt256]{ + Values: c, + } +} + +// NewArrUInt256 returns new Array(UInt256). 
+func NewArrUInt256() *ColArr[UInt256] { + return &ColArr[UInt256]{ + Data: new(ColUInt256), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go new file mode 100644 index 0000000..68633e1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt256 rows from *Reader. +func (c *ColUInt256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 256 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binUInt256(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes UInt256 rows to *Buffer. +func (c ColUInt256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binPutUInt256( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go new file mode 100644 index 0000000..02488d3 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes UInt256 rows from *Reader. +func (c *ColUInt256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]UInt256, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 256 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes UInt256 rows to *Buffer. +func (c ColUInt256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 256 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go new file mode 100644 index 0000000..41abca5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt32 represents UInt32 column. +type ColUInt32 []uint32 + +// Compile-time assertions for ColUInt32. +var ( + _ ColInput = ColUInt32{} + _ ColResult = (*ColUInt32)(nil) + _ Column = (*ColUInt32)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. 
+func (c *ColUInt32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt32. +func (ColUInt32) Type() ColumnType { + return ColumnTypeUInt32 +} + +// Row returns i-th row of column. +func (c ColUInt32) Row(i int) uint32 { + return c[i] +} + +// Append uint32 to column. +func (c *ColUInt32) Append(v uint32) { + *c = append(*c, v) +} + +// Append uint32 slice to column. +func (c *ColUInt32) AppendArr(vs []uint32) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt32 . +func (c *ColUInt32) LowCardinality() *ColLowCardinality[uint32] { + return &ColLowCardinality[uint32]{ + index: c, + } +} + +// Array is helper that creates Array of uint32. +func (c *ColUInt32) Array() *ColArr[uint32] { + return &ColArr[uint32]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(uint32). +func (c *ColUInt32) Nullable() *ColNullable[uint32] { + return &ColNullable[uint32]{ + Values: c, + } +} + +// NewArrUInt32 returns new Array(UInt32). +func NewArrUInt32() *ColArr[uint32] { + return &ColArr[uint32]{ + Data: new(ColUInt32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go new file mode 100644 index 0000000..0bc7de9 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt32 rows from *Reader. +func (c *ColUInt32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binary.LittleEndian.Uint32(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes UInt32 rows to *Buffer. +func (c ColUInt32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + binary.LittleEndian.PutUint32( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go new file mode 100644 index 0000000..3ddfa76 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes UInt32 rows from *Reader. +func (c *ColUInt32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]uint32, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes UInt32 rows to *Buffer. 
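// (Illustrative note, not from the upstream ch-go sources: this is the unsafe
// fast path. It reinterprets the []uint32 header as a []byte with Len and Cap
// scaled by 4 and copies the whole column into the buffer with a single copy.
// That is only correct when the in-memory layout already matches the
// little-endian wire format, hence the (amd64 || arm64 || riscv64) && !purego
// constraint; other targets use the binary.LittleEndian loop in
// col_uint32_safe_gen.go instead.)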
+func (c ColUInt32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go new file mode 100644 index 0000000..4521cd4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt64 represents UInt64 column. +type ColUInt64 []uint64 + +// Compile-time assertions for ColUInt64. +var ( + _ ColInput = ColUInt64{} + _ ColResult = (*ColUInt64)(nil) + _ Column = (*ColUInt64)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColUInt64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt64. +func (ColUInt64) Type() ColumnType { + return ColumnTypeUInt64 +} + +// Row returns i-th row of column. +func (c ColUInt64) Row(i int) uint64 { + return c[i] +} + +// Append uint64 to column. +func (c *ColUInt64) Append(v uint64) { + *c = append(*c, v) +} + +// Append uint64 slice to column. +func (c *ColUInt64) AppendArr(vs []uint64) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt64 . +func (c *ColUInt64) LowCardinality() *ColLowCardinality[uint64] { + return &ColLowCardinality[uint64]{ + index: c, + } +} + +// Array is helper that creates Array of uint64. +func (c *ColUInt64) Array() *ColArr[uint64] { + return &ColArr[uint64]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(uint64). +func (c *ColUInt64) Nullable() *ColNullable[uint64] { + return &ColNullable[uint64]{ + Values: c, + } +} + +// NewArrUInt64 returns new Array(UInt64). +func NewArrUInt64() *ColArr[uint64] { + return &ColArr[uint64]{ + Data: new(ColUInt64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go new file mode 100644 index 0000000..deea8a4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt64 rows from *Reader. +func (c *ColUInt64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 / 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + binary.LittleEndian.Uint64(data[i:i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes UInt64 rows to *Buffer. +func (c ColUInt64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
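	// (Illustrative note, not from the upstream ch-go sources: the buffer was
	// just grown by size*len(v) zero bytes; the loop below overwrites that
	// region, writing each uint64 little-endian at consecutive 8-byte offsets.)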
+ for _, vv := range v { + binary.LittleEndian.PutUint64( + b.Buf[offset:offset+size], + vv, + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go new file mode 100644 index 0000000..664f80f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes UInt64 rows from *Reader. +func (c *ColUInt64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]uint64, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 / 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes UInt64 rows to *Buffer. +func (c ColUInt64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 / 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go new file mode 100644 index 0000000..02c0516 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColUInt8 represents UInt8 column. +type ColUInt8 []uint8 + +// Compile-time assertions for ColUInt8. +var ( + _ ColInput = ColUInt8{} + _ ColResult = (*ColUInt8)(nil) + _ Column = (*ColUInt8)(nil) +) + +// Rows returns count of rows in column. +func (c ColUInt8) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColUInt8) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of UInt8. +func (ColUInt8) Type() ColumnType { + return ColumnTypeUInt8 +} + +// Row returns i-th row of column. +func (c ColUInt8) Row(i int) uint8 { + return c[i] +} + +// Append uint8 to column. +func (c *ColUInt8) Append(v uint8) { + *c = append(*c, v) +} + +// Append uint8 slice to column. +func (c *ColUInt8) AppendArr(vs []uint8) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for UInt8 . +func (c *ColUInt8) LowCardinality() *ColLowCardinality[uint8] { + return &ColLowCardinality[uint8]{ + index: c, + } +} + +// Array is helper that creates Array of uint8. +func (c *ColUInt8) Array() *ColArr[uint8] { + return &ColArr[uint8]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(uint8). +func (c *ColUInt8) Nullable() *ColNullable[uint8] { + return &ColNullable[uint8]{ + Values: c, + } +} + +// NewArrUInt8 returns new Array(UInt8). +func NewArrUInt8() *ColArr[uint8] { + return &ColArr[uint8]{ + Data: new(ColUInt8), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go new file mode 100644 index 0000000..ec5ff19 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go @@ -0,0 +1,33 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
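// (Illustrative note, not from the upstream ch-go sources: unlike the wider
// integer columns, UInt8 carries no //go:build tag and no unsafe counterpart
// here, since single bytes have no endianness; DecodeColumn below simply
// appends the raw bytes read from the wire, and EncodeColumn appends the
// slice to the buffer as-is.)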
+ +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes UInt8 rows from *Reader. +func (c *ColUInt8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + data, err := r.ReadRaw(rows) + if err != nil { + return errors.Wrap(err, "read") + } + *c = append(*c, data...) + return nil +} + +// EncodeColumn encodes UInt8 rows to *Buffer. +func (c ColUInt8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + b.Buf = append(b.Buf, v...) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go new file mode 100644 index 0000000..1b9685a --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go @@ -0,0 +1,33 @@ +package proto + +import ( + "github.com/google/uuid" +) + +// ColUUID is UUID column. +type ColUUID []uuid.UUID + +// Compile-time assertions for ColUUID. +var ( + _ ColInput = ColUUID{} + _ ColResult = (*ColUUID)(nil) + _ Column = (*ColUUID)(nil) + _ ColumnOf[uuid.UUID] = (*ColUUID)(nil) +) + +func (c ColUUID) Type() ColumnType { return ColumnTypeUUID } +func (c ColUUID) Rows() int { return len(c) } +func (c ColUUID) Row(i int) uuid.UUID { return c[i] } +func (c *ColUUID) Reset() { *c = (*c)[:0] } +func (c *ColUUID) Append(v uuid.UUID) { *c = append(*c, v) } +func (c *ColUUID) AppendArr(v []uuid.UUID) { *c = append(*c, v...) } + +// Nullable is helper that creates Nullable(uuid.UUID). +func (c *ColUUID) Nullable() *ColNullable[uuid.UUID] { + return NewColNullable[uuid.UUID](c) +} + +// Array is helper that creates Array of uuid.UUID. +func (c *ColUUID) Array() *ColArr[uuid.UUID] { + return NewArray[uuid.UUID](c) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go new file mode 100644 index 0000000..8de9408 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go @@ -0,0 +1,36 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +package proto + +import ( + "github.com/go-faster/errors" + "github.com/segmentio/asm/bswap" +) + +func (c *ColUUID) DecodeColumn(r *Reader, rows int) error { + const size = 16 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + bswap.Swap64(data) // BE <-> LE + for i := 0; i < len(data); i += size { + // In-place conversion from slice to array. + // https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer + v = append(v, *(*[size]byte)(data[i : i+size])) + } + *c = v + return nil +} + +func (c ColUUID) EncodeColumn(b *Buffer) { + const size = 16 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(c))...) + for _, v := range c { + copy(b.Buf[offset:offset+size], v[:]) + offset += size + } + bswap.Swap64(b.Buf) // BE <-> LE +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go new file mode 100644 index 0000000..18fa73f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go @@ -0,0 +1,49 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" + "github.com/google/uuid" + "github.com/segmentio/asm/bswap" +) + +func (c *ColUUID) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([]uuid.UUID, rows)...) 
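	// (Illustrative note, not from the upstream ch-go sources: after the bulk
	// read below, bswap.Swap64 reverses the byte order inside each 8-byte half
	// in place. ClickHouse appears to transmit a UUID as two 64-bit
	// little-endian words, while uuid.UUID keeps the RFC 4122 big-endian byte
	// order, so the swap converts between the two layouts, matching the
	// "BE <-> LE" comment below.)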
+ + // Memory layout of [N]UUID is same as [N*sizeof(UUID)]byte. + // So just interpret c as byte slice and read data into it. + s := *(*slice)(unsafe.Pointer(c)) // #nosec: G103 // memory layout matches + const size = 16 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) // #nosec: G103 // memory layout matches + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + bswap.Swap64(dst) // BE <-> LE + + return nil +} + +// EncodeColumn encodes ColUUID rows to *Buffer. +func (c ColUUID) EncodeColumn(b *Buffer) { + if len(c) == 0 { + return + } + offset := len(b.Buf) + const size = 16 + b.Buf = append(b.Buf, make([]byte, size*len(c))...) + s := *(*slice)(unsafe.Pointer(&c)) // #nosec: G103 // memory layout matches + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec: G103 // memory layout matches + dst := b.Buf[offset:] + copy(dst, src) + bswap.Swap64(dst) // BE <-> LE +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/column.go b/vendor/github.com/ClickHouse/ch-go/proto/column.go new file mode 100644 index 0000000..1cdeb31 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/column.go @@ -0,0 +1,290 @@ +package proto + +import ( + "fmt" + "strings" + + "github.com/go-faster/errors" +) + +// ColInput column. +type ColInput interface { + Type() ColumnType + Rows() int + EncodeColumn(b *Buffer) +} + +// ColResult column. +type ColResult interface { + Type() ColumnType + Rows() int + DecodeColumn(r *Reader, rows int) error + Resettable +} + +type Column interface { + ColResult + ColInput +} + +// ColumnOf is generic Column(T) constraint. +type ColumnOf[T any] interface { + Column + Append(v T) + AppendArr(v []T) + Row(i int) T +} + +type StateEncoder interface { + EncodeState(b *Buffer) +} + +type StateDecoder interface { + DecodeState(r *Reader) error +} + +type Stateful interface { + StateEncoder + StateDecoder +} + +// Inferable can be inferenced from type. +type Inferable interface { + Infer(t ColumnType) error +} + +// Preparable should be prepared before encoding or decoding. +type Preparable interface { + Prepare() error +} + +// TODO: merge preparable with inferable? + +// ColumnType is type of column element. +type ColumnType string + +func (c ColumnType) String() string { + return string(c) +} + +func (c ColumnType) Base() ColumnType { + if c == "" { + return "" + } + var ( + v = string(c) + start = strings.Index(v, "(") + end = strings.LastIndex(v, ")") + ) + if start <= 0 || end <= 0 || end < start { + return c + } + return c[:start] +} + +// Conflicts reports whether two types conflict. +func (c ColumnType) Conflicts(b ColumnType) bool { + if c == b { + return false + } + { + a := c + if b.Base() == ColumnTypeEnum8 || b.Base() == ColumnTypeEnum16 { + a, b = b, a + } + switch { + case a.Base() == ColumnTypeEnum8 && b == ColumnTypeInt8: + return false + case a.Base() == ColumnTypeEnum16 && b == ColumnTypeInt16: + return false + } + } + if c.Base() != b.Base() { + return true + } + if c.normalizeCommas() == b.normalizeCommas() { + return false + } + switch c.Base() { + case ColumnTypeDateTime, ColumnTypeDateTime64: + // TODO(ernado): improve check + return false + } + return true +} + +func (c ColumnType) normalizeCommas() ColumnType { + // Should we check for escaped commas in enums here? 
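	// (Illustrative note, not from the upstream ch-go sources: the whole type
	// string is split on commas and each piece is trimmed, so spellings that
	// differ only in whitespace, e.g. "Map(String, String)" and
	// "Map(String,String)", normalize to the same value before the equality
	// check in Conflicts.)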
+ const sep = "," + var elems []string + for _, e := range strings.Split(string(c), sep) { + elems = append(elems, strings.TrimSpace(e)) + } + return ColumnType(strings.Join(elems, sep)) +} + +// With returns ColumnType(p1, p2, ...) from ColumnType. +func (c ColumnType) With(params ...string) ColumnType { + if len(params) == 0 { + return c + } + s := fmt.Sprintf("%s(%s)", + c, strings.Join(params, ", "), + ) + return ColumnType(s) +} + +// Sub of T returns T(A, B, ...). +func (c ColumnType) Sub(subtypes ...ColumnType) ColumnType { + var params []string + for _, t := range subtypes { + params = append(params, t.String()) + } + return c.With(params...) +} + +func (c ColumnType) Elem() ColumnType { + if c == "" { + return "" + } + var ( + v = string(c) + start = strings.Index(v, "(") + end = strings.LastIndex(v, ")") + ) + if start <= 0 || end <= 0 || end < start { + // No element. + return "" + } + return c[start+1 : end] +} + +// IsArray reports whether ColumnType is composite. +func (c ColumnType) IsArray() bool { + return strings.HasPrefix(string(c), string(ColumnTypeArray)) +} + +// Array returns Array(ColumnType). +func (c ColumnType) Array() ColumnType { + return ColumnTypeArray.Sub(c) +} + +// Common colum type names. Does not represent full set of supported types, +// because ColumnTypeArray is composable; actual type is composite. +// +// For example: Array(Int8) or even Array(Array(String)). +const ( + ColumnTypeNone ColumnType = "" + ColumnTypeInt8 ColumnType = "Int8" + ColumnTypeInt16 ColumnType = "Int16" + ColumnTypeInt32 ColumnType = "Int32" + ColumnTypeInt64 ColumnType = "Int64" + ColumnTypeInt128 ColumnType = "Int128" + ColumnTypeInt256 ColumnType = "Int256" + ColumnTypeUInt8 ColumnType = "UInt8" + ColumnTypeUInt16 ColumnType = "UInt16" + ColumnTypeUInt32 ColumnType = "UInt32" + ColumnTypeUInt64 ColumnType = "UInt64" + ColumnTypeUInt128 ColumnType = "UInt128" + ColumnTypeUInt256 ColumnType = "UInt256" + ColumnTypeFloat32 ColumnType = "Float32" + ColumnTypeFloat64 ColumnType = "Float64" + ColumnTypeString ColumnType = "String" + ColumnTypeFixedString ColumnType = "FixedString" + ColumnTypeArray ColumnType = "Array" + ColumnTypeIPv4 ColumnType = "IPv4" + ColumnTypeIPv6 ColumnType = "IPv6" + ColumnTypeDateTime ColumnType = "DateTime" + ColumnTypeDateTime64 ColumnType = "DateTime64" + ColumnTypeDate ColumnType = "Date" + ColumnTypeDate32 ColumnType = "Date32" + ColumnTypeUUID ColumnType = "UUID" + ColumnTypeEnum8 ColumnType = "Enum8" + ColumnTypeEnum16 ColumnType = "Enum16" + ColumnTypeLowCardinality ColumnType = "LowCardinality" + ColumnTypeMap ColumnType = "Map" + ColumnTypeBool ColumnType = "Bool" + ColumnTypeTuple ColumnType = "Tuple" + ColumnTypeNullable ColumnType = "Nullable" + ColumnTypeDecimal32 ColumnType = "Decimal32" + ColumnTypeDecimal64 ColumnType = "Decimal64" + ColumnTypeDecimal128 ColumnType = "Decimal128" + ColumnTypeDecimal256 ColumnType = "Decimal256" + ColumnTypePoint ColumnType = "Point" + ColumnTypeInterval ColumnType = "Interval" + ColumnTypeNothing ColumnType = "Nothing" +) + +// colWrap wraps Column with type t. +type colWrap struct { + Column + t ColumnType +} + +func (c colWrap) Type() ColumnType { return c.t } + +// Wrap Column with type parameters. +// +// So if c type is T, result type will be T(arg0, arg1, ...). +func Wrap(c Column, args ...interface{}) Column { + var params []string + for _, a := range args { + params = append(params, fmt.Sprint(a)) + } + t := c.Type().With(params...) + return Alias(c, t) +} + +// Alias column as other type. 
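// (Illustrative note, not from the upstream ch-go sources: the wrapper only
// overrides the reported type, the encoding is untouched. For example,
// Alias(new(ColUInt8), ColumnTypeBool).Type() reports ColumnTypeBool while the
// data is still encoded as raw UInt8 bytes.)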
+// +// E.g. Bool is domain of UInt8, so can be aliased from UInt8. +func Alias(c Column, t ColumnType) Column { + return colWrap{ + Column: c, + t: t, + } +} + +// ColInfo wraps Name and Type of column. +type ColInfo struct { + Name string + Type ColumnType +} + +// ColInfoInput saves column info on decoding. +type ColInfoInput []ColInfo + +func (s *ColInfoInput) Reset() { + *s = (*s)[:0] +} + +func (s *ColInfoInput) DecodeResult(r *Reader, version int, b Block) error { + s.Reset() + if b.Rows > 0 { + return errors.New("got unexpected rows") + } + for i := 0; i < b.Columns; i++ { + columnName, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] name", i) + } + columnTypeRaw, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] type", i) + } + if FeatureCustomSerialization.In(version) { + customSerialization, err := r.Bool() + if err != nil { + return errors.Wrapf(err, "column [%d] custom serialization", i) + } + if customSerialization { + return errors.Wrapf(err, "column [%d] has custom serialization (not supported)", i) + } + } + *s = append(*s, ColInfo{ + Name: columnName, + Type: ColumnType(columnTypeRaw), + }) + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/compression.go b/vendor/github.com/ClickHouse/ch-go/proto/compression.go new file mode 100644 index 0000000..f3fc6f9 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/compression.go @@ -0,0 +1,17 @@ +package proto + +//go:generate go run github.com/dmarkham/enumer -type Compression -trimprefix Compression -output compression_enum.go + +// Compression status. +type Compression byte + +// Compression statuses. +const ( + CompressionDisabled Compression = 0 + CompressionEnabled Compression = 1 +) + +// Encode to buffer. +func (c Compression) Encode(b *Buffer) { + b.PutUVarInt(uint64(c)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/compression_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/compression_enum.go new file mode 100644 index 0000000..bb9af56 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/compression_enum.go @@ -0,0 +1,78 @@ +// Code generated by "enumer -type Compression -trimprefix Compression -output compression_enum.go"; DO NOT EDIT. + +package proto + +import ( + "fmt" + "strings" +) + +const _CompressionName = "DisabledEnabled" + +var _CompressionIndex = [...]uint8{0, 8, 15} + +const _CompressionLowerName = "disabledenabled" + +func (i Compression) String() string { + if i >= Compression(len(_CompressionIndex)-1) { + return fmt.Sprintf("Compression(%d)", i) + } + return _CompressionName[_CompressionIndex[i]:_CompressionIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _CompressionNoOp() { + var x [1]struct{} + _ = x[CompressionDisabled-(0)] + _ = x[CompressionEnabled-(1)] +} + +var _CompressionValues = []Compression{CompressionDisabled, CompressionEnabled} + +var _CompressionNameToValueMap = map[string]Compression{ + _CompressionName[0:8]: CompressionDisabled, + _CompressionLowerName[0:8]: CompressionDisabled, + _CompressionName[8:15]: CompressionEnabled, + _CompressionLowerName[8:15]: CompressionEnabled, +} + +var _CompressionNames = []string{ + _CompressionName[0:8], + _CompressionName[8:15], +} + +// CompressionString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
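// (Illustrative example, not from the upstream ch-go sources:
//
//	c, _ := proto.CompressionString("Enabled")  // CompressionEnabled
//	c, _ = proto.CompressionString("disabled")  // matched via the lower-cased name map
//	_, err := proto.CompressionString("zstd")   // err: "zstd does not belong to Compression values"
// )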
+func CompressionString(s string) (Compression, error) { + if val, ok := _CompressionNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _CompressionNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Compression values", s) +} + +// CompressionValues returns all values of the enum +func CompressionValues() []Compression { + return _CompressionValues +} + +// CompressionStrings returns a slice of all String values of the enum +func CompressionStrings() []string { + strs := make([]string, len(_CompressionNames)) + copy(strs, _CompressionNames) + return strs +} + +// IsACompression returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Compression) IsACompression() bool { + for _, v := range _CompressionValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/date.go b/vendor/github.com/ClickHouse/ch-go/proto/date.go new file mode 100644 index 0000000..44a3d58 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/date.go @@ -0,0 +1,46 @@ +package proto + +import "time" + +// Date represents Date value. +// +// https://clickhouse.com/docs/en/sql-reference/data-types/date/ +type Date uint16 + +// DateLayout is default time format for Date. +const DateLayout = "2006-01-02" + +// secInDay represents seconds in day. +// +// NB: works only on UTC, use time.Date, time.Time.AddDate. +const secInDay = 24 * 60 * 60 + +// Unix returns unix timestamp of Date. +func (d Date) Unix() int64 { + return secInDay * int64(d) +} + +// Time returns UTC starting time.Time of Date. +// +// You can use time.Unix(d.Unix(), 0) to get Time in time.Local location. +func (d Date) Time() time.Time { + return time.Unix(d.Unix(), 0).UTC() +} + +func (d Date) String() string { + return d.Time().UTC().Format(DateLayout) +} + +// ToDate returns Date of time.Time. +func ToDate(t time.Time) Date { + if t.IsZero() { + return 0 + } + _, offset := t.Zone() + return Date((t.Unix() + int64(offset)) / secInDay) +} + +// NewDate returns the Date corresponding to year, month and day in UTC. +func NewDate(year int, month time.Month, day int) Date { + return ToDate(time.Date(year, month, day, 0, 0, 0, 0, time.UTC)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/date32.go b/vendor/github.com/ClickHouse/ch-go/proto/date32.go new file mode 100644 index 0000000..e67f69d --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/date32.go @@ -0,0 +1,38 @@ +package proto + +import "time" + +// Date32 represents Date32 value. +// +// https://clickhouse.com/docs/en/sql-reference/data-types/date32/ +type Date32 int32 + +// Unix returns unix timestamp of Date32. +// +// You can use time.Unix(d.Unix(), 0) to get Time in time.Local location. +func (d Date32) Unix() int64 { + return secInDay * int64(d) +} + +// Time returns UTC starting time.Time of Date32. +func (d Date32) Time() time.Time { + return time.Unix(d.Unix(), 0).UTC() +} + +func (d Date32) String() string { + return d.Time().Format(DateLayout) +} + +// ToDate32 returns Date32 of time.Time. +func ToDate32(t time.Time) Date32 { + if t.IsZero() { + return 0 + } + _, offset := t.Zone() + return Date32((t.Unix() + int64(offset)) / secInDay) +} + +// NewDate32 returns the Date32 corresponding to year, month and day in UTC. 
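// (Illustrative note, not from the upstream ch-go sources: ToDate32 adds t's
// zone offset before dividing by secInDay, so the stored value is the civil
// date as observed in t's location; Date32.Time then returns midnight UTC of
// that date. For example:
//
//	d := proto.NewDate32(2024, time.August, 20)
//	_ = d.String() // "2024-08-20"
// )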
+func NewDate32(year int, month time.Month, day int) Date32 { + return ToDate32(time.Date(year, month, day, 0, 0, 0, 0, time.UTC)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/datetime.go new file mode 100644 index 0000000..a7f48e3 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/datetime.go @@ -0,0 +1,21 @@ +package proto + +import "time" + +// DateTime represents DateTime type. +type DateTime uint32 + +// ToDateTime converts time.Time to DateTime. +func ToDateTime(t time.Time) DateTime { + if t.IsZero() { + return 0 + } + return DateTime(t.Unix()) +} + +// Time returns DateTime as time.Time. +func (d DateTime) Time() time.Time { + // https://clickhouse.com/docs/en/sql-reference/data-types/datetime/#usage-remarks + // ClickHouse stores UTC timestamps that are timezone-agnostic. + return time.Unix(int64(d), 0) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go new file mode 100644 index 0000000..f60ba49 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go @@ -0,0 +1,62 @@ +package proto + +import ( + "time" +) + +// Precision of DateTime64. +// +// Tick size (precision): 10^(-precision) seconds. +// Valid range: [0:9]. +type Precision byte + +// Duration returns duration of single tick for precision. +func (p Precision) Duration() time.Duration { + return time.Nanosecond * time.Duration(p.Scale()) +} + +// Valid reports whether precision is valid. +func (p Precision) Valid() bool { + return p <= PrecisionMax +} + +func (p Precision) Scale() int64 { + d := int64(1) + for i := PrecisionNano; i > p; i-- { + d *= 10 + } + return d +} + +const ( + // PrecisionSecond is one second precision. + PrecisionSecond Precision = 0 + // PrecisionMilli is millisecond precision. + PrecisionMilli Precision = 3 + // PrecisionMicro is microsecond precision. + PrecisionMicro Precision = 6 + // PrecisionNano is nanosecond precision. + PrecisionNano Precision = 9 + + // PrecisionMax is maximum precision (nanosecond). + PrecisionMax = PrecisionNano +) + +// DateTime64 represents DateTime64 type. +// +// See https://clickhouse.com/docs/en/sql-reference/data-types/datetime64/. +type DateTime64 int64 + +// ToDateTime64 converts time.Time to DateTime64. +func ToDateTime64(t time.Time, p Precision) DateTime64 { + if t.IsZero() { + return 0 + } + return DateTime64(t.UnixNano() / p.Scale()) +} + +// Time returns DateTime64 as time.Time. +func (d DateTime64) Time(p Precision) time.Time { + nsec := int64(d) * p.Scale() + return time.Unix(nsec/1e9, nsec%1e9) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/decimal.go b/vendor/github.com/ClickHouse/ch-go/proto/decimal.go new file mode 100644 index 0000000..796a594 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/decimal.go @@ -0,0 +1,13 @@ +package proto + +// Decimal32 represents Decimal32 value. +type Decimal32 int32 + +// Decimal64 represents Decimal32 value. +type Decimal64 int64 + +// Decimal128 represents Decimal128 value. +type Decimal128 Int128 + +// Decimal256 represents Decimal256 value. +type Decimal256 Int256 diff --git a/vendor/github.com/ClickHouse/ch-go/proto/enum16.go b/vendor/github.com/ClickHouse/ch-go/proto/enum16.go new file mode 100644 index 0000000..a51bc71 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/enum16.go @@ -0,0 +1,6 @@ +package proto + +// Enum16 represents raw Enum16 value. +// +// Actual values should be taken from DDL. 
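// (Illustrative note, not from the upstream ch-go sources: Enum16, like Enum8
// below, carries only the raw numeric value sent on the wire; mapping it to
// the names declared in the table DDL, e.g. Enum16('ok' = 1, 'err' = 2), is
// left to the caller.)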
+type Enum16 int16 diff --git a/vendor/github.com/ClickHouse/ch-go/proto/enum8.go b/vendor/github.com/ClickHouse/ch-go/proto/enum8.go new file mode 100644 index 0000000..61c7375 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/enum8.go @@ -0,0 +1,6 @@ +package proto + +// Enum8 represents raw Enum8 value. +// +// Actual values should be taken from DDL. +type Enum8 int8 diff --git a/vendor/github.com/ClickHouse/ch-go/proto/error.go b/vendor/github.com/ClickHouse/ch-go/proto/error.go new file mode 100644 index 0000000..3eca148 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/error.go @@ -0,0 +1,15 @@ +package proto + +import "fmt" + +// Error on server side. +type Error int + +func (e Error) Error() string { + if e.IsAError() { + return fmt.Sprintf("%s (%d)", e.String(), e) + } + return fmt.Sprintf("UNKNOWN (%d)", e) +} + +//go:generate go run github.com/dmarkham/enumer -transform snake_upper -type Error -trimprefix Err -output error_enum.go diff --git a/vendor/github.com/ClickHouse/ch-go/proto/error_codes.go b/vendor/github.com/ClickHouse/ch-go/proto/error_codes.go new file mode 100644 index 0000000..ee1ac89 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/error_codes.go @@ -0,0 +1,376 @@ +package proto + +// Subset of possible errors. +const ( + ErrUnsupportedMethod Error = 1 + ErrUnsupportedParameter Error = 2 + ErrUnexpectedEndOfFile Error = 3 + ErrExpectedEndOfFile Error = 4 + ErrCannotParseText Error = 6 + ErrIncorrectNumberOfColumns Error = 7 + ErrThereIsNoColumn Error = 8 + ErrSizesOfColumnsDoesntMatch Error = 9 + ErrNotFoundColumnInBlock Error = 10 + ErrPositionOutOfBound Error = 11 + ErrParameterOutOfBound Error = 12 + ErrSizesOfColumnsInTupleDoesntMatch Error = 13 + ErrDuplicateColumn Error = 15 + ErrNoSuchColumnInTable Error = 16 + ErrDelimiterInStringLiteralDoesntMatch Error = 17 + ErrCannotInsertElementIntoConstantColumn Error = 18 + ErrSizeOfFixedStringDoesntMatch Error = 19 + ErrNumberOfColumnsDoesntMatch Error = 20 + ErrCannotReadAllDataFromTabSeparatedInput Error = 21 + ErrCannotParseAllValueFromTabSeparatedInput Error = 22 + ErrCannotReadFromIstream Error = 23 + ErrCannotWriteToOstream Error = 24 + ErrCannotParseEscapeSequence Error = 25 + ErrCannotParseQuotedString Error = 26 + ErrCannotParseInputAssertionFailed Error = 27 + ErrCannotPrintFloatOrDoubleNumber Error = 28 + ErrCannotPrintInteger Error = 29 + ErrCannotReadSizeOfCompressedChunk Error = 30 + ErrCannotReadCompressedChunk Error = 31 + ErrAttemptToReadAfterEOF Error = 32 + ErrCannotReadAllData Error = 33 + ErrTooManyArgumentsForFunction Error = 34 + ErrTooLessArgumentsForFunction Error = 35 + ErrBadArguments Error = 36 + ErrUnknownElementInAst Error = 37 + ErrCannotParseDate Error = 38 + ErrTooLargeSizeCompressed Error = 39 + ErrChecksumDoesntMatch Error = 40 + ErrCannotParseDatetime Error = 41 + ErrNumberOfArgumentsDoesntMatch Error = 42 + ErrIllegalTypeOfArgument Error = 43 + ErrIllegalColumn Error = 44 + ErrIllegalNumberOfResultColumns Error = 45 + ErrUnknownFunction Error = 46 + ErrUnknownIdentifier Error = 47 + ErrNotImplemented Error = 48 + ErrLogicalError Error = 49 + ErrUnknownType Error = 50 + ErrEmptyListOfColumnsQueried Error = 51 + ErrColumnQueriedMoreThanOnce Error = 52 + ErrTypeMismatch Error = 53 + ErrStorageDoesntAllowParameters Error = 54 + ErrStorageRequiresParameter Error = 55 + ErrUnknownStorage Error = 56 + ErrTableAlreadyExists Error = 57 + ErrTableMetadataAlreadyExists Error = 58 + ErrIllegalTypeOfColumnForFilter Error = 59 + ErrUnknownTable Error = 60 + 
ErrOnlyFilterColumnInBlock Error = 61 + ErrSyntaxError Error = 62 + ErrUnknownAggregateFunction Error = 63 + ErrCannotReadAggregateFunctionFromText Error = 64 + ErrCannotWriteAggregateFunctionAsText Error = 65 + ErrNotAColumn Error = 66 + ErrIllegalKeyOfAggregation Error = 67 + ErrCannotGetSizeOfField Error = 68 + ErrArgumentOutOfBound Error = 69 + ErrCannotConvertType Error = 70 + ErrCannotWriteAfterEndOfBuffer Error = 71 + ErrCannotParseNumber Error = 72 + ErrUnknownFormat Error = 73 + ErrCannotReadFromFileDescriptor Error = 74 + ErrCannotWriteToFileDescriptor Error = 75 + ErrCannotOpenFile Error = 76 + ErrCannotCloseFile Error = 77 + ErrUnknownTypeOfQuery Error = 78 + ErrIncorrectFileName Error = 79 + ErrIncorrectQuery Error = 80 + ErrUnknownDatabase Error = 81 + ErrDatabaseAlreadyExists Error = 82 + ErrDirectoryDoesntExist Error = 83 + ErrDirectoryAlreadyExists Error = 84 + ErrFormatIsNotSuitableForInput Error = 85 + ErrReceivedErrorFromRemoteIoServer Error = 86 + ErrCannotSeekThroughFile Error = 87 + ErrCannotTruncateFile Error = 88 + ErrUnknownCompressionMethod Error = 89 + ErrEmptyListOfColumnsPassed Error = 90 + ErrSizesOfMarksFilesAreInconsistent Error = 91 + ErrEmptyDataPassed Error = 92 + ErrUnknownAggregatedDataVariant Error = 93 + ErrCannotMergeDifferentAggregatedDataVariants Error = 94 + ErrCannotReadFromSocket Error = 95 + ErrCannotWriteToSocket Error = 96 + ErrCannotReadAllDataFromChunkedInput Error = 97 + ErrCannotWriteToEmptyBlockOutputStream Error = 98 + ErrUnknownPacketFromClient Error = 99 + ErrUnknownPacketFromServer Error = 100 + ErrUnexpectedPacketFromClient Error = 101 + ErrUnexpectedPacketFromServer Error = 102 + ErrReceivedDataForWrongQueryID Error = 103 + ErrTooSmallBufferSize Error = 104 + ErrCannotReadHistory Error = 105 + ErrCannotAppendHistory Error = 106 + ErrFileDoesntExist Error = 107 + ErrNoDataToInsert Error = 108 + ErrCannotBlockSignal Error = 109 + ErrCannotUnblockSignal Error = 110 + ErrCannotManipulateSigset Error = 111 + ErrCannotWaitForSignal Error = 112 + ErrThereIsNoSession Error = 113 + ErrCannotClockGettime Error = 114 + ErrUnknownSetting Error = 115 + ErrThereIsNoDefaultValue Error = 116 + ErrIncorrectData Error = 117 + ErrEngineRequired Error = 119 + ErrCannotInsertValueOfDifferentSizeIntoTuple Error = 120 + ErrUnknownSetDataVariant Error = 121 + ErrIncompatibleColumns Error = 122 + ErrUnknownTypeOfAstNode Error = 123 + ErrIncorrectElementOfSet Error = 124 + ErrIncorrectResultOfScalarSubquery Error = 125 + ErrCannotGetReturnType Error = 126 + ErrIllegalIndex Error = 127 + ErrTooLargeArraySize Error = 128 + ErrFunctionIsSpecial Error = 129 + ErrCannotReadArrayFromText Error = 130 + ErrTooLargeStringSize Error = 131 + ErrCannotCreateTableFromMetadata Error = 132 + ErrAggregateFunctionDoesntAllowParameters Error = 133 + ErrParametersToAggregateFunctionsMustBeLiterals Error = 134 + ErrZeroArrayOrTupleIndex Error = 135 + ErrUnknownElementInConfig Error = 137 + ErrExcessiveElementInConfig Error = 138 + ErrNoElementsInConfig Error = 139 + ErrAllRequestedColumnsAreMissing Error = 140 + ErrSamplingNotSupported Error = 141 + ErrNotFoundNode Error = 142 + ErrFoundMoreThanOneNode Error = 143 + ErrFirstDateIsBiggerThanLastDate Error = 144 + ErrUnknownOverflowMode Error = 145 + ErrQuerySectionDoesntMakeSense Error = 146 + ErrNotFoundFunctionElementForAggregate Error = 147 + ErrNotFoundRelationElementForCondition Error = 148 + ErrNotFoundRHSElementForCondition Error = 149 + ErrNoAttributesListed Error = 150 + ErrIndexOfColumnInSortClauseIsOutOfRange Error 
= 151 + ErrUnknownDirectionOfSorting Error = 152 + ErrIllegalDivision Error = 153 + ErrAggregateFunctionNotApplicable Error = 154 + ErrUnknownRelation Error = 155 + ErrDictionariesWasNotLoaded Error = 156 + ErrIllegalOverflowMode Error = 157 + ErrTooManyRows Error = 158 + ErrTimeoutExceeded Error = 159 + ErrTooSlow Error = 160 + ErrTooManyColumns Error = 161 + ErrTooDeepSubqueries Error = 162 + ErrTooDeepPipeline Error = 163 + ErrReadonly Error = 164 + ErrTooManyTemporaryColumns Error = 165 + ErrTooManyTemporaryNonConstColumns Error = 166 + ErrTooDeepAst Error = 167 + ErrTooBigAst Error = 168 + ErrBadTypeOfField Error = 169 + ErrBadGet Error = 170 + ErrBlocksHaveDifferentStructure Error = 171 + ErrCannotCreateDirectory Error = 172 + ErrCannotAllocateMemory Error = 173 + ErrCyclicAliases Error = 174 + ErrChunkNotFound Error = 176 + ErrDuplicateChunkName Error = 177 + ErrMultipleAliasesForExpression Error = 178 + ErrMultipleExpressionsForAlias Error = 179 + ErrThereIsNoProfile Error = 180 + ErrIllegalFinal Error = 181 + ErrIllegalPrewhere Error = 182 + ErrUnexpectedExpression Error = 183 + ErrIllegalAggregation Error = 184 + ErrUnsupportedMyisamBlockType Error = 185 + ErrUnsupportedCollationLocale Error = 186 + ErrCollationComparisonFailed Error = 187 + ErrUnknownAction Error = 188 + ErrTableMustNotBeCreatedManually Error = 189 + ErrSizesOfArraysDoesntMatch Error = 190 + ErrSetSizeLimitExceeded Error = 191 + ErrUnknownUser Error = 192 + ErrWrongPassword Error = 193 + ErrRequiredPassword Error = 194 + ErrIPAddressNotAllowed Error = 195 + ErrUnknownAddressPatternType Error = 196 + ErrServerRevisionIsTooOld Error = 197 + ErrDNSError Error = 198 + ErrUnknownQuota Error = 199 + ErrQuotaDoesntAllowKeys Error = 200 + ErrQuotaExpired Error = 201 + ErrTooManySimultaneousQueries Error = 202 + ErrNoFreeConnection Error = 203 + ErrCannotFsync Error = 204 + ErrNestedTypeTooDeep Error = 205 + ErrAliasRequired Error = 206 + ErrAmbiguousIdentifier Error = 207 + ErrEmptyNestedTable Error = 208 + ErrSocketTimeout Error = 209 + ErrNetworkError Error = 210 + ErrEmptyQuery Error = 211 + ErrUnknownLoadBalancing Error = 212 + ErrUnknownTotalsMode Error = 213 + ErrCannotStatvfs Error = 214 + ErrNotAnAggregate Error = 215 + ErrQueryWithSameIDIsAlreadyRunning Error = 216 + ErrClientHasConnectedToWrongPort Error = 217 + ErrTableIsDropped Error = 218 + ErrDatabaseNotEmpty Error = 219 + ErrDuplicateInterserverIoEndpoint Error = 220 + ErrNoSuchInterserverIoEndpoint Error = 221 + ErrAddingReplicaToNonEmptyTable Error = 222 + ErrUnexpectedAstStructure Error = 223 + ErrReplicaIsAlreadyActive Error = 224 + ErrNoZookeeper Error = 225 + ErrNoFileInDataPart Error = 226 + ErrUnexpectedFileInDataPart Error = 227 + ErrBadSizeOfFileInDataPart Error = 228 + ErrQueryIsTooLarge Error = 229 + ErrNotFoundExpectedDataPart Error = 230 + ErrTooManyUnexpectedDataParts Error = 231 + ErrNoSuchDataPart Error = 232 + ErrBadDataPartName Error = 233 + ErrNoReplicaHasPart Error = 234 + ErrDuplicateDataPart Error = 235 + ErrAborted Error = 236 + ErrNoReplicaNameGiven Error = 237 + ErrFormatVersionTooOld Error = 238 + ErrCannotMunmap Error = 239 + ErrCannotMremap Error = 240 + ErrMemoryLimitExceeded Error = 241 + ErrTableIsReadOnly Error = 242 + ErrNotEnoughSpace Error = 243 + ErrUnexpectedZookeeperError Error = 244 + ErrCorruptedData Error = 246 + ErrIncorrectMark Error = 247 + ErrInvalidPartitionValue Error = 248 + ErrNotEnoughBlockNumbers Error = 250 + ErrNoSuchReplica Error = 251 + ErrTooManyParts Error = 252 + ErrReplicaIsAlreadyExist Error = 
253 + ErrNoActiveReplicas Error = 254 + ErrTooManyRetriesToFetchParts Error = 255 + ErrPartitionAlreadyExists Error = 256 + ErrPartitionDoesntExist Error = 257 + ErrUnionAllResultStructuresMismatch Error = 258 + ErrClientOutputFormatSpecified Error = 260 + ErrUnknownBlockInfoField Error = 261 + ErrBadCollation Error = 262 + ErrCannotCompileCode Error = 263 + ErrIncompatibleTypeOfJoin Error = 264 + ErrNoAvailableReplica Error = 265 + ErrMismatchReplicasDataSources Error = 266 + ErrStorageDoesntSupportParallelReplicas Error = 267 + ErrCPUIDError Error = 268 + ErrInfiniteLoop Error = 269 + ErrCannotCompress Error = 270 + ErrCannotDecompress Error = 271 + ErrAioSubmitError Error = 272 + ErrAioCompletionError Error = 273 + ErrAioReadError Error = 274 + ErrAioWriteError Error = 275 + ErrIndexNotUsed Error = 277 + ErrLeadershipLost Error = 278 + ErrAllConnectionTriesFailed Error = 279 + ErrNoAvailableData Error = 280 + ErrDictionaryIsEmpty Error = 281 + ErrIncorrectIndex Error = 282 + ErrUnknownDistributedProductMode Error = 283 + ErrUnknownGlobalSubqueriesMethod Error = 284 + ErrTooLessLiveReplicas Error = 285 + ErrUnsatisfiedQuorumForPreviousWrite Error = 286 + ErrUnknownFormatVersion Error = 287 + ErrDistributedInJoinSubqueryDenied Error = 288 + ErrReplicaIsNotInQuorum Error = 289 + ErrLimitExceeded Error = 290 + ErrDatabaseAccessDenied Error = 291 + ErrLeadershipChanged Error = 292 + ErrMongodbCannotAuthenticate Error = 293 + ErrInvalidBlockExtraInfo Error = 294 + ErrReceivedEmptyData Error = 295 + ErrNoRemoteShardFound Error = 296 + ErrShardHasNoConnections Error = 297 + ErrCannotPipe Error = 298 + ErrCannotFork Error = 299 + ErrCannotDlsym Error = 300 + ErrCannotCreateChildProcess Error = 301 + ErrChildWasNotExitedNormally Error = 302 + ErrCannotSelect Error = 303 + ErrCannotWaitpid Error = 304 + ErrTableWasNotDropped Error = 305 + ErrTooDeepRecursion Error = 306 + ErrTooManyBytes Error = 307 + ErrUnexpectedNodeInZookeeper Error = 308 + ErrFunctionCannotHaveParameters Error = 309 + ErrInvalidShardWeight Error = 317 + ErrInvalidConfigParameter Error = 318 + ErrUnknownStatusOfInsert Error = 319 + ErrValueIsOutOfRangeOfDataType Error = 321 + ErrBarrierTimeout Error = 335 + ErrUnknownDatabaseEngine Error = 336 + ErrDdlGuardIsActive Error = 337 + ErrUnfinished Error = 341 + ErrMetadataMismatch Error = 342 + ErrSupportIsDisabled Error = 344 + ErrTableDiffersTooMuch Error = 345 + ErrCannotConvertCharset Error = 346 + ErrCannotLoadConfig Error = 347 + ErrCannotInsertNullInOrdinaryColumn Error = 349 + ErrIncompatibleSourceTables Error = 350 + ErrAmbiguousTableName Error = 351 + ErrAmbiguousColumnName Error = 352 + ErrIndexOfPositionalArgumentIsOutOfRange Error = 353 + ErrZlibInflateFailed Error = 354 + ErrZlibDeflateFailed Error = 355 + ErrBadLambda Error = 356 + ErrReservedIdentifierName Error = 357 + ErrIntoOutfileNotAllowed Error = 358 + ErrTableSizeExceedsMaxDropSizeLimit Error = 359 + ErrCannotCreateCharsetConverter Error = 360 + ErrSeekPositionOutOfBound Error = 361 + ErrCurrentWriteBufferIsExhausted Error = 362 + ErrCannotCreateIoBuffer Error = 363 + ErrReceivedErrorTooManyRequests Error = 364 + ErrOutputIsNotSorted Error = 365 + ErrSizesOfNestedColumnsAreInconsistent Error = 366 + ErrTooManyFetches Error = 367 + ErrBadCast Error = 368 + ErrAllReplicasAreStale Error = 369 + ErrDataTypeCannotBeUsedInTables Error = 370 + ErrInconsistentClusterDefinition Error = 371 + ErrSessionNotFound Error = 372 + ErrSessionIsLocked Error = 373 + ErrInvalidSessionTimeout Error = 374 + ErrCannotDlopen Error = 
375 + ErrCannotParseUUID Error = 376 + ErrIllegalSyntaxForDataType Error = 377 + ErrDataTypeCannotHaveArguments Error = 378 + ErrUnknownStatusOfDistributedDdlTask Error = 379 + ErrCannotKill Error = 380 + ErrHTTPLengthRequired Error = 381 + ErrCannotLoadCatboostModel Error = 382 + ErrCannotApplyCatboostModel Error = 383 + ErrPartIsTemporarilyLocked Error = 384 + ErrMultipleStreamsRequired Error = 385 + ErrNoCommonType Error = 386 + ErrExternalLoadableAlreadyExists Error = 387 + ErrCannotAssignOptimize Error = 388 + ErrInsertWasDeduplicated Error = 389 + ErrCannotGetCreateTableQuery Error = 390 + ErrExternalLibraryError Error = 391 + ErrQueryIsProhibited Error = 392 + ErrThereIsNoQuery Error = 393 + ErrQueryWasCancelled Error = 394 + ErrFunctionThrowIfValueIsNonZero Error = 395 + ErrTooManyRowsOrBytes Error = 396 + ErrQueryIsNotSupportedInMaterializedView Error = 397 + ErrCannotParseDomainValueFromString Error = 441 + ErrKeeperException Error = 999 + ErrPocoException Error = 1000 + ErrStdException Error = 1001 + ErrUnknownException Error = 1002 + ErrConditionalTreeParentNotFound Error = 2001 + ErrIllegalProjectionManipulator Error = 2002 + + ErrAuthenticationFailed Error = 516 +) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/error_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/error_enum.go new file mode 100644 index 0000000..6e7c2fe --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/error_enum.go @@ -0,0 +1,1916 @@ +// Code generated by "enumer -transform snake_upper -type Error -trimprefix Err -output error_enum.go"; DO NOT EDIT. + +package proto + +import ( + "fmt" + "strings" +) + +const _ErrorName = "UNSUPPORTED_METHODUNSUPPORTED_PARAMETERUNEXPECTED_END_OF_FILEEXPECTED_END_OF_FILECANNOT_PARSE_TEXTINCORRECT_NUMBER_OF_COLUMNSTHERE_IS_NO_COLUMNSIZES_OF_COLUMNS_DOESNT_MATCHNOT_FOUND_COLUMN_IN_BLOCKPOSITION_OUT_OF_BOUNDPARAMETER_OUT_OF_BOUNDSIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCHDUPLICATE_COLUMNNO_SUCH_COLUMN_IN_TABLEDELIMITER_IN_STRING_LITERAL_DOESNT_MATCHCANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMNSIZE_OF_FIXED_STRING_DOESNT_MATCHNUMBER_OF_COLUMNS_DOESNT_MATCHCANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUTCANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUTCANNOT_READ_FROM_ISTREAMCANNOT_WRITE_TO_OSTREAMCANNOT_PARSE_ESCAPE_SEQUENCECANNOT_PARSE_QUOTED_STRINGCANNOT_PARSE_INPUT_ASSERTION_FAILEDCANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBERCANNOT_PRINT_INTEGERCANNOT_READ_SIZE_OF_COMPRESSED_CHUNKCANNOT_READ_COMPRESSED_CHUNKATTEMPT_TO_READ_AFTER_EOFCANNOT_READ_ALL_DATATOO_MANY_ARGUMENTS_FOR_FUNCTIONTOO_LESS_ARGUMENTS_FOR_FUNCTIONBAD_ARGUMENTSUNKNOWN_ELEMENT_IN_ASTCANNOT_PARSE_DATETOO_LARGE_SIZE_COMPRESSEDCHECKSUM_DOESNT_MATCHCANNOT_PARSE_DATETIMENUMBER_OF_ARGUMENTS_DOESNT_MATCHILLEGAL_TYPE_OF_ARGUMENTILLEGAL_COLUMNILLEGAL_NUMBER_OF_RESULT_COLUMNSUNKNOWN_FUNCTIONUNKNOWN_IDENTIFIERNOT_IMPLEMENTEDLOGICAL_ERRORUNKNOWN_TYPEEMPTY_LIST_OF_COLUMNS_QUERIEDCOLUMN_QUERIED_MORE_THAN_ONCETYPE_MISMATCHSTORAGE_DOESNT_ALLOW_PARAMETERSSTORAGE_REQUIRES_PARAMETERUNKNOWN_STORAGETABLE_ALREADY_EXISTSTABLE_METADATA_ALREADY_EXISTSILLEGAL_TYPE_OF_COLUMN_FOR_FILTERUNKNOWN_TABLEONLY_FILTER_COLUMN_IN_BLOCKSYNTAX_ERRORUNKNOWN_AGGREGATE_FUNCTIONCANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXTCANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXTNOT_A_COLUMNILLEGAL_KEY_OF_AGGREGATIONCANNOT_GET_SIZE_OF_FIELDARGUMENT_OUT_OF_BOUNDCANNOT_CONVERT_TYPECANNOT_WRITE_AFTER_END_OF_BUFFERCANNOT_PARSE_NUMBERUNKNOWN_FORMATCANNOT_READ_FROM_FILE_DESCRIPTORCANNOT_WRITE_TO_FILE_DESCRIPTORCANNOT_OPEN_FILECANNOT_CLOSE_FILEUNKNOWN_TYPE_OF_QUERYINCORRECT_FILE_NAMEINCOR
RECT_QUERYUNKNOWN_DATABASEDATABASE_ALREADY_EXISTSDIRECTORY_DOESNT_EXISTDIRECTORY_ALREADY_EXISTSFORMAT_IS_NOT_SUITABLE_FOR_INPUTRECEIVED_ERROR_FROM_REMOTE_IO_SERVERCANNOT_SEEK_THROUGH_FILECANNOT_TRUNCATE_FILEUNKNOWN_COMPRESSION_METHODEMPTY_LIST_OF_COLUMNS_PASSEDSIZES_OF_MARKS_FILES_ARE_INCONSISTENTEMPTY_DATA_PASSEDUNKNOWN_AGGREGATED_DATA_VARIANTCANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTSCANNOT_READ_FROM_SOCKETCANNOT_WRITE_TO_SOCKETCANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUTCANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAMUNKNOWN_PACKET_FROM_CLIENTUNKNOWN_PACKET_FROM_SERVERUNEXPECTED_PACKET_FROM_CLIENTUNEXPECTED_PACKET_FROM_SERVERRECEIVED_DATA_FOR_WRONG_QUERY_IDTOO_SMALL_BUFFER_SIZECANNOT_READ_HISTORYCANNOT_APPEND_HISTORYFILE_DOESNT_EXISTNO_DATA_TO_INSERTCANNOT_BLOCK_SIGNALCANNOT_UNBLOCK_SIGNALCANNOT_MANIPULATE_SIGSETCANNOT_WAIT_FOR_SIGNALTHERE_IS_NO_SESSIONCANNOT_CLOCK_GETTIMEUNKNOWN_SETTINGTHERE_IS_NO_DEFAULT_VALUEINCORRECT_DATAENGINE_REQUIREDCANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLEUNKNOWN_SET_DATA_VARIANTINCOMPATIBLE_COLUMNSUNKNOWN_TYPE_OF_AST_NODEINCORRECT_ELEMENT_OF_SETINCORRECT_RESULT_OF_SCALAR_SUBQUERYCANNOT_GET_RETURN_TYPEILLEGAL_INDEXTOO_LARGE_ARRAY_SIZEFUNCTION_IS_SPECIALCANNOT_READ_ARRAY_FROM_TEXTTOO_LARGE_STRING_SIZECANNOT_CREATE_TABLE_FROM_METADATAAGGREGATE_FUNCTION_DOESNT_ALLOW_PARAMETERSPARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALSZERO_ARRAY_OR_TUPLE_INDEXUNKNOWN_ELEMENT_IN_CONFIGEXCESSIVE_ELEMENT_IN_CONFIGNO_ELEMENTS_IN_CONFIGALL_REQUESTED_COLUMNS_ARE_MISSINGSAMPLING_NOT_SUPPORTEDNOT_FOUND_NODEFOUND_MORE_THAN_ONE_NODEFIRST_DATE_IS_BIGGER_THAN_LAST_DATEUNKNOWN_OVERFLOW_MODEQUERY_SECTION_DOESNT_MAKE_SENSENOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATENOT_FOUND_RELATION_ELEMENT_FOR_CONDITIONNOT_FOUND_RHS_ELEMENT_FOR_CONDITIONNO_ATTRIBUTES_LISTEDINDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGEUNKNOWN_DIRECTION_OF_SORTINGILLEGAL_DIVISIONAGGREGATE_FUNCTION_NOT_APPLICABLEUNKNOWN_RELATIONDICTIONARIES_WAS_NOT_LOADEDILLEGAL_OVERFLOW_MODETOO_MANY_ROWSTIMEOUT_EXCEEDEDTOO_SLOWTOO_MANY_COLUMNSTOO_DEEP_SUBQUERIESTOO_DEEP_PIPELINEREADONLYTOO_MANY_TEMPORARY_COLUMNSTOO_MANY_TEMPORARY_NON_CONST_COLUMNSTOO_DEEP_ASTTOO_BIG_ASTBAD_TYPE_OF_FIELDBAD_GETBLOCKS_HAVE_DIFFERENT_STRUCTURECANNOT_CREATE_DIRECTORYCANNOT_ALLOCATE_MEMORYCYCLIC_ALIASESCHUNK_NOT_FOUNDDUPLICATE_CHUNK_NAMEMULTIPLE_ALIASES_FOR_EXPRESSIONMULTIPLE_EXPRESSIONS_FOR_ALIASTHERE_IS_NO_PROFILEILLEGAL_FINALILLEGAL_PREWHEREUNEXPECTED_EXPRESSIONILLEGAL_AGGREGATIONUNSUPPORTED_MYISAM_BLOCK_TYPEUNSUPPORTED_COLLATION_LOCALECOLLATION_COMPARISON_FAILEDUNKNOWN_ACTIONTABLE_MUST_NOT_BE_CREATED_MANUALLYSIZES_OF_ARRAYS_DOESNT_MATCHSET_SIZE_LIMIT_EXCEEDEDUNKNOWN_USERWRONG_PASSWORDREQUIRED_PASSWORDIP_ADDRESS_NOT_ALLOWEDUNKNOWN_ADDRESS_PATTERN_TYPESERVER_REVISION_IS_TOO_OLDDNS_ERRORUNKNOWN_QUOTAQUOTA_DOESNT_ALLOW_KEYSQUOTA_EXPIREDTOO_MANY_SIMULTANEOUS_QUERIESNO_FREE_CONNECTIONCANNOT_FSYNCNESTED_TYPE_TOO_DEEPALIAS_REQUIREDAMBIGUOUS_IDENTIFIEREMPTY_NESTED_TABLESOCKET_TIMEOUTNETWORK_ERROREMPTY_QUERYUNKNOWN_LOAD_BALANCINGUNKNOWN_TOTALS_MODECANNOT_STATVFSNOT_AN_AGGREGATEQUERY_WITH_SAME_ID_IS_ALREADY_RUNNINGCLIENT_HAS_CONNECTED_TO_WRONG_PORTTABLE_IS_DROPPEDDATABASE_NOT_EMPTYDUPLICATE_INTERSERVER_IO_ENDPOINTNO_SUCH_INTERSERVER_IO_ENDPOINTADDING_REPLICA_TO_NON_EMPTY_TABLEUNEXPECTED_AST_STRUCTUREREPLICA_IS_ALREADY_ACTIVENO_ZOOKEEPERNO_FILE_IN_DATA_PARTUNEXPECTED_FILE_IN_DATA_PARTBAD_SIZE_OF_FILE_IN_DATA_PARTQUERY_IS_TOO_LARGENOT_FOUND_EXPECTED_DATA_PARTTOO_MANY_UNEXPECTED_DATA_PARTSNO_SUCH_DATA_PARTBAD_DATA_PART_NAMENO_REPLICA_HAS_PARTDUPLICATE_DATA_PARTABORTEDNO_REPLI
CA_NAME_GIVENFORMAT_VERSION_TOO_OLDCANNOT_MUNMAPCANNOT_MREMAPMEMORY_LIMIT_EXCEEDEDTABLE_IS_READ_ONLYNOT_ENOUGH_SPACEUNEXPECTED_ZOOKEEPER_ERRORCORRUPTED_DATAINCORRECT_MARKINVALID_PARTITION_VALUENOT_ENOUGH_BLOCK_NUMBERSNO_SUCH_REPLICATOO_MANY_PARTSREPLICA_IS_ALREADY_EXISTNO_ACTIVE_REPLICASTOO_MANY_RETRIES_TO_FETCH_PARTSPARTITION_ALREADY_EXISTSPARTITION_DOESNT_EXISTUNION_ALL_RESULT_STRUCTURES_MISMATCHCLIENT_OUTPUT_FORMAT_SPECIFIEDUNKNOWN_BLOCK_INFO_FIELDBAD_COLLATIONCANNOT_COMPILE_CODEINCOMPATIBLE_TYPE_OF_JOINNO_AVAILABLE_REPLICAMISMATCH_REPLICAS_DATA_SOURCESSTORAGE_DOESNT_SUPPORT_PARALLEL_REPLICASCPUID_ERRORINFINITE_LOOPCANNOT_COMPRESSCANNOT_DECOMPRESSAIO_SUBMIT_ERRORAIO_COMPLETION_ERRORAIO_READ_ERRORAIO_WRITE_ERRORINDEX_NOT_USEDLEADERSHIP_LOSTALL_CONNECTION_TRIES_FAILEDNO_AVAILABLE_DATADICTIONARY_IS_EMPTYINCORRECT_INDEXUNKNOWN_DISTRIBUTED_PRODUCT_MODEUNKNOWN_GLOBAL_SUBQUERIES_METHODTOO_LESS_LIVE_REPLICASUNSATISFIED_QUORUM_FOR_PREVIOUS_WRITEUNKNOWN_FORMAT_VERSIONDISTRIBUTED_IN_JOIN_SUBQUERY_DENIEDREPLICA_IS_NOT_IN_QUORUMLIMIT_EXCEEDEDDATABASE_ACCESS_DENIEDLEADERSHIP_CHANGEDMONGODB_CANNOT_AUTHENTICATEINVALID_BLOCK_EXTRA_INFORECEIVED_EMPTY_DATANO_REMOTE_SHARD_FOUNDSHARD_HAS_NO_CONNECTIONSCANNOT_PIPECANNOT_FORKCANNOT_DLSYMCANNOT_CREATE_CHILD_PROCESSCHILD_WAS_NOT_EXITED_NORMALLYCANNOT_SELECTCANNOT_WAITPIDTABLE_WAS_NOT_DROPPEDTOO_DEEP_RECURSIONTOO_MANY_BYTESUNEXPECTED_NODE_IN_ZOOKEEPERFUNCTION_CANNOT_HAVE_PARAMETERSINVALID_SHARD_WEIGHTINVALID_CONFIG_PARAMETERUNKNOWN_STATUS_OF_INSERTVALUE_IS_OUT_OF_RANGE_OF_DATA_TYPEBARRIER_TIMEOUTUNKNOWN_DATABASE_ENGINEDDL_GUARD_IS_ACTIVEUNFINISHEDMETADATA_MISMATCHSUPPORT_IS_DISABLEDTABLE_DIFFERS_TOO_MUCHCANNOT_CONVERT_CHARSETCANNOT_LOAD_CONFIGCANNOT_INSERT_NULL_IN_ORDINARY_COLUMNINCOMPATIBLE_SOURCE_TABLESAMBIGUOUS_TABLE_NAMEAMBIGUOUS_COLUMN_NAMEINDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGEZLIB_INFLATE_FAILEDZLIB_DEFLATE_FAILEDBAD_LAMBDARESERVED_IDENTIFIER_NAMEINTO_OUTFILE_NOT_ALLOWEDTABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMITCANNOT_CREATE_CHARSET_CONVERTERSEEK_POSITION_OUT_OF_BOUNDCURRENT_WRITE_BUFFER_IS_EXHAUSTEDCANNOT_CREATE_IO_BUFFERRECEIVED_ERROR_TOO_MANY_REQUESTSOUTPUT_IS_NOT_SORTEDSIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENTTOO_MANY_FETCHESBAD_CASTALL_REPLICAS_ARE_STALEDATA_TYPE_CANNOT_BE_USED_IN_TABLESINCONSISTENT_CLUSTER_DEFINITIONSESSION_NOT_FOUNDSESSION_IS_LOCKEDINVALID_SESSION_TIMEOUTCANNOT_DLOPENCANNOT_PARSE_UUIDILLEGAL_SYNTAX_FOR_DATA_TYPEDATA_TYPE_CANNOT_HAVE_ARGUMENTSUNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASKCANNOT_KILLHTTP_LENGTH_REQUIREDCANNOT_LOAD_CATBOOST_MODELCANNOT_APPLY_CATBOOST_MODELPART_IS_TEMPORARILY_LOCKEDMULTIPLE_STREAMS_REQUIREDNO_COMMON_TYPEEXTERNAL_LOADABLE_ALREADY_EXISTSCANNOT_ASSIGN_OPTIMIZEINSERT_WAS_DEDUPLICATEDCANNOT_GET_CREATE_TABLE_QUERYEXTERNAL_LIBRARY_ERRORQUERY_IS_PROHIBITEDTHERE_IS_NO_QUERYQUERY_WAS_CANCELLEDFUNCTION_THROW_IF_VALUE_IS_NON_ZEROTOO_MANY_ROWS_OR_BYTESQUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEWCANNOT_PARSE_DOMAIN_VALUE_FROM_STRINGAUTHENTICATION_FAILEDKEEPER_EXCEPTIONPOCO_EXCEPTIONSTD_EXCEPTIONUNKNOWN_EXCEPTIONCONDITIONAL_TREE_PARENT_NOT_FOUNDILLEGAL_PROJECTION_MANIPULATOR" +const _ErrorLowerName = 
"unsupported_methodunsupported_parameterunexpected_end_of_fileexpected_end_of_filecannot_parse_textincorrect_number_of_columnsthere_is_no_columnsizes_of_columns_doesnt_matchnot_found_column_in_blockposition_out_of_boundparameter_out_of_boundsizes_of_columns_in_tuple_doesnt_matchduplicate_columnno_such_column_in_tabledelimiter_in_string_literal_doesnt_matchcannot_insert_element_into_constant_columnsize_of_fixed_string_doesnt_matchnumber_of_columns_doesnt_matchcannot_read_all_data_from_tab_separated_inputcannot_parse_all_value_from_tab_separated_inputcannot_read_from_istreamcannot_write_to_ostreamcannot_parse_escape_sequencecannot_parse_quoted_stringcannot_parse_input_assertion_failedcannot_print_float_or_double_numbercannot_print_integercannot_read_size_of_compressed_chunkcannot_read_compressed_chunkattempt_to_read_after_eofcannot_read_all_datatoo_many_arguments_for_functiontoo_less_arguments_for_functionbad_argumentsunknown_element_in_astcannot_parse_datetoo_large_size_compressedchecksum_doesnt_matchcannot_parse_datetimenumber_of_arguments_doesnt_matchillegal_type_of_argumentillegal_columnillegal_number_of_result_columnsunknown_functionunknown_identifiernot_implementedlogical_errorunknown_typeempty_list_of_columns_queriedcolumn_queried_more_than_oncetype_mismatchstorage_doesnt_allow_parametersstorage_requires_parameterunknown_storagetable_already_existstable_metadata_already_existsillegal_type_of_column_for_filterunknown_tableonly_filter_column_in_blocksyntax_errorunknown_aggregate_functioncannot_read_aggregate_function_from_textcannot_write_aggregate_function_as_textnot_a_columnillegal_key_of_aggregationcannot_get_size_of_fieldargument_out_of_boundcannot_convert_typecannot_write_after_end_of_buffercannot_parse_numberunknown_formatcannot_read_from_file_descriptorcannot_write_to_file_descriptorcannot_open_filecannot_close_fileunknown_type_of_queryincorrect_file_nameincorrect_queryunknown_databasedatabase_already_existsdirectory_doesnt_existdirectory_already_existsformat_is_not_suitable_for_inputreceived_error_from_remote_io_servercannot_seek_through_filecannot_truncate_fileunknown_compression_methodempty_list_of_columns_passedsizes_of_marks_files_are_inconsistentempty_data_passedunknown_aggregated_data_variantcannot_merge_different_aggregated_data_variantscannot_read_from_socketcannot_write_to_socketcannot_read_all_data_from_chunked_inputcannot_write_to_empty_block_output_streamunknown_packet_from_clientunknown_packet_from_serverunexpected_packet_from_clientunexpected_packet_from_serverreceived_data_for_wrong_query_idtoo_small_buffer_sizecannot_read_historycannot_append_historyfile_doesnt_existno_data_to_insertcannot_block_signalcannot_unblock_signalcannot_manipulate_sigsetcannot_wait_for_signalthere_is_no_sessioncannot_clock_gettimeunknown_settingthere_is_no_default_valueincorrect_dataengine_requiredcannot_insert_value_of_different_size_into_tupleunknown_set_data_variantincompatible_columnsunknown_type_of_ast_nodeincorrect_element_of_setincorrect_result_of_scalar_subquerycannot_get_return_typeillegal_indextoo_large_array_sizefunction_is_specialcannot_read_array_from_texttoo_large_string_sizecannot_create_table_from_metadataaggregate_function_doesnt_allow_parametersparameters_to_aggregate_functions_must_be_literalszero_array_or_tuple_indexunknown_element_in_configexcessive_element_in_configno_elements_in_configall_requested_columns_are_missingsampling_not_supportednot_found_nodefound_more_than_one_nodefirst_date_is_bigger_than_last_dateunknown_overflow_modequery_section_doesnt_make_sensenot_
found_function_element_for_aggregatenot_found_relation_element_for_conditionnot_found_rhs_element_for_conditionno_attributes_listedindex_of_column_in_sort_clause_is_out_of_rangeunknown_direction_of_sortingillegal_divisionaggregate_function_not_applicableunknown_relationdictionaries_was_not_loadedillegal_overflow_modetoo_many_rowstimeout_exceededtoo_slowtoo_many_columnstoo_deep_subqueriestoo_deep_pipelinereadonlytoo_many_temporary_columnstoo_many_temporary_non_const_columnstoo_deep_asttoo_big_astbad_type_of_fieldbad_getblocks_have_different_structurecannot_create_directorycannot_allocate_memorycyclic_aliaseschunk_not_foundduplicate_chunk_namemultiple_aliases_for_expressionmultiple_expressions_for_aliasthere_is_no_profileillegal_finalillegal_prewhereunexpected_expressionillegal_aggregationunsupported_myisam_block_typeunsupported_collation_localecollation_comparison_failedunknown_actiontable_must_not_be_created_manuallysizes_of_arrays_doesnt_matchset_size_limit_exceededunknown_userwrong_passwordrequired_passwordip_address_not_allowedunknown_address_pattern_typeserver_revision_is_too_olddns_errorunknown_quotaquota_doesnt_allow_keysquota_expiredtoo_many_simultaneous_queriesno_free_connectioncannot_fsyncnested_type_too_deepalias_requiredambiguous_identifierempty_nested_tablesocket_timeoutnetwork_errorempty_queryunknown_load_balancingunknown_totals_modecannot_statvfsnot_an_aggregatequery_with_same_id_is_already_runningclient_has_connected_to_wrong_porttable_is_droppeddatabase_not_emptyduplicate_interserver_io_endpointno_such_interserver_io_endpointadding_replica_to_non_empty_tableunexpected_ast_structurereplica_is_already_activeno_zookeeperno_file_in_data_partunexpected_file_in_data_partbad_size_of_file_in_data_partquery_is_too_largenot_found_expected_data_parttoo_many_unexpected_data_partsno_such_data_partbad_data_part_nameno_replica_has_partduplicate_data_partabortedno_replica_name_givenformat_version_too_oldcannot_munmapcannot_mremapmemory_limit_exceededtable_is_read_onlynot_enough_spaceunexpected_zookeeper_errorcorrupted_dataincorrect_markinvalid_partition_valuenot_enough_block_numbersno_such_replicatoo_many_partsreplica_is_already_existno_active_replicastoo_many_retries_to_fetch_partspartition_already_existspartition_doesnt_existunion_all_result_structures_mismatchclient_output_format_specifiedunknown_block_info_fieldbad_collationcannot_compile_codeincompatible_type_of_joinno_available_replicamismatch_replicas_data_sourcesstorage_doesnt_support_parallel_replicascpuid_errorinfinite_loopcannot_compresscannot_decompressaio_submit_erroraio_completion_erroraio_read_erroraio_write_errorindex_not_usedleadership_lostall_connection_tries_failedno_available_datadictionary_is_emptyincorrect_indexunknown_distributed_product_modeunknown_global_subqueries_methodtoo_less_live_replicasunsatisfied_quorum_for_previous_writeunknown_format_versiondistributed_in_join_subquery_deniedreplica_is_not_in_quorumlimit_exceededdatabase_access_deniedleadership_changedmongodb_cannot_authenticateinvalid_block_extra_inforeceived_empty_datano_remote_shard_foundshard_has_no_connectionscannot_pipecannot_forkcannot_dlsymcannot_create_child_processchild_was_not_exited_normallycannot_selectcannot_waitpidtable_was_not_droppedtoo_deep_recursiontoo_many_bytesunexpected_node_in_zookeeperfunction_cannot_have_parametersinvalid_shard_weightinvalid_config_parameterunknown_status_of_insertvalue_is_out_of_range_of_data_typebarrier_timeoutunknown_database_engineddl_guard_is_activeunfinishedmetadata_mismatchsupport_is_disabledtable_differs_to
o_muchcannot_convert_charsetcannot_load_configcannot_insert_null_in_ordinary_columnincompatible_source_tablesambiguous_table_nameambiguous_column_nameindex_of_positional_argument_is_out_of_rangezlib_inflate_failedzlib_deflate_failedbad_lambdareserved_identifier_nameinto_outfile_not_allowedtable_size_exceeds_max_drop_size_limitcannot_create_charset_converterseek_position_out_of_boundcurrent_write_buffer_is_exhaustedcannot_create_io_bufferreceived_error_too_many_requestsoutput_is_not_sortedsizes_of_nested_columns_are_inconsistenttoo_many_fetchesbad_castall_replicas_are_staledata_type_cannot_be_used_in_tablesinconsistent_cluster_definitionsession_not_foundsession_is_lockedinvalid_session_timeoutcannot_dlopencannot_parse_uuidillegal_syntax_for_data_typedata_type_cannot_have_argumentsunknown_status_of_distributed_ddl_taskcannot_killhttp_length_requiredcannot_load_catboost_modelcannot_apply_catboost_modelpart_is_temporarily_lockedmultiple_streams_requiredno_common_typeexternal_loadable_already_existscannot_assign_optimizeinsert_was_deduplicatedcannot_get_create_table_queryexternal_library_errorquery_is_prohibitedthere_is_no_queryquery_was_cancelledfunction_throw_if_value_is_non_zerotoo_many_rows_or_bytesquery_is_not_supported_in_materialized_viewcannot_parse_domain_value_from_stringauthentication_failedkeeper_exceptionpoco_exceptionstd_exceptionunknown_exceptionconditional_tree_parent_not_foundillegal_projection_manipulator" + +var _ErrorMap = map[Error]string{ + 1: _ErrorName[0:18], + 2: _ErrorName[18:39], + 3: _ErrorName[39:61], + 4: _ErrorName[61:81], + 6: _ErrorName[81:98], + 7: _ErrorName[98:125], + 8: _ErrorName[125:143], + 9: _ErrorName[143:172], + 10: _ErrorName[172:197], + 11: _ErrorName[197:218], + 12: _ErrorName[218:240], + 13: _ErrorName[240:278], + 15: _ErrorName[278:294], + 16: _ErrorName[294:317], + 17: _ErrorName[317:357], + 18: _ErrorName[357:399], + 19: _ErrorName[399:432], + 20: _ErrorName[432:462], + 21: _ErrorName[462:507], + 22: _ErrorName[507:554], + 23: _ErrorName[554:578], + 24: _ErrorName[578:601], + 25: _ErrorName[601:629], + 26: _ErrorName[629:655], + 27: _ErrorName[655:690], + 28: _ErrorName[690:725], + 29: _ErrorName[725:745], + 30: _ErrorName[745:781], + 31: _ErrorName[781:809], + 32: _ErrorName[809:834], + 33: _ErrorName[834:854], + 34: _ErrorName[854:885], + 35: _ErrorName[885:916], + 36: _ErrorName[916:929], + 37: _ErrorName[929:951], + 38: _ErrorName[951:968], + 39: _ErrorName[968:993], + 40: _ErrorName[993:1014], + 41: _ErrorName[1014:1035], + 42: _ErrorName[1035:1067], + 43: _ErrorName[1067:1091], + 44: _ErrorName[1091:1105], + 45: _ErrorName[1105:1137], + 46: _ErrorName[1137:1153], + 47: _ErrorName[1153:1171], + 48: _ErrorName[1171:1186], + 49: _ErrorName[1186:1199], + 50: _ErrorName[1199:1211], + 51: _ErrorName[1211:1240], + 52: _ErrorName[1240:1269], + 53: _ErrorName[1269:1282], + 54: _ErrorName[1282:1313], + 55: _ErrorName[1313:1339], + 56: _ErrorName[1339:1354], + 57: _ErrorName[1354:1374], + 58: _ErrorName[1374:1403], + 59: _ErrorName[1403:1436], + 60: _ErrorName[1436:1449], + 61: _ErrorName[1449:1476], + 62: _ErrorName[1476:1488], + 63: _ErrorName[1488:1514], + 64: _ErrorName[1514:1554], + 65: _ErrorName[1554:1593], + 66: _ErrorName[1593:1605], + 67: _ErrorName[1605:1631], + 68: _ErrorName[1631:1655], + 69: _ErrorName[1655:1676], + 70: _ErrorName[1676:1695], + 71: _ErrorName[1695:1727], + 72: _ErrorName[1727:1746], + 73: _ErrorName[1746:1760], + 74: _ErrorName[1760:1792], + 75: _ErrorName[1792:1823], + 76: _ErrorName[1823:1839], + 77: 
_ErrorName[1839:1856], + 78: _ErrorName[1856:1877], + 79: _ErrorName[1877:1896], + 80: _ErrorName[1896:1911], + 81: _ErrorName[1911:1927], + 82: _ErrorName[1927:1950], + 83: _ErrorName[1950:1972], + 84: _ErrorName[1972:1996], + 85: _ErrorName[1996:2028], + 86: _ErrorName[2028:2064], + 87: _ErrorName[2064:2088], + 88: _ErrorName[2088:2108], + 89: _ErrorName[2108:2134], + 90: _ErrorName[2134:2162], + 91: _ErrorName[2162:2199], + 92: _ErrorName[2199:2216], + 93: _ErrorName[2216:2247], + 94: _ErrorName[2247:2294], + 95: _ErrorName[2294:2317], + 96: _ErrorName[2317:2339], + 97: _ErrorName[2339:2378], + 98: _ErrorName[2378:2419], + 99: _ErrorName[2419:2445], + 100: _ErrorName[2445:2471], + 101: _ErrorName[2471:2500], + 102: _ErrorName[2500:2529], + 103: _ErrorName[2529:2561], + 104: _ErrorName[2561:2582], + 105: _ErrorName[2582:2601], + 106: _ErrorName[2601:2622], + 107: _ErrorName[2622:2639], + 108: _ErrorName[2639:2656], + 109: _ErrorName[2656:2675], + 110: _ErrorName[2675:2696], + 111: _ErrorName[2696:2720], + 112: _ErrorName[2720:2742], + 113: _ErrorName[2742:2761], + 114: _ErrorName[2761:2781], + 115: _ErrorName[2781:2796], + 116: _ErrorName[2796:2821], + 117: _ErrorName[2821:2835], + 119: _ErrorName[2835:2850], + 120: _ErrorName[2850:2898], + 121: _ErrorName[2898:2922], + 122: _ErrorName[2922:2942], + 123: _ErrorName[2942:2966], + 124: _ErrorName[2966:2990], + 125: _ErrorName[2990:3025], + 126: _ErrorName[3025:3047], + 127: _ErrorName[3047:3060], + 128: _ErrorName[3060:3080], + 129: _ErrorName[3080:3099], + 130: _ErrorName[3099:3126], + 131: _ErrorName[3126:3147], + 132: _ErrorName[3147:3180], + 133: _ErrorName[3180:3222], + 134: _ErrorName[3222:3272], + 135: _ErrorName[3272:3297], + 137: _ErrorName[3297:3322], + 138: _ErrorName[3322:3349], + 139: _ErrorName[3349:3370], + 140: _ErrorName[3370:3403], + 141: _ErrorName[3403:3425], + 142: _ErrorName[3425:3439], + 143: _ErrorName[3439:3463], + 144: _ErrorName[3463:3498], + 145: _ErrorName[3498:3519], + 146: _ErrorName[3519:3550], + 147: _ErrorName[3550:3590], + 148: _ErrorName[3590:3630], + 149: _ErrorName[3630:3665], + 150: _ErrorName[3665:3685], + 151: _ErrorName[3685:3731], + 152: _ErrorName[3731:3759], + 153: _ErrorName[3759:3775], + 154: _ErrorName[3775:3808], + 155: _ErrorName[3808:3824], + 156: _ErrorName[3824:3851], + 157: _ErrorName[3851:3872], + 158: _ErrorName[3872:3885], + 159: _ErrorName[3885:3901], + 160: _ErrorName[3901:3909], + 161: _ErrorName[3909:3925], + 162: _ErrorName[3925:3944], + 163: _ErrorName[3944:3961], + 164: _ErrorName[3961:3969], + 165: _ErrorName[3969:3995], + 166: _ErrorName[3995:4031], + 167: _ErrorName[4031:4043], + 168: _ErrorName[4043:4054], + 169: _ErrorName[4054:4071], + 170: _ErrorName[4071:4078], + 171: _ErrorName[4078:4109], + 172: _ErrorName[4109:4132], + 173: _ErrorName[4132:4154], + 174: _ErrorName[4154:4168], + 176: _ErrorName[4168:4183], + 177: _ErrorName[4183:4203], + 178: _ErrorName[4203:4234], + 179: _ErrorName[4234:4264], + 180: _ErrorName[4264:4283], + 181: _ErrorName[4283:4296], + 182: _ErrorName[4296:4312], + 183: _ErrorName[4312:4333], + 184: _ErrorName[4333:4352], + 185: _ErrorName[4352:4381], + 186: _ErrorName[4381:4409], + 187: _ErrorName[4409:4436], + 188: _ErrorName[4436:4450], + 189: _ErrorName[4450:4484], + 190: _ErrorName[4484:4512], + 191: _ErrorName[4512:4535], + 192: _ErrorName[4535:4547], + 193: _ErrorName[4547:4561], + 194: _ErrorName[4561:4578], + 195: _ErrorName[4578:4600], + 196: _ErrorName[4600:4628], + 197: _ErrorName[4628:4654], + 198: _ErrorName[4654:4663], + 199: 
_ErrorName[4663:4676], + 200: _ErrorName[4676:4699], + 201: _ErrorName[4699:4712], + 202: _ErrorName[4712:4741], + 203: _ErrorName[4741:4759], + 204: _ErrorName[4759:4771], + 205: _ErrorName[4771:4791], + 206: _ErrorName[4791:4805], + 207: _ErrorName[4805:4825], + 208: _ErrorName[4825:4843], + 209: _ErrorName[4843:4857], + 210: _ErrorName[4857:4870], + 211: _ErrorName[4870:4881], + 212: _ErrorName[4881:4903], + 213: _ErrorName[4903:4922], + 214: _ErrorName[4922:4936], + 215: _ErrorName[4936:4952], + 216: _ErrorName[4952:4989], + 217: _ErrorName[4989:5023], + 218: _ErrorName[5023:5039], + 219: _ErrorName[5039:5057], + 220: _ErrorName[5057:5090], + 221: _ErrorName[5090:5121], + 222: _ErrorName[5121:5154], + 223: _ErrorName[5154:5178], + 224: _ErrorName[5178:5203], + 225: _ErrorName[5203:5215], + 226: _ErrorName[5215:5235], + 227: _ErrorName[5235:5263], + 228: _ErrorName[5263:5292], + 229: _ErrorName[5292:5310], + 230: _ErrorName[5310:5338], + 231: _ErrorName[5338:5368], + 232: _ErrorName[5368:5385], + 233: _ErrorName[5385:5403], + 234: _ErrorName[5403:5422], + 235: _ErrorName[5422:5441], + 236: _ErrorName[5441:5448], + 237: _ErrorName[5448:5469], + 238: _ErrorName[5469:5491], + 239: _ErrorName[5491:5504], + 240: _ErrorName[5504:5517], + 241: _ErrorName[5517:5538], + 242: _ErrorName[5538:5556], + 243: _ErrorName[5556:5572], + 244: _ErrorName[5572:5598], + 246: _ErrorName[5598:5612], + 247: _ErrorName[5612:5626], + 248: _ErrorName[5626:5649], + 250: _ErrorName[5649:5673], + 251: _ErrorName[5673:5688], + 252: _ErrorName[5688:5702], + 253: _ErrorName[5702:5726], + 254: _ErrorName[5726:5744], + 255: _ErrorName[5744:5775], + 256: _ErrorName[5775:5799], + 257: _ErrorName[5799:5821], + 258: _ErrorName[5821:5857], + 260: _ErrorName[5857:5887], + 261: _ErrorName[5887:5911], + 262: _ErrorName[5911:5924], + 263: _ErrorName[5924:5943], + 264: _ErrorName[5943:5968], + 265: _ErrorName[5968:5988], + 266: _ErrorName[5988:6018], + 267: _ErrorName[6018:6058], + 268: _ErrorName[6058:6069], + 269: _ErrorName[6069:6082], + 270: _ErrorName[6082:6097], + 271: _ErrorName[6097:6114], + 272: _ErrorName[6114:6130], + 273: _ErrorName[6130:6150], + 274: _ErrorName[6150:6164], + 275: _ErrorName[6164:6179], + 277: _ErrorName[6179:6193], + 278: _ErrorName[6193:6208], + 279: _ErrorName[6208:6235], + 280: _ErrorName[6235:6252], + 281: _ErrorName[6252:6271], + 282: _ErrorName[6271:6286], + 283: _ErrorName[6286:6318], + 284: _ErrorName[6318:6350], + 285: _ErrorName[6350:6372], + 286: _ErrorName[6372:6409], + 287: _ErrorName[6409:6431], + 288: _ErrorName[6431:6466], + 289: _ErrorName[6466:6490], + 290: _ErrorName[6490:6504], + 291: _ErrorName[6504:6526], + 292: _ErrorName[6526:6544], + 293: _ErrorName[6544:6571], + 294: _ErrorName[6571:6595], + 295: _ErrorName[6595:6614], + 296: _ErrorName[6614:6635], + 297: _ErrorName[6635:6659], + 298: _ErrorName[6659:6670], + 299: _ErrorName[6670:6681], + 300: _ErrorName[6681:6693], + 301: _ErrorName[6693:6720], + 302: _ErrorName[6720:6749], + 303: _ErrorName[6749:6762], + 304: _ErrorName[6762:6776], + 305: _ErrorName[6776:6797], + 306: _ErrorName[6797:6815], + 307: _ErrorName[6815:6829], + 308: _ErrorName[6829:6857], + 309: _ErrorName[6857:6888], + 317: _ErrorName[6888:6908], + 318: _ErrorName[6908:6932], + 319: _ErrorName[6932:6956], + 321: _ErrorName[6956:6990], + 335: _ErrorName[6990:7005], + 336: _ErrorName[7005:7028], + 337: _ErrorName[7028:7047], + 341: _ErrorName[7047:7057], + 342: _ErrorName[7057:7074], + 344: _ErrorName[7074:7093], + 345: _ErrorName[7093:7115], + 346: 
_ErrorName[7115:7137], + 347: _ErrorName[7137:7155], + 349: _ErrorName[7155:7192], + 350: _ErrorName[7192:7218], + 351: _ErrorName[7218:7238], + 352: _ErrorName[7238:7259], + 353: _ErrorName[7259:7303], + 354: _ErrorName[7303:7322], + 355: _ErrorName[7322:7341], + 356: _ErrorName[7341:7351], + 357: _ErrorName[7351:7375], + 358: _ErrorName[7375:7399], + 359: _ErrorName[7399:7437], + 360: _ErrorName[7437:7468], + 361: _ErrorName[7468:7494], + 362: _ErrorName[7494:7527], + 363: _ErrorName[7527:7550], + 364: _ErrorName[7550:7582], + 365: _ErrorName[7582:7602], + 366: _ErrorName[7602:7642], + 367: _ErrorName[7642:7658], + 368: _ErrorName[7658:7666], + 369: _ErrorName[7666:7688], + 370: _ErrorName[7688:7722], + 371: _ErrorName[7722:7753], + 372: _ErrorName[7753:7770], + 373: _ErrorName[7770:7787], + 374: _ErrorName[7787:7810], + 375: _ErrorName[7810:7823], + 376: _ErrorName[7823:7840], + 377: _ErrorName[7840:7868], + 378: _ErrorName[7868:7899], + 379: _ErrorName[7899:7937], + 380: _ErrorName[7937:7948], + 381: _ErrorName[7948:7968], + 382: _ErrorName[7968:7994], + 383: _ErrorName[7994:8021], + 384: _ErrorName[8021:8047], + 385: _ErrorName[8047:8072], + 386: _ErrorName[8072:8086], + 387: _ErrorName[8086:8118], + 388: _ErrorName[8118:8140], + 389: _ErrorName[8140:8163], + 390: _ErrorName[8163:8192], + 391: _ErrorName[8192:8214], + 392: _ErrorName[8214:8233], + 393: _ErrorName[8233:8250], + 394: _ErrorName[8250:8269], + 395: _ErrorName[8269:8304], + 396: _ErrorName[8304:8326], + 397: _ErrorName[8326:8369], + 441: _ErrorName[8369:8406], + 516: _ErrorName[8406:8427], + 999: _ErrorName[8427:8443], + 1000: _ErrorName[8443:8457], + 1001: _ErrorName[8457:8470], + 1002: _ErrorName[8470:8487], + 2001: _ErrorName[8487:8520], + 2002: _ErrorName[8520:8550], +} + +func (i Error) String() string { + if str, ok := _ErrorMap[i]; ok { + return str + } + return fmt.Sprintf("Error(%d)", i) +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. 
+func _ErrorNoOp() { + var x [1]struct{} + _ = x[ErrUnsupportedMethod-(1)] + _ = x[ErrUnsupportedParameter-(2)] + _ = x[ErrUnexpectedEndOfFile-(3)] + _ = x[ErrExpectedEndOfFile-(4)] + _ = x[ErrCannotParseText-(6)] + _ = x[ErrIncorrectNumberOfColumns-(7)] + _ = x[ErrThereIsNoColumn-(8)] + _ = x[ErrSizesOfColumnsDoesntMatch-(9)] + _ = x[ErrNotFoundColumnInBlock-(10)] + _ = x[ErrPositionOutOfBound-(11)] + _ = x[ErrParameterOutOfBound-(12)] + _ = x[ErrSizesOfColumnsInTupleDoesntMatch-(13)] + _ = x[ErrDuplicateColumn-(15)] + _ = x[ErrNoSuchColumnInTable-(16)] + _ = x[ErrDelimiterInStringLiteralDoesntMatch-(17)] + _ = x[ErrCannotInsertElementIntoConstantColumn-(18)] + _ = x[ErrSizeOfFixedStringDoesntMatch-(19)] + _ = x[ErrNumberOfColumnsDoesntMatch-(20)] + _ = x[ErrCannotReadAllDataFromTabSeparatedInput-(21)] + _ = x[ErrCannotParseAllValueFromTabSeparatedInput-(22)] + _ = x[ErrCannotReadFromIstream-(23)] + _ = x[ErrCannotWriteToOstream-(24)] + _ = x[ErrCannotParseEscapeSequence-(25)] + _ = x[ErrCannotParseQuotedString-(26)] + _ = x[ErrCannotParseInputAssertionFailed-(27)] + _ = x[ErrCannotPrintFloatOrDoubleNumber-(28)] + _ = x[ErrCannotPrintInteger-(29)] + _ = x[ErrCannotReadSizeOfCompressedChunk-(30)] + _ = x[ErrCannotReadCompressedChunk-(31)] + _ = x[ErrAttemptToReadAfterEOF-(32)] + _ = x[ErrCannotReadAllData-(33)] + _ = x[ErrTooManyArgumentsForFunction-(34)] + _ = x[ErrTooLessArgumentsForFunction-(35)] + _ = x[ErrBadArguments-(36)] + _ = x[ErrUnknownElementInAst-(37)] + _ = x[ErrCannotParseDate-(38)] + _ = x[ErrTooLargeSizeCompressed-(39)] + _ = x[ErrChecksumDoesntMatch-(40)] + _ = x[ErrCannotParseDatetime-(41)] + _ = x[ErrNumberOfArgumentsDoesntMatch-(42)] + _ = x[ErrIllegalTypeOfArgument-(43)] + _ = x[ErrIllegalColumn-(44)] + _ = x[ErrIllegalNumberOfResultColumns-(45)] + _ = x[ErrUnknownFunction-(46)] + _ = x[ErrUnknownIdentifier-(47)] + _ = x[ErrNotImplemented-(48)] + _ = x[ErrLogicalError-(49)] + _ = x[ErrUnknownType-(50)] + _ = x[ErrEmptyListOfColumnsQueried-(51)] + _ = x[ErrColumnQueriedMoreThanOnce-(52)] + _ = x[ErrTypeMismatch-(53)] + _ = x[ErrStorageDoesntAllowParameters-(54)] + _ = x[ErrStorageRequiresParameter-(55)] + _ = x[ErrUnknownStorage-(56)] + _ = x[ErrTableAlreadyExists-(57)] + _ = x[ErrTableMetadataAlreadyExists-(58)] + _ = x[ErrIllegalTypeOfColumnForFilter-(59)] + _ = x[ErrUnknownTable-(60)] + _ = x[ErrOnlyFilterColumnInBlock-(61)] + _ = x[ErrSyntaxError-(62)] + _ = x[ErrUnknownAggregateFunction-(63)] + _ = x[ErrCannotReadAggregateFunctionFromText-(64)] + _ = x[ErrCannotWriteAggregateFunctionAsText-(65)] + _ = x[ErrNotAColumn-(66)] + _ = x[ErrIllegalKeyOfAggregation-(67)] + _ = x[ErrCannotGetSizeOfField-(68)] + _ = x[ErrArgumentOutOfBound-(69)] + _ = x[ErrCannotConvertType-(70)] + _ = x[ErrCannotWriteAfterEndOfBuffer-(71)] + _ = x[ErrCannotParseNumber-(72)] + _ = x[ErrUnknownFormat-(73)] + _ = x[ErrCannotReadFromFileDescriptor-(74)] + _ = x[ErrCannotWriteToFileDescriptor-(75)] + _ = x[ErrCannotOpenFile-(76)] + _ = x[ErrCannotCloseFile-(77)] + _ = x[ErrUnknownTypeOfQuery-(78)] + _ = x[ErrIncorrectFileName-(79)] + _ = x[ErrIncorrectQuery-(80)] + _ = x[ErrUnknownDatabase-(81)] + _ = x[ErrDatabaseAlreadyExists-(82)] + _ = x[ErrDirectoryDoesntExist-(83)] + _ = x[ErrDirectoryAlreadyExists-(84)] + _ = x[ErrFormatIsNotSuitableForInput-(85)] + _ = x[ErrReceivedErrorFromRemoteIoServer-(86)] + _ = x[ErrCannotSeekThroughFile-(87)] + _ = x[ErrCannotTruncateFile-(88)] + _ = x[ErrUnknownCompressionMethod-(89)] + _ = x[ErrEmptyListOfColumnsPassed-(90)] + _ = 
x[ErrSizesOfMarksFilesAreInconsistent-(91)] + _ = x[ErrEmptyDataPassed-(92)] + _ = x[ErrUnknownAggregatedDataVariant-(93)] + _ = x[ErrCannotMergeDifferentAggregatedDataVariants-(94)] + _ = x[ErrCannotReadFromSocket-(95)] + _ = x[ErrCannotWriteToSocket-(96)] + _ = x[ErrCannotReadAllDataFromChunkedInput-(97)] + _ = x[ErrCannotWriteToEmptyBlockOutputStream-(98)] + _ = x[ErrUnknownPacketFromClient-(99)] + _ = x[ErrUnknownPacketFromServer-(100)] + _ = x[ErrUnexpectedPacketFromClient-(101)] + _ = x[ErrUnexpectedPacketFromServer-(102)] + _ = x[ErrReceivedDataForWrongQueryID-(103)] + _ = x[ErrTooSmallBufferSize-(104)] + _ = x[ErrCannotReadHistory-(105)] + _ = x[ErrCannotAppendHistory-(106)] + _ = x[ErrFileDoesntExist-(107)] + _ = x[ErrNoDataToInsert-(108)] + _ = x[ErrCannotBlockSignal-(109)] + _ = x[ErrCannotUnblockSignal-(110)] + _ = x[ErrCannotManipulateSigset-(111)] + _ = x[ErrCannotWaitForSignal-(112)] + _ = x[ErrThereIsNoSession-(113)] + _ = x[ErrCannotClockGettime-(114)] + _ = x[ErrUnknownSetting-(115)] + _ = x[ErrThereIsNoDefaultValue-(116)] + _ = x[ErrIncorrectData-(117)] + _ = x[ErrEngineRequired-(119)] + _ = x[ErrCannotInsertValueOfDifferentSizeIntoTuple-(120)] + _ = x[ErrUnknownSetDataVariant-(121)] + _ = x[ErrIncompatibleColumns-(122)] + _ = x[ErrUnknownTypeOfAstNode-(123)] + _ = x[ErrIncorrectElementOfSet-(124)] + _ = x[ErrIncorrectResultOfScalarSubquery-(125)] + _ = x[ErrCannotGetReturnType-(126)] + _ = x[ErrIllegalIndex-(127)] + _ = x[ErrTooLargeArraySize-(128)] + _ = x[ErrFunctionIsSpecial-(129)] + _ = x[ErrCannotReadArrayFromText-(130)] + _ = x[ErrTooLargeStringSize-(131)] + _ = x[ErrCannotCreateTableFromMetadata-(132)] + _ = x[ErrAggregateFunctionDoesntAllowParameters-(133)] + _ = x[ErrParametersToAggregateFunctionsMustBeLiterals-(134)] + _ = x[ErrZeroArrayOrTupleIndex-(135)] + _ = x[ErrUnknownElementInConfig-(137)] + _ = x[ErrExcessiveElementInConfig-(138)] + _ = x[ErrNoElementsInConfig-(139)] + _ = x[ErrAllRequestedColumnsAreMissing-(140)] + _ = x[ErrSamplingNotSupported-(141)] + _ = x[ErrNotFoundNode-(142)] + _ = x[ErrFoundMoreThanOneNode-(143)] + _ = x[ErrFirstDateIsBiggerThanLastDate-(144)] + _ = x[ErrUnknownOverflowMode-(145)] + _ = x[ErrQuerySectionDoesntMakeSense-(146)] + _ = x[ErrNotFoundFunctionElementForAggregate-(147)] + _ = x[ErrNotFoundRelationElementForCondition-(148)] + _ = x[ErrNotFoundRHSElementForCondition-(149)] + _ = x[ErrNoAttributesListed-(150)] + _ = x[ErrIndexOfColumnInSortClauseIsOutOfRange-(151)] + _ = x[ErrUnknownDirectionOfSorting-(152)] + _ = x[ErrIllegalDivision-(153)] + _ = x[ErrAggregateFunctionNotApplicable-(154)] + _ = x[ErrUnknownRelation-(155)] + _ = x[ErrDictionariesWasNotLoaded-(156)] + _ = x[ErrIllegalOverflowMode-(157)] + _ = x[ErrTooManyRows-(158)] + _ = x[ErrTimeoutExceeded-(159)] + _ = x[ErrTooSlow-(160)] + _ = x[ErrTooManyColumns-(161)] + _ = x[ErrTooDeepSubqueries-(162)] + _ = x[ErrTooDeepPipeline-(163)] + _ = x[ErrReadonly-(164)] + _ = x[ErrTooManyTemporaryColumns-(165)] + _ = x[ErrTooManyTemporaryNonConstColumns-(166)] + _ = x[ErrTooDeepAst-(167)] + _ = x[ErrTooBigAst-(168)] + _ = x[ErrBadTypeOfField-(169)] + _ = x[ErrBadGet-(170)] + _ = x[ErrBlocksHaveDifferentStructure-(171)] + _ = x[ErrCannotCreateDirectory-(172)] + _ = x[ErrCannotAllocateMemory-(173)] + _ = x[ErrCyclicAliases-(174)] + _ = x[ErrChunkNotFound-(176)] + _ = x[ErrDuplicateChunkName-(177)] + _ = x[ErrMultipleAliasesForExpression-(178)] + _ = x[ErrMultipleExpressionsForAlias-(179)] + _ = x[ErrThereIsNoProfile-(180)] + _ = x[ErrIllegalFinal-(181)] + _ = 
x[ErrIllegalPrewhere-(182)] + _ = x[ErrUnexpectedExpression-(183)] + _ = x[ErrIllegalAggregation-(184)] + _ = x[ErrUnsupportedMyisamBlockType-(185)] + _ = x[ErrUnsupportedCollationLocale-(186)] + _ = x[ErrCollationComparisonFailed-(187)] + _ = x[ErrUnknownAction-(188)] + _ = x[ErrTableMustNotBeCreatedManually-(189)] + _ = x[ErrSizesOfArraysDoesntMatch-(190)] + _ = x[ErrSetSizeLimitExceeded-(191)] + _ = x[ErrUnknownUser-(192)] + _ = x[ErrWrongPassword-(193)] + _ = x[ErrRequiredPassword-(194)] + _ = x[ErrIPAddressNotAllowed-(195)] + _ = x[ErrUnknownAddressPatternType-(196)] + _ = x[ErrServerRevisionIsTooOld-(197)] + _ = x[ErrDNSError-(198)] + _ = x[ErrUnknownQuota-(199)] + _ = x[ErrQuotaDoesntAllowKeys-(200)] + _ = x[ErrQuotaExpired-(201)] + _ = x[ErrTooManySimultaneousQueries-(202)] + _ = x[ErrNoFreeConnection-(203)] + _ = x[ErrCannotFsync-(204)] + _ = x[ErrNestedTypeTooDeep-(205)] + _ = x[ErrAliasRequired-(206)] + _ = x[ErrAmbiguousIdentifier-(207)] + _ = x[ErrEmptyNestedTable-(208)] + _ = x[ErrSocketTimeout-(209)] + _ = x[ErrNetworkError-(210)] + _ = x[ErrEmptyQuery-(211)] + _ = x[ErrUnknownLoadBalancing-(212)] + _ = x[ErrUnknownTotalsMode-(213)] + _ = x[ErrCannotStatvfs-(214)] + _ = x[ErrNotAnAggregate-(215)] + _ = x[ErrQueryWithSameIDIsAlreadyRunning-(216)] + _ = x[ErrClientHasConnectedToWrongPort-(217)] + _ = x[ErrTableIsDropped-(218)] + _ = x[ErrDatabaseNotEmpty-(219)] + _ = x[ErrDuplicateInterserverIoEndpoint-(220)] + _ = x[ErrNoSuchInterserverIoEndpoint-(221)] + _ = x[ErrAddingReplicaToNonEmptyTable-(222)] + _ = x[ErrUnexpectedAstStructure-(223)] + _ = x[ErrReplicaIsAlreadyActive-(224)] + _ = x[ErrNoZookeeper-(225)] + _ = x[ErrNoFileInDataPart-(226)] + _ = x[ErrUnexpectedFileInDataPart-(227)] + _ = x[ErrBadSizeOfFileInDataPart-(228)] + _ = x[ErrQueryIsTooLarge-(229)] + _ = x[ErrNotFoundExpectedDataPart-(230)] + _ = x[ErrTooManyUnexpectedDataParts-(231)] + _ = x[ErrNoSuchDataPart-(232)] + _ = x[ErrBadDataPartName-(233)] + _ = x[ErrNoReplicaHasPart-(234)] + _ = x[ErrDuplicateDataPart-(235)] + _ = x[ErrAborted-(236)] + _ = x[ErrNoReplicaNameGiven-(237)] + _ = x[ErrFormatVersionTooOld-(238)] + _ = x[ErrCannotMunmap-(239)] + _ = x[ErrCannotMremap-(240)] + _ = x[ErrMemoryLimitExceeded-(241)] + _ = x[ErrTableIsReadOnly-(242)] + _ = x[ErrNotEnoughSpace-(243)] + _ = x[ErrUnexpectedZookeeperError-(244)] + _ = x[ErrCorruptedData-(246)] + _ = x[ErrIncorrectMark-(247)] + _ = x[ErrInvalidPartitionValue-(248)] + _ = x[ErrNotEnoughBlockNumbers-(250)] + _ = x[ErrNoSuchReplica-(251)] + _ = x[ErrTooManyParts-(252)] + _ = x[ErrReplicaIsAlreadyExist-(253)] + _ = x[ErrNoActiveReplicas-(254)] + _ = x[ErrTooManyRetriesToFetchParts-(255)] + _ = x[ErrPartitionAlreadyExists-(256)] + _ = x[ErrPartitionDoesntExist-(257)] + _ = x[ErrUnionAllResultStructuresMismatch-(258)] + _ = x[ErrClientOutputFormatSpecified-(260)] + _ = x[ErrUnknownBlockInfoField-(261)] + _ = x[ErrBadCollation-(262)] + _ = x[ErrCannotCompileCode-(263)] + _ = x[ErrIncompatibleTypeOfJoin-(264)] + _ = x[ErrNoAvailableReplica-(265)] + _ = x[ErrMismatchReplicasDataSources-(266)] + _ = x[ErrStorageDoesntSupportParallelReplicas-(267)] + _ = x[ErrCPUIDError-(268)] + _ = x[ErrInfiniteLoop-(269)] + _ = x[ErrCannotCompress-(270)] + _ = x[ErrCannotDecompress-(271)] + _ = x[ErrAioSubmitError-(272)] + _ = x[ErrAioCompletionError-(273)] + _ = x[ErrAioReadError-(274)] + _ = x[ErrAioWriteError-(275)] + _ = x[ErrIndexNotUsed-(277)] + _ = x[ErrLeadershipLost-(278)] + _ = x[ErrAllConnectionTriesFailed-(279)] + _ = x[ErrNoAvailableData-(280)] + _ = 
x[ErrDictionaryIsEmpty-(281)] + _ = x[ErrIncorrectIndex-(282)] + _ = x[ErrUnknownDistributedProductMode-(283)] + _ = x[ErrUnknownGlobalSubqueriesMethod-(284)] + _ = x[ErrTooLessLiveReplicas-(285)] + _ = x[ErrUnsatisfiedQuorumForPreviousWrite-(286)] + _ = x[ErrUnknownFormatVersion-(287)] + _ = x[ErrDistributedInJoinSubqueryDenied-(288)] + _ = x[ErrReplicaIsNotInQuorum-(289)] + _ = x[ErrLimitExceeded-(290)] + _ = x[ErrDatabaseAccessDenied-(291)] + _ = x[ErrLeadershipChanged-(292)] + _ = x[ErrMongodbCannotAuthenticate-(293)] + _ = x[ErrInvalidBlockExtraInfo-(294)] + _ = x[ErrReceivedEmptyData-(295)] + _ = x[ErrNoRemoteShardFound-(296)] + _ = x[ErrShardHasNoConnections-(297)] + _ = x[ErrCannotPipe-(298)] + _ = x[ErrCannotFork-(299)] + _ = x[ErrCannotDlsym-(300)] + _ = x[ErrCannotCreateChildProcess-(301)] + _ = x[ErrChildWasNotExitedNormally-(302)] + _ = x[ErrCannotSelect-(303)] + _ = x[ErrCannotWaitpid-(304)] + _ = x[ErrTableWasNotDropped-(305)] + _ = x[ErrTooDeepRecursion-(306)] + _ = x[ErrTooManyBytes-(307)] + _ = x[ErrUnexpectedNodeInZookeeper-(308)] + _ = x[ErrFunctionCannotHaveParameters-(309)] + _ = x[ErrInvalidShardWeight-(317)] + _ = x[ErrInvalidConfigParameter-(318)] + _ = x[ErrUnknownStatusOfInsert-(319)] + _ = x[ErrValueIsOutOfRangeOfDataType-(321)] + _ = x[ErrBarrierTimeout-(335)] + _ = x[ErrUnknownDatabaseEngine-(336)] + _ = x[ErrDdlGuardIsActive-(337)] + _ = x[ErrUnfinished-(341)] + _ = x[ErrMetadataMismatch-(342)] + _ = x[ErrSupportIsDisabled-(344)] + _ = x[ErrTableDiffersTooMuch-(345)] + _ = x[ErrCannotConvertCharset-(346)] + _ = x[ErrCannotLoadConfig-(347)] + _ = x[ErrCannotInsertNullInOrdinaryColumn-(349)] + _ = x[ErrIncompatibleSourceTables-(350)] + _ = x[ErrAmbiguousTableName-(351)] + _ = x[ErrAmbiguousColumnName-(352)] + _ = x[ErrIndexOfPositionalArgumentIsOutOfRange-(353)] + _ = x[ErrZlibInflateFailed-(354)] + _ = x[ErrZlibDeflateFailed-(355)] + _ = x[ErrBadLambda-(356)] + _ = x[ErrReservedIdentifierName-(357)] + _ = x[ErrIntoOutfileNotAllowed-(358)] + _ = x[ErrTableSizeExceedsMaxDropSizeLimit-(359)] + _ = x[ErrCannotCreateCharsetConverter-(360)] + _ = x[ErrSeekPositionOutOfBound-(361)] + _ = x[ErrCurrentWriteBufferIsExhausted-(362)] + _ = x[ErrCannotCreateIoBuffer-(363)] + _ = x[ErrReceivedErrorTooManyRequests-(364)] + _ = x[ErrOutputIsNotSorted-(365)] + _ = x[ErrSizesOfNestedColumnsAreInconsistent-(366)] + _ = x[ErrTooManyFetches-(367)] + _ = x[ErrBadCast-(368)] + _ = x[ErrAllReplicasAreStale-(369)] + _ = x[ErrDataTypeCannotBeUsedInTables-(370)] + _ = x[ErrInconsistentClusterDefinition-(371)] + _ = x[ErrSessionNotFound-(372)] + _ = x[ErrSessionIsLocked-(373)] + _ = x[ErrInvalidSessionTimeout-(374)] + _ = x[ErrCannotDlopen-(375)] + _ = x[ErrCannotParseUUID-(376)] + _ = x[ErrIllegalSyntaxForDataType-(377)] + _ = x[ErrDataTypeCannotHaveArguments-(378)] + _ = x[ErrUnknownStatusOfDistributedDdlTask-(379)] + _ = x[ErrCannotKill-(380)] + _ = x[ErrHTTPLengthRequired-(381)] + _ = x[ErrCannotLoadCatboostModel-(382)] + _ = x[ErrCannotApplyCatboostModel-(383)] + _ = x[ErrPartIsTemporarilyLocked-(384)] + _ = x[ErrMultipleStreamsRequired-(385)] + _ = x[ErrNoCommonType-(386)] + _ = x[ErrExternalLoadableAlreadyExists-(387)] + _ = x[ErrCannotAssignOptimize-(388)] + _ = x[ErrInsertWasDeduplicated-(389)] + _ = x[ErrCannotGetCreateTableQuery-(390)] + _ = x[ErrExternalLibraryError-(391)] + _ = x[ErrQueryIsProhibited-(392)] + _ = x[ErrThereIsNoQuery-(393)] + _ = x[ErrQueryWasCancelled-(394)] + _ = x[ErrFunctionThrowIfValueIsNonZero-(395)] + _ = x[ErrTooManyRowsOrBytes-(396)] + _ = 
x[ErrQueryIsNotSupportedInMaterializedView-(397)] + _ = x[ErrCannotParseDomainValueFromString-(441)] + _ = x[ErrAuthenticationFailed-(516)] + _ = x[ErrKeeperException-(999)] + _ = x[ErrPocoException-(1000)] + _ = x[ErrStdException-(1001)] + _ = x[ErrUnknownException-(1002)] + _ = x[ErrConditionalTreeParentNotFound-(2001)] + _ = x[ErrIllegalProjectionManipulator-(2002)] +} + +var _ErrorValues = []Error{ErrUnsupportedMethod, ErrUnsupportedParameter, ErrUnexpectedEndOfFile, ErrExpectedEndOfFile, ErrCannotParseText, ErrIncorrectNumberOfColumns, ErrThereIsNoColumn, ErrSizesOfColumnsDoesntMatch, ErrNotFoundColumnInBlock, ErrPositionOutOfBound, ErrParameterOutOfBound, ErrSizesOfColumnsInTupleDoesntMatch, ErrDuplicateColumn, ErrNoSuchColumnInTable, ErrDelimiterInStringLiteralDoesntMatch, ErrCannotInsertElementIntoConstantColumn, ErrSizeOfFixedStringDoesntMatch, ErrNumberOfColumnsDoesntMatch, ErrCannotReadAllDataFromTabSeparatedInput, ErrCannotParseAllValueFromTabSeparatedInput, ErrCannotReadFromIstream, ErrCannotWriteToOstream, ErrCannotParseEscapeSequence, ErrCannotParseQuotedString, ErrCannotParseInputAssertionFailed, ErrCannotPrintFloatOrDoubleNumber, ErrCannotPrintInteger, ErrCannotReadSizeOfCompressedChunk, ErrCannotReadCompressedChunk, ErrAttemptToReadAfterEOF, ErrCannotReadAllData, ErrTooManyArgumentsForFunction, ErrTooLessArgumentsForFunction, ErrBadArguments, ErrUnknownElementInAst, ErrCannotParseDate, ErrTooLargeSizeCompressed, ErrChecksumDoesntMatch, ErrCannotParseDatetime, ErrNumberOfArgumentsDoesntMatch, ErrIllegalTypeOfArgument, ErrIllegalColumn, ErrIllegalNumberOfResultColumns, ErrUnknownFunction, ErrUnknownIdentifier, ErrNotImplemented, ErrLogicalError, ErrUnknownType, ErrEmptyListOfColumnsQueried, ErrColumnQueriedMoreThanOnce, ErrTypeMismatch, ErrStorageDoesntAllowParameters, ErrStorageRequiresParameter, ErrUnknownStorage, ErrTableAlreadyExists, ErrTableMetadataAlreadyExists, ErrIllegalTypeOfColumnForFilter, ErrUnknownTable, ErrOnlyFilterColumnInBlock, ErrSyntaxError, ErrUnknownAggregateFunction, ErrCannotReadAggregateFunctionFromText, ErrCannotWriteAggregateFunctionAsText, ErrNotAColumn, ErrIllegalKeyOfAggregation, ErrCannotGetSizeOfField, ErrArgumentOutOfBound, ErrCannotConvertType, ErrCannotWriteAfterEndOfBuffer, ErrCannotParseNumber, ErrUnknownFormat, ErrCannotReadFromFileDescriptor, ErrCannotWriteToFileDescriptor, ErrCannotOpenFile, ErrCannotCloseFile, ErrUnknownTypeOfQuery, ErrIncorrectFileName, ErrIncorrectQuery, ErrUnknownDatabase, ErrDatabaseAlreadyExists, ErrDirectoryDoesntExist, ErrDirectoryAlreadyExists, ErrFormatIsNotSuitableForInput, ErrReceivedErrorFromRemoteIoServer, ErrCannotSeekThroughFile, ErrCannotTruncateFile, ErrUnknownCompressionMethod, ErrEmptyListOfColumnsPassed, ErrSizesOfMarksFilesAreInconsistent, ErrEmptyDataPassed, ErrUnknownAggregatedDataVariant, ErrCannotMergeDifferentAggregatedDataVariants, ErrCannotReadFromSocket, ErrCannotWriteToSocket, ErrCannotReadAllDataFromChunkedInput, ErrCannotWriteToEmptyBlockOutputStream, ErrUnknownPacketFromClient, ErrUnknownPacketFromServer, ErrUnexpectedPacketFromClient, ErrUnexpectedPacketFromServer, ErrReceivedDataForWrongQueryID, ErrTooSmallBufferSize, ErrCannotReadHistory, ErrCannotAppendHistory, ErrFileDoesntExist, ErrNoDataToInsert, ErrCannotBlockSignal, ErrCannotUnblockSignal, ErrCannotManipulateSigset, ErrCannotWaitForSignal, ErrThereIsNoSession, ErrCannotClockGettime, ErrUnknownSetting, ErrThereIsNoDefaultValue, ErrIncorrectData, ErrEngineRequired, ErrCannotInsertValueOfDifferentSizeIntoTuple, 
ErrUnknownSetDataVariant, ErrIncompatibleColumns, ErrUnknownTypeOfAstNode, ErrIncorrectElementOfSet, ErrIncorrectResultOfScalarSubquery, ErrCannotGetReturnType, ErrIllegalIndex, ErrTooLargeArraySize, ErrFunctionIsSpecial, ErrCannotReadArrayFromText, ErrTooLargeStringSize, ErrCannotCreateTableFromMetadata, ErrAggregateFunctionDoesntAllowParameters, ErrParametersToAggregateFunctionsMustBeLiterals, ErrZeroArrayOrTupleIndex, ErrUnknownElementInConfig, ErrExcessiveElementInConfig, ErrNoElementsInConfig, ErrAllRequestedColumnsAreMissing, ErrSamplingNotSupported, ErrNotFoundNode, ErrFoundMoreThanOneNode, ErrFirstDateIsBiggerThanLastDate, ErrUnknownOverflowMode, ErrQuerySectionDoesntMakeSense, ErrNotFoundFunctionElementForAggregate, ErrNotFoundRelationElementForCondition, ErrNotFoundRHSElementForCondition, ErrNoAttributesListed, ErrIndexOfColumnInSortClauseIsOutOfRange, ErrUnknownDirectionOfSorting, ErrIllegalDivision, ErrAggregateFunctionNotApplicable, ErrUnknownRelation, ErrDictionariesWasNotLoaded, ErrIllegalOverflowMode, ErrTooManyRows, ErrTimeoutExceeded, ErrTooSlow, ErrTooManyColumns, ErrTooDeepSubqueries, ErrTooDeepPipeline, ErrReadonly, ErrTooManyTemporaryColumns, ErrTooManyTemporaryNonConstColumns, ErrTooDeepAst, ErrTooBigAst, ErrBadTypeOfField, ErrBadGet, ErrBlocksHaveDifferentStructure, ErrCannotCreateDirectory, ErrCannotAllocateMemory, ErrCyclicAliases, ErrChunkNotFound, ErrDuplicateChunkName, ErrMultipleAliasesForExpression, ErrMultipleExpressionsForAlias, ErrThereIsNoProfile, ErrIllegalFinal, ErrIllegalPrewhere, ErrUnexpectedExpression, ErrIllegalAggregation, ErrUnsupportedMyisamBlockType, ErrUnsupportedCollationLocale, ErrCollationComparisonFailed, ErrUnknownAction, ErrTableMustNotBeCreatedManually, ErrSizesOfArraysDoesntMatch, ErrSetSizeLimitExceeded, ErrUnknownUser, ErrWrongPassword, ErrRequiredPassword, ErrIPAddressNotAllowed, ErrUnknownAddressPatternType, ErrServerRevisionIsTooOld, ErrDNSError, ErrUnknownQuota, ErrQuotaDoesntAllowKeys, ErrQuotaExpired, ErrTooManySimultaneousQueries, ErrNoFreeConnection, ErrCannotFsync, ErrNestedTypeTooDeep, ErrAliasRequired, ErrAmbiguousIdentifier, ErrEmptyNestedTable, ErrSocketTimeout, ErrNetworkError, ErrEmptyQuery, ErrUnknownLoadBalancing, ErrUnknownTotalsMode, ErrCannotStatvfs, ErrNotAnAggregate, ErrQueryWithSameIDIsAlreadyRunning, ErrClientHasConnectedToWrongPort, ErrTableIsDropped, ErrDatabaseNotEmpty, ErrDuplicateInterserverIoEndpoint, ErrNoSuchInterserverIoEndpoint, ErrAddingReplicaToNonEmptyTable, ErrUnexpectedAstStructure, ErrReplicaIsAlreadyActive, ErrNoZookeeper, ErrNoFileInDataPart, ErrUnexpectedFileInDataPart, ErrBadSizeOfFileInDataPart, ErrQueryIsTooLarge, ErrNotFoundExpectedDataPart, ErrTooManyUnexpectedDataParts, ErrNoSuchDataPart, ErrBadDataPartName, ErrNoReplicaHasPart, ErrDuplicateDataPart, ErrAborted, ErrNoReplicaNameGiven, ErrFormatVersionTooOld, ErrCannotMunmap, ErrCannotMremap, ErrMemoryLimitExceeded, ErrTableIsReadOnly, ErrNotEnoughSpace, ErrUnexpectedZookeeperError, ErrCorruptedData, ErrIncorrectMark, ErrInvalidPartitionValue, ErrNotEnoughBlockNumbers, ErrNoSuchReplica, ErrTooManyParts, ErrReplicaIsAlreadyExist, ErrNoActiveReplicas, ErrTooManyRetriesToFetchParts, ErrPartitionAlreadyExists, ErrPartitionDoesntExist, ErrUnionAllResultStructuresMismatch, ErrClientOutputFormatSpecified, ErrUnknownBlockInfoField, ErrBadCollation, ErrCannotCompileCode, ErrIncompatibleTypeOfJoin, ErrNoAvailableReplica, ErrMismatchReplicasDataSources, ErrStorageDoesntSupportParallelReplicas, ErrCPUIDError, ErrInfiniteLoop, ErrCannotCompress, 
ErrCannotDecompress, ErrAioSubmitError, ErrAioCompletionError, ErrAioReadError, ErrAioWriteError, ErrIndexNotUsed, ErrLeadershipLost, ErrAllConnectionTriesFailed, ErrNoAvailableData, ErrDictionaryIsEmpty, ErrIncorrectIndex, ErrUnknownDistributedProductMode, ErrUnknownGlobalSubqueriesMethod, ErrTooLessLiveReplicas, ErrUnsatisfiedQuorumForPreviousWrite, ErrUnknownFormatVersion, ErrDistributedInJoinSubqueryDenied, ErrReplicaIsNotInQuorum, ErrLimitExceeded, ErrDatabaseAccessDenied, ErrLeadershipChanged, ErrMongodbCannotAuthenticate, ErrInvalidBlockExtraInfo, ErrReceivedEmptyData, ErrNoRemoteShardFound, ErrShardHasNoConnections, ErrCannotPipe, ErrCannotFork, ErrCannotDlsym, ErrCannotCreateChildProcess, ErrChildWasNotExitedNormally, ErrCannotSelect, ErrCannotWaitpid, ErrTableWasNotDropped, ErrTooDeepRecursion, ErrTooManyBytes, ErrUnexpectedNodeInZookeeper, ErrFunctionCannotHaveParameters, ErrInvalidShardWeight, ErrInvalidConfigParameter, ErrUnknownStatusOfInsert, ErrValueIsOutOfRangeOfDataType, ErrBarrierTimeout, ErrUnknownDatabaseEngine, ErrDdlGuardIsActive, ErrUnfinished, ErrMetadataMismatch, ErrSupportIsDisabled, ErrTableDiffersTooMuch, ErrCannotConvertCharset, ErrCannotLoadConfig, ErrCannotInsertNullInOrdinaryColumn, ErrIncompatibleSourceTables, ErrAmbiguousTableName, ErrAmbiguousColumnName, ErrIndexOfPositionalArgumentIsOutOfRange, ErrZlibInflateFailed, ErrZlibDeflateFailed, ErrBadLambda, ErrReservedIdentifierName, ErrIntoOutfileNotAllowed, ErrTableSizeExceedsMaxDropSizeLimit, ErrCannotCreateCharsetConverter, ErrSeekPositionOutOfBound, ErrCurrentWriteBufferIsExhausted, ErrCannotCreateIoBuffer, ErrReceivedErrorTooManyRequests, ErrOutputIsNotSorted, ErrSizesOfNestedColumnsAreInconsistent, ErrTooManyFetches, ErrBadCast, ErrAllReplicasAreStale, ErrDataTypeCannotBeUsedInTables, ErrInconsistentClusterDefinition, ErrSessionNotFound, ErrSessionIsLocked, ErrInvalidSessionTimeout, ErrCannotDlopen, ErrCannotParseUUID, ErrIllegalSyntaxForDataType, ErrDataTypeCannotHaveArguments, ErrUnknownStatusOfDistributedDdlTask, ErrCannotKill, ErrHTTPLengthRequired, ErrCannotLoadCatboostModel, ErrCannotApplyCatboostModel, ErrPartIsTemporarilyLocked, ErrMultipleStreamsRequired, ErrNoCommonType, ErrExternalLoadableAlreadyExists, ErrCannotAssignOptimize, ErrInsertWasDeduplicated, ErrCannotGetCreateTableQuery, ErrExternalLibraryError, ErrQueryIsProhibited, ErrThereIsNoQuery, ErrQueryWasCancelled, ErrFunctionThrowIfValueIsNonZero, ErrTooManyRowsOrBytes, ErrQueryIsNotSupportedInMaterializedView, ErrCannotParseDomainValueFromString, ErrAuthenticationFailed, ErrKeeperException, ErrPocoException, ErrStdException, ErrUnknownException, ErrConditionalTreeParentNotFound, ErrIllegalProjectionManipulator} + +var _ErrorNameToValueMap = map[string]Error{ + _ErrorName[0:18]: ErrUnsupportedMethod, + _ErrorLowerName[0:18]: ErrUnsupportedMethod, + _ErrorName[18:39]: ErrUnsupportedParameter, + _ErrorLowerName[18:39]: ErrUnsupportedParameter, + _ErrorName[39:61]: ErrUnexpectedEndOfFile, + _ErrorLowerName[39:61]: ErrUnexpectedEndOfFile, + _ErrorName[61:81]: ErrExpectedEndOfFile, + _ErrorLowerName[61:81]: ErrExpectedEndOfFile, + _ErrorName[81:98]: ErrCannotParseText, + _ErrorLowerName[81:98]: ErrCannotParseText, + _ErrorName[98:125]: ErrIncorrectNumberOfColumns, + _ErrorLowerName[98:125]: ErrIncorrectNumberOfColumns, + _ErrorName[125:143]: ErrThereIsNoColumn, + _ErrorLowerName[125:143]: ErrThereIsNoColumn, + _ErrorName[143:172]: ErrSizesOfColumnsDoesntMatch, + _ErrorLowerName[143:172]: ErrSizesOfColumnsDoesntMatch, + _ErrorName[172:197]: 
ErrNotFoundColumnInBlock, + _ErrorLowerName[172:197]: ErrNotFoundColumnInBlock, + _ErrorName[197:218]: ErrPositionOutOfBound, + _ErrorLowerName[197:218]: ErrPositionOutOfBound, + _ErrorName[218:240]: ErrParameterOutOfBound, + _ErrorLowerName[218:240]: ErrParameterOutOfBound, + _ErrorName[240:278]: ErrSizesOfColumnsInTupleDoesntMatch, + _ErrorLowerName[240:278]: ErrSizesOfColumnsInTupleDoesntMatch, + _ErrorName[278:294]: ErrDuplicateColumn, + _ErrorLowerName[278:294]: ErrDuplicateColumn, + _ErrorName[294:317]: ErrNoSuchColumnInTable, + _ErrorLowerName[294:317]: ErrNoSuchColumnInTable, + _ErrorName[317:357]: ErrDelimiterInStringLiteralDoesntMatch, + _ErrorLowerName[317:357]: ErrDelimiterInStringLiteralDoesntMatch, + _ErrorName[357:399]: ErrCannotInsertElementIntoConstantColumn, + _ErrorLowerName[357:399]: ErrCannotInsertElementIntoConstantColumn, + _ErrorName[399:432]: ErrSizeOfFixedStringDoesntMatch, + _ErrorLowerName[399:432]: ErrSizeOfFixedStringDoesntMatch, + _ErrorName[432:462]: ErrNumberOfColumnsDoesntMatch, + _ErrorLowerName[432:462]: ErrNumberOfColumnsDoesntMatch, + _ErrorName[462:507]: ErrCannotReadAllDataFromTabSeparatedInput, + _ErrorLowerName[462:507]: ErrCannotReadAllDataFromTabSeparatedInput, + _ErrorName[507:554]: ErrCannotParseAllValueFromTabSeparatedInput, + _ErrorLowerName[507:554]: ErrCannotParseAllValueFromTabSeparatedInput, + _ErrorName[554:578]: ErrCannotReadFromIstream, + _ErrorLowerName[554:578]: ErrCannotReadFromIstream, + _ErrorName[578:601]: ErrCannotWriteToOstream, + _ErrorLowerName[578:601]: ErrCannotWriteToOstream, + _ErrorName[601:629]: ErrCannotParseEscapeSequence, + _ErrorLowerName[601:629]: ErrCannotParseEscapeSequence, + _ErrorName[629:655]: ErrCannotParseQuotedString, + _ErrorLowerName[629:655]: ErrCannotParseQuotedString, + _ErrorName[655:690]: ErrCannotParseInputAssertionFailed, + _ErrorLowerName[655:690]: ErrCannotParseInputAssertionFailed, + _ErrorName[690:725]: ErrCannotPrintFloatOrDoubleNumber, + _ErrorLowerName[690:725]: ErrCannotPrintFloatOrDoubleNumber, + _ErrorName[725:745]: ErrCannotPrintInteger, + _ErrorLowerName[725:745]: ErrCannotPrintInteger, + _ErrorName[745:781]: ErrCannotReadSizeOfCompressedChunk, + _ErrorLowerName[745:781]: ErrCannotReadSizeOfCompressedChunk, + _ErrorName[781:809]: ErrCannotReadCompressedChunk, + _ErrorLowerName[781:809]: ErrCannotReadCompressedChunk, + _ErrorName[809:834]: ErrAttemptToReadAfterEOF, + _ErrorLowerName[809:834]: ErrAttemptToReadAfterEOF, + _ErrorName[834:854]: ErrCannotReadAllData, + _ErrorLowerName[834:854]: ErrCannotReadAllData, + _ErrorName[854:885]: ErrTooManyArgumentsForFunction, + _ErrorLowerName[854:885]: ErrTooManyArgumentsForFunction, + _ErrorName[885:916]: ErrTooLessArgumentsForFunction, + _ErrorLowerName[885:916]: ErrTooLessArgumentsForFunction, + _ErrorName[916:929]: ErrBadArguments, + _ErrorLowerName[916:929]: ErrBadArguments, + _ErrorName[929:951]: ErrUnknownElementInAst, + _ErrorLowerName[929:951]: ErrUnknownElementInAst, + _ErrorName[951:968]: ErrCannotParseDate, + _ErrorLowerName[951:968]: ErrCannotParseDate, + _ErrorName[968:993]: ErrTooLargeSizeCompressed, + _ErrorLowerName[968:993]: ErrTooLargeSizeCompressed, + _ErrorName[993:1014]: ErrChecksumDoesntMatch, + _ErrorLowerName[993:1014]: ErrChecksumDoesntMatch, + _ErrorName[1014:1035]: ErrCannotParseDatetime, + _ErrorLowerName[1014:1035]: ErrCannotParseDatetime, + _ErrorName[1035:1067]: ErrNumberOfArgumentsDoesntMatch, + _ErrorLowerName[1035:1067]: ErrNumberOfArgumentsDoesntMatch, + _ErrorName[1067:1091]: ErrIllegalTypeOfArgument, + 
_ErrorLowerName[1067:1091]: ErrIllegalTypeOfArgument, + _ErrorName[1091:1105]: ErrIllegalColumn, + _ErrorLowerName[1091:1105]: ErrIllegalColumn, + _ErrorName[1105:1137]: ErrIllegalNumberOfResultColumns, + _ErrorLowerName[1105:1137]: ErrIllegalNumberOfResultColumns, + _ErrorName[1137:1153]: ErrUnknownFunction, + _ErrorLowerName[1137:1153]: ErrUnknownFunction, + _ErrorName[1153:1171]: ErrUnknownIdentifier, + _ErrorLowerName[1153:1171]: ErrUnknownIdentifier, + _ErrorName[1171:1186]: ErrNotImplemented, + _ErrorLowerName[1171:1186]: ErrNotImplemented, + _ErrorName[1186:1199]: ErrLogicalError, + _ErrorLowerName[1186:1199]: ErrLogicalError, + _ErrorName[1199:1211]: ErrUnknownType, + _ErrorLowerName[1199:1211]: ErrUnknownType, + _ErrorName[1211:1240]: ErrEmptyListOfColumnsQueried, + _ErrorLowerName[1211:1240]: ErrEmptyListOfColumnsQueried, + _ErrorName[1240:1269]: ErrColumnQueriedMoreThanOnce, + _ErrorLowerName[1240:1269]: ErrColumnQueriedMoreThanOnce, + _ErrorName[1269:1282]: ErrTypeMismatch, + _ErrorLowerName[1269:1282]: ErrTypeMismatch, + _ErrorName[1282:1313]: ErrStorageDoesntAllowParameters, + _ErrorLowerName[1282:1313]: ErrStorageDoesntAllowParameters, + _ErrorName[1313:1339]: ErrStorageRequiresParameter, + _ErrorLowerName[1313:1339]: ErrStorageRequiresParameter, + _ErrorName[1339:1354]: ErrUnknownStorage, + _ErrorLowerName[1339:1354]: ErrUnknownStorage, + _ErrorName[1354:1374]: ErrTableAlreadyExists, + _ErrorLowerName[1354:1374]: ErrTableAlreadyExists, + _ErrorName[1374:1403]: ErrTableMetadataAlreadyExists, + _ErrorLowerName[1374:1403]: ErrTableMetadataAlreadyExists, + _ErrorName[1403:1436]: ErrIllegalTypeOfColumnForFilter, + _ErrorLowerName[1403:1436]: ErrIllegalTypeOfColumnForFilter, + _ErrorName[1436:1449]: ErrUnknownTable, + _ErrorLowerName[1436:1449]: ErrUnknownTable, + _ErrorName[1449:1476]: ErrOnlyFilterColumnInBlock, + _ErrorLowerName[1449:1476]: ErrOnlyFilterColumnInBlock, + _ErrorName[1476:1488]: ErrSyntaxError, + _ErrorLowerName[1476:1488]: ErrSyntaxError, + _ErrorName[1488:1514]: ErrUnknownAggregateFunction, + _ErrorLowerName[1488:1514]: ErrUnknownAggregateFunction, + _ErrorName[1514:1554]: ErrCannotReadAggregateFunctionFromText, + _ErrorLowerName[1514:1554]: ErrCannotReadAggregateFunctionFromText, + _ErrorName[1554:1593]: ErrCannotWriteAggregateFunctionAsText, + _ErrorLowerName[1554:1593]: ErrCannotWriteAggregateFunctionAsText, + _ErrorName[1593:1605]: ErrNotAColumn, + _ErrorLowerName[1593:1605]: ErrNotAColumn, + _ErrorName[1605:1631]: ErrIllegalKeyOfAggregation, + _ErrorLowerName[1605:1631]: ErrIllegalKeyOfAggregation, + _ErrorName[1631:1655]: ErrCannotGetSizeOfField, + _ErrorLowerName[1631:1655]: ErrCannotGetSizeOfField, + _ErrorName[1655:1676]: ErrArgumentOutOfBound, + _ErrorLowerName[1655:1676]: ErrArgumentOutOfBound, + _ErrorName[1676:1695]: ErrCannotConvertType, + _ErrorLowerName[1676:1695]: ErrCannotConvertType, + _ErrorName[1695:1727]: ErrCannotWriteAfterEndOfBuffer, + _ErrorLowerName[1695:1727]: ErrCannotWriteAfterEndOfBuffer, + _ErrorName[1727:1746]: ErrCannotParseNumber, + _ErrorLowerName[1727:1746]: ErrCannotParseNumber, + _ErrorName[1746:1760]: ErrUnknownFormat, + _ErrorLowerName[1746:1760]: ErrUnknownFormat, + _ErrorName[1760:1792]: ErrCannotReadFromFileDescriptor, + _ErrorLowerName[1760:1792]: ErrCannotReadFromFileDescriptor, + _ErrorName[1792:1823]: ErrCannotWriteToFileDescriptor, + _ErrorLowerName[1792:1823]: ErrCannotWriteToFileDescriptor, + _ErrorName[1823:1839]: ErrCannotOpenFile, + _ErrorLowerName[1823:1839]: ErrCannotOpenFile, + _ErrorName[1839:1856]: 
ErrCannotCloseFile, + _ErrorLowerName[1839:1856]: ErrCannotCloseFile, + _ErrorName[1856:1877]: ErrUnknownTypeOfQuery, + _ErrorLowerName[1856:1877]: ErrUnknownTypeOfQuery, + _ErrorName[1877:1896]: ErrIncorrectFileName, + _ErrorLowerName[1877:1896]: ErrIncorrectFileName, + _ErrorName[1896:1911]: ErrIncorrectQuery, + _ErrorLowerName[1896:1911]: ErrIncorrectQuery, + _ErrorName[1911:1927]: ErrUnknownDatabase, + _ErrorLowerName[1911:1927]: ErrUnknownDatabase, + _ErrorName[1927:1950]: ErrDatabaseAlreadyExists, + _ErrorLowerName[1927:1950]: ErrDatabaseAlreadyExists, + _ErrorName[1950:1972]: ErrDirectoryDoesntExist, + _ErrorLowerName[1950:1972]: ErrDirectoryDoesntExist, + _ErrorName[1972:1996]: ErrDirectoryAlreadyExists, + _ErrorLowerName[1972:1996]: ErrDirectoryAlreadyExists, + _ErrorName[1996:2028]: ErrFormatIsNotSuitableForInput, + _ErrorLowerName[1996:2028]: ErrFormatIsNotSuitableForInput, + _ErrorName[2028:2064]: ErrReceivedErrorFromRemoteIoServer, + _ErrorLowerName[2028:2064]: ErrReceivedErrorFromRemoteIoServer, + _ErrorName[2064:2088]: ErrCannotSeekThroughFile, + _ErrorLowerName[2064:2088]: ErrCannotSeekThroughFile, + _ErrorName[2088:2108]: ErrCannotTruncateFile, + _ErrorLowerName[2088:2108]: ErrCannotTruncateFile, + _ErrorName[2108:2134]: ErrUnknownCompressionMethod, + _ErrorLowerName[2108:2134]: ErrUnknownCompressionMethod, + _ErrorName[2134:2162]: ErrEmptyListOfColumnsPassed, + _ErrorLowerName[2134:2162]: ErrEmptyListOfColumnsPassed, + _ErrorName[2162:2199]: ErrSizesOfMarksFilesAreInconsistent, + _ErrorLowerName[2162:2199]: ErrSizesOfMarksFilesAreInconsistent, + _ErrorName[2199:2216]: ErrEmptyDataPassed, + _ErrorLowerName[2199:2216]: ErrEmptyDataPassed, + _ErrorName[2216:2247]: ErrUnknownAggregatedDataVariant, + _ErrorLowerName[2216:2247]: ErrUnknownAggregatedDataVariant, + _ErrorName[2247:2294]: ErrCannotMergeDifferentAggregatedDataVariants, + _ErrorLowerName[2247:2294]: ErrCannotMergeDifferentAggregatedDataVariants, + _ErrorName[2294:2317]: ErrCannotReadFromSocket, + _ErrorLowerName[2294:2317]: ErrCannotReadFromSocket, + _ErrorName[2317:2339]: ErrCannotWriteToSocket, + _ErrorLowerName[2317:2339]: ErrCannotWriteToSocket, + _ErrorName[2339:2378]: ErrCannotReadAllDataFromChunkedInput, + _ErrorLowerName[2339:2378]: ErrCannotReadAllDataFromChunkedInput, + _ErrorName[2378:2419]: ErrCannotWriteToEmptyBlockOutputStream, + _ErrorLowerName[2378:2419]: ErrCannotWriteToEmptyBlockOutputStream, + _ErrorName[2419:2445]: ErrUnknownPacketFromClient, + _ErrorLowerName[2419:2445]: ErrUnknownPacketFromClient, + _ErrorName[2445:2471]: ErrUnknownPacketFromServer, + _ErrorLowerName[2445:2471]: ErrUnknownPacketFromServer, + _ErrorName[2471:2500]: ErrUnexpectedPacketFromClient, + _ErrorLowerName[2471:2500]: ErrUnexpectedPacketFromClient, + _ErrorName[2500:2529]: ErrUnexpectedPacketFromServer, + _ErrorLowerName[2500:2529]: ErrUnexpectedPacketFromServer, + _ErrorName[2529:2561]: ErrReceivedDataForWrongQueryID, + _ErrorLowerName[2529:2561]: ErrReceivedDataForWrongQueryID, + _ErrorName[2561:2582]: ErrTooSmallBufferSize, + _ErrorLowerName[2561:2582]: ErrTooSmallBufferSize, + _ErrorName[2582:2601]: ErrCannotReadHistory, + _ErrorLowerName[2582:2601]: ErrCannotReadHistory, + _ErrorName[2601:2622]: ErrCannotAppendHistory, + _ErrorLowerName[2601:2622]: ErrCannotAppendHistory, + _ErrorName[2622:2639]: ErrFileDoesntExist, + _ErrorLowerName[2622:2639]: ErrFileDoesntExist, + _ErrorName[2639:2656]: ErrNoDataToInsert, + _ErrorLowerName[2639:2656]: ErrNoDataToInsert, + _ErrorName[2656:2675]: ErrCannotBlockSignal, + 
_ErrorLowerName[2656:2675]: ErrCannotBlockSignal, + _ErrorName[2675:2696]: ErrCannotUnblockSignal, + _ErrorLowerName[2675:2696]: ErrCannotUnblockSignal, + _ErrorName[2696:2720]: ErrCannotManipulateSigset, + _ErrorLowerName[2696:2720]: ErrCannotManipulateSigset, + _ErrorName[2720:2742]: ErrCannotWaitForSignal, + _ErrorLowerName[2720:2742]: ErrCannotWaitForSignal, + _ErrorName[2742:2761]: ErrThereIsNoSession, + _ErrorLowerName[2742:2761]: ErrThereIsNoSession, + _ErrorName[2761:2781]: ErrCannotClockGettime, + _ErrorLowerName[2761:2781]: ErrCannotClockGettime, + _ErrorName[2781:2796]: ErrUnknownSetting, + _ErrorLowerName[2781:2796]: ErrUnknownSetting, + _ErrorName[2796:2821]: ErrThereIsNoDefaultValue, + _ErrorLowerName[2796:2821]: ErrThereIsNoDefaultValue, + _ErrorName[2821:2835]: ErrIncorrectData, + _ErrorLowerName[2821:2835]: ErrIncorrectData, + _ErrorName[2835:2850]: ErrEngineRequired, + _ErrorLowerName[2835:2850]: ErrEngineRequired, + _ErrorName[2850:2898]: ErrCannotInsertValueOfDifferentSizeIntoTuple, + _ErrorLowerName[2850:2898]: ErrCannotInsertValueOfDifferentSizeIntoTuple, + _ErrorName[2898:2922]: ErrUnknownSetDataVariant, + _ErrorLowerName[2898:2922]: ErrUnknownSetDataVariant, + _ErrorName[2922:2942]: ErrIncompatibleColumns, + _ErrorLowerName[2922:2942]: ErrIncompatibleColumns, + _ErrorName[2942:2966]: ErrUnknownTypeOfAstNode, + _ErrorLowerName[2942:2966]: ErrUnknownTypeOfAstNode, + _ErrorName[2966:2990]: ErrIncorrectElementOfSet, + _ErrorLowerName[2966:2990]: ErrIncorrectElementOfSet, + _ErrorName[2990:3025]: ErrIncorrectResultOfScalarSubquery, + _ErrorLowerName[2990:3025]: ErrIncorrectResultOfScalarSubquery, + _ErrorName[3025:3047]: ErrCannotGetReturnType, + _ErrorLowerName[3025:3047]: ErrCannotGetReturnType, + _ErrorName[3047:3060]: ErrIllegalIndex, + _ErrorLowerName[3047:3060]: ErrIllegalIndex, + _ErrorName[3060:3080]: ErrTooLargeArraySize, + _ErrorLowerName[3060:3080]: ErrTooLargeArraySize, + _ErrorName[3080:3099]: ErrFunctionIsSpecial, + _ErrorLowerName[3080:3099]: ErrFunctionIsSpecial, + _ErrorName[3099:3126]: ErrCannotReadArrayFromText, + _ErrorLowerName[3099:3126]: ErrCannotReadArrayFromText, + _ErrorName[3126:3147]: ErrTooLargeStringSize, + _ErrorLowerName[3126:3147]: ErrTooLargeStringSize, + _ErrorName[3147:3180]: ErrCannotCreateTableFromMetadata, + _ErrorLowerName[3147:3180]: ErrCannotCreateTableFromMetadata, + _ErrorName[3180:3222]: ErrAggregateFunctionDoesntAllowParameters, + _ErrorLowerName[3180:3222]: ErrAggregateFunctionDoesntAllowParameters, + _ErrorName[3222:3272]: ErrParametersToAggregateFunctionsMustBeLiterals, + _ErrorLowerName[3222:3272]: ErrParametersToAggregateFunctionsMustBeLiterals, + _ErrorName[3272:3297]: ErrZeroArrayOrTupleIndex, + _ErrorLowerName[3272:3297]: ErrZeroArrayOrTupleIndex, + _ErrorName[3297:3322]: ErrUnknownElementInConfig, + _ErrorLowerName[3297:3322]: ErrUnknownElementInConfig, + _ErrorName[3322:3349]: ErrExcessiveElementInConfig, + _ErrorLowerName[3322:3349]: ErrExcessiveElementInConfig, + _ErrorName[3349:3370]: ErrNoElementsInConfig, + _ErrorLowerName[3349:3370]: ErrNoElementsInConfig, + _ErrorName[3370:3403]: ErrAllRequestedColumnsAreMissing, + _ErrorLowerName[3370:3403]: ErrAllRequestedColumnsAreMissing, + _ErrorName[3403:3425]: ErrSamplingNotSupported, + _ErrorLowerName[3403:3425]: ErrSamplingNotSupported, + _ErrorName[3425:3439]: ErrNotFoundNode, + _ErrorLowerName[3425:3439]: ErrNotFoundNode, + _ErrorName[3439:3463]: ErrFoundMoreThanOneNode, + _ErrorLowerName[3439:3463]: ErrFoundMoreThanOneNode, + _ErrorName[3463:3498]: 
ErrFirstDateIsBiggerThanLastDate, + _ErrorLowerName[3463:3498]: ErrFirstDateIsBiggerThanLastDate, + _ErrorName[3498:3519]: ErrUnknownOverflowMode, + _ErrorLowerName[3498:3519]: ErrUnknownOverflowMode, + _ErrorName[3519:3550]: ErrQuerySectionDoesntMakeSense, + _ErrorLowerName[3519:3550]: ErrQuerySectionDoesntMakeSense, + _ErrorName[3550:3590]: ErrNotFoundFunctionElementForAggregate, + _ErrorLowerName[3550:3590]: ErrNotFoundFunctionElementForAggregate, + _ErrorName[3590:3630]: ErrNotFoundRelationElementForCondition, + _ErrorLowerName[3590:3630]: ErrNotFoundRelationElementForCondition, + _ErrorName[3630:3665]: ErrNotFoundRHSElementForCondition, + _ErrorLowerName[3630:3665]: ErrNotFoundRHSElementForCondition, + _ErrorName[3665:3685]: ErrNoAttributesListed, + _ErrorLowerName[3665:3685]: ErrNoAttributesListed, + _ErrorName[3685:3731]: ErrIndexOfColumnInSortClauseIsOutOfRange, + _ErrorLowerName[3685:3731]: ErrIndexOfColumnInSortClauseIsOutOfRange, + _ErrorName[3731:3759]: ErrUnknownDirectionOfSorting, + _ErrorLowerName[3731:3759]: ErrUnknownDirectionOfSorting, + _ErrorName[3759:3775]: ErrIllegalDivision, + _ErrorLowerName[3759:3775]: ErrIllegalDivision, + _ErrorName[3775:3808]: ErrAggregateFunctionNotApplicable, + _ErrorLowerName[3775:3808]: ErrAggregateFunctionNotApplicable, + _ErrorName[3808:3824]: ErrUnknownRelation, + _ErrorLowerName[3808:3824]: ErrUnknownRelation, + _ErrorName[3824:3851]: ErrDictionariesWasNotLoaded, + _ErrorLowerName[3824:3851]: ErrDictionariesWasNotLoaded, + _ErrorName[3851:3872]: ErrIllegalOverflowMode, + _ErrorLowerName[3851:3872]: ErrIllegalOverflowMode, + _ErrorName[3872:3885]: ErrTooManyRows, + _ErrorLowerName[3872:3885]: ErrTooManyRows, + _ErrorName[3885:3901]: ErrTimeoutExceeded, + _ErrorLowerName[3885:3901]: ErrTimeoutExceeded, + _ErrorName[3901:3909]: ErrTooSlow, + _ErrorLowerName[3901:3909]: ErrTooSlow, + _ErrorName[3909:3925]: ErrTooManyColumns, + _ErrorLowerName[3909:3925]: ErrTooManyColumns, + _ErrorName[3925:3944]: ErrTooDeepSubqueries, + _ErrorLowerName[3925:3944]: ErrTooDeepSubqueries, + _ErrorName[3944:3961]: ErrTooDeepPipeline, + _ErrorLowerName[3944:3961]: ErrTooDeepPipeline, + _ErrorName[3961:3969]: ErrReadonly, + _ErrorLowerName[3961:3969]: ErrReadonly, + _ErrorName[3969:3995]: ErrTooManyTemporaryColumns, + _ErrorLowerName[3969:3995]: ErrTooManyTemporaryColumns, + _ErrorName[3995:4031]: ErrTooManyTemporaryNonConstColumns, + _ErrorLowerName[3995:4031]: ErrTooManyTemporaryNonConstColumns, + _ErrorName[4031:4043]: ErrTooDeepAst, + _ErrorLowerName[4031:4043]: ErrTooDeepAst, + _ErrorName[4043:4054]: ErrTooBigAst, + _ErrorLowerName[4043:4054]: ErrTooBigAst, + _ErrorName[4054:4071]: ErrBadTypeOfField, + _ErrorLowerName[4054:4071]: ErrBadTypeOfField, + _ErrorName[4071:4078]: ErrBadGet, + _ErrorLowerName[4071:4078]: ErrBadGet, + _ErrorName[4078:4109]: ErrBlocksHaveDifferentStructure, + _ErrorLowerName[4078:4109]: ErrBlocksHaveDifferentStructure, + _ErrorName[4109:4132]: ErrCannotCreateDirectory, + _ErrorLowerName[4109:4132]: ErrCannotCreateDirectory, + _ErrorName[4132:4154]: ErrCannotAllocateMemory, + _ErrorLowerName[4132:4154]: ErrCannotAllocateMemory, + _ErrorName[4154:4168]: ErrCyclicAliases, + _ErrorLowerName[4154:4168]: ErrCyclicAliases, + _ErrorName[4168:4183]: ErrChunkNotFound, + _ErrorLowerName[4168:4183]: ErrChunkNotFound, + _ErrorName[4183:4203]: ErrDuplicateChunkName, + _ErrorLowerName[4183:4203]: ErrDuplicateChunkName, + _ErrorName[4203:4234]: ErrMultipleAliasesForExpression, + _ErrorLowerName[4203:4234]: ErrMultipleAliasesForExpression, + 
_ErrorName[4234:4264]: ErrMultipleExpressionsForAlias, + _ErrorLowerName[4234:4264]: ErrMultipleExpressionsForAlias, + _ErrorName[4264:4283]: ErrThereIsNoProfile, + _ErrorLowerName[4264:4283]: ErrThereIsNoProfile, + _ErrorName[4283:4296]: ErrIllegalFinal, + _ErrorLowerName[4283:4296]: ErrIllegalFinal, + _ErrorName[4296:4312]: ErrIllegalPrewhere, + _ErrorLowerName[4296:4312]: ErrIllegalPrewhere, + _ErrorName[4312:4333]: ErrUnexpectedExpression, + _ErrorLowerName[4312:4333]: ErrUnexpectedExpression, + _ErrorName[4333:4352]: ErrIllegalAggregation, + _ErrorLowerName[4333:4352]: ErrIllegalAggregation, + _ErrorName[4352:4381]: ErrUnsupportedMyisamBlockType, + _ErrorLowerName[4352:4381]: ErrUnsupportedMyisamBlockType, + _ErrorName[4381:4409]: ErrUnsupportedCollationLocale, + _ErrorLowerName[4381:4409]: ErrUnsupportedCollationLocale, + _ErrorName[4409:4436]: ErrCollationComparisonFailed, + _ErrorLowerName[4409:4436]: ErrCollationComparisonFailed, + _ErrorName[4436:4450]: ErrUnknownAction, + _ErrorLowerName[4436:4450]: ErrUnknownAction, + _ErrorName[4450:4484]: ErrTableMustNotBeCreatedManually, + _ErrorLowerName[4450:4484]: ErrTableMustNotBeCreatedManually, + _ErrorName[4484:4512]: ErrSizesOfArraysDoesntMatch, + _ErrorLowerName[4484:4512]: ErrSizesOfArraysDoesntMatch, + _ErrorName[4512:4535]: ErrSetSizeLimitExceeded, + _ErrorLowerName[4512:4535]: ErrSetSizeLimitExceeded, + _ErrorName[4535:4547]: ErrUnknownUser, + _ErrorLowerName[4535:4547]: ErrUnknownUser, + _ErrorName[4547:4561]: ErrWrongPassword, + _ErrorLowerName[4547:4561]: ErrWrongPassword, + _ErrorName[4561:4578]: ErrRequiredPassword, + _ErrorLowerName[4561:4578]: ErrRequiredPassword, + _ErrorName[4578:4600]: ErrIPAddressNotAllowed, + _ErrorLowerName[4578:4600]: ErrIPAddressNotAllowed, + _ErrorName[4600:4628]: ErrUnknownAddressPatternType, + _ErrorLowerName[4600:4628]: ErrUnknownAddressPatternType, + _ErrorName[4628:4654]: ErrServerRevisionIsTooOld, + _ErrorLowerName[4628:4654]: ErrServerRevisionIsTooOld, + _ErrorName[4654:4663]: ErrDNSError, + _ErrorLowerName[4654:4663]: ErrDNSError, + _ErrorName[4663:4676]: ErrUnknownQuota, + _ErrorLowerName[4663:4676]: ErrUnknownQuota, + _ErrorName[4676:4699]: ErrQuotaDoesntAllowKeys, + _ErrorLowerName[4676:4699]: ErrQuotaDoesntAllowKeys, + _ErrorName[4699:4712]: ErrQuotaExpired, + _ErrorLowerName[4699:4712]: ErrQuotaExpired, + _ErrorName[4712:4741]: ErrTooManySimultaneousQueries, + _ErrorLowerName[4712:4741]: ErrTooManySimultaneousQueries, + _ErrorName[4741:4759]: ErrNoFreeConnection, + _ErrorLowerName[4741:4759]: ErrNoFreeConnection, + _ErrorName[4759:4771]: ErrCannotFsync, + _ErrorLowerName[4759:4771]: ErrCannotFsync, + _ErrorName[4771:4791]: ErrNestedTypeTooDeep, + _ErrorLowerName[4771:4791]: ErrNestedTypeTooDeep, + _ErrorName[4791:4805]: ErrAliasRequired, + _ErrorLowerName[4791:4805]: ErrAliasRequired, + _ErrorName[4805:4825]: ErrAmbiguousIdentifier, + _ErrorLowerName[4805:4825]: ErrAmbiguousIdentifier, + _ErrorName[4825:4843]: ErrEmptyNestedTable, + _ErrorLowerName[4825:4843]: ErrEmptyNestedTable, + _ErrorName[4843:4857]: ErrSocketTimeout, + _ErrorLowerName[4843:4857]: ErrSocketTimeout, + _ErrorName[4857:4870]: ErrNetworkError, + _ErrorLowerName[4857:4870]: ErrNetworkError, + _ErrorName[4870:4881]: ErrEmptyQuery, + _ErrorLowerName[4870:4881]: ErrEmptyQuery, + _ErrorName[4881:4903]: ErrUnknownLoadBalancing, + _ErrorLowerName[4881:4903]: ErrUnknownLoadBalancing, + _ErrorName[4903:4922]: ErrUnknownTotalsMode, + _ErrorLowerName[4903:4922]: ErrUnknownTotalsMode, + _ErrorName[4922:4936]: ErrCannotStatvfs, 
+ _ErrorLowerName[4922:4936]: ErrCannotStatvfs, + _ErrorName[4936:4952]: ErrNotAnAggregate, + _ErrorLowerName[4936:4952]: ErrNotAnAggregate, + _ErrorName[4952:4989]: ErrQueryWithSameIDIsAlreadyRunning, + _ErrorLowerName[4952:4989]: ErrQueryWithSameIDIsAlreadyRunning, + _ErrorName[4989:5023]: ErrClientHasConnectedToWrongPort, + _ErrorLowerName[4989:5023]: ErrClientHasConnectedToWrongPort, + _ErrorName[5023:5039]: ErrTableIsDropped, + _ErrorLowerName[5023:5039]: ErrTableIsDropped, + _ErrorName[5039:5057]: ErrDatabaseNotEmpty, + _ErrorLowerName[5039:5057]: ErrDatabaseNotEmpty, + _ErrorName[5057:5090]: ErrDuplicateInterserverIoEndpoint, + _ErrorLowerName[5057:5090]: ErrDuplicateInterserverIoEndpoint, + _ErrorName[5090:5121]: ErrNoSuchInterserverIoEndpoint, + _ErrorLowerName[5090:5121]: ErrNoSuchInterserverIoEndpoint, + _ErrorName[5121:5154]: ErrAddingReplicaToNonEmptyTable, + _ErrorLowerName[5121:5154]: ErrAddingReplicaToNonEmptyTable, + _ErrorName[5154:5178]: ErrUnexpectedAstStructure, + _ErrorLowerName[5154:5178]: ErrUnexpectedAstStructure, + _ErrorName[5178:5203]: ErrReplicaIsAlreadyActive, + _ErrorLowerName[5178:5203]: ErrReplicaIsAlreadyActive, + _ErrorName[5203:5215]: ErrNoZookeeper, + _ErrorLowerName[5203:5215]: ErrNoZookeeper, + _ErrorName[5215:5235]: ErrNoFileInDataPart, + _ErrorLowerName[5215:5235]: ErrNoFileInDataPart, + _ErrorName[5235:5263]: ErrUnexpectedFileInDataPart, + _ErrorLowerName[5235:5263]: ErrUnexpectedFileInDataPart, + _ErrorName[5263:5292]: ErrBadSizeOfFileInDataPart, + _ErrorLowerName[5263:5292]: ErrBadSizeOfFileInDataPart, + _ErrorName[5292:5310]: ErrQueryIsTooLarge, + _ErrorLowerName[5292:5310]: ErrQueryIsTooLarge, + _ErrorName[5310:5338]: ErrNotFoundExpectedDataPart, + _ErrorLowerName[5310:5338]: ErrNotFoundExpectedDataPart, + _ErrorName[5338:5368]: ErrTooManyUnexpectedDataParts, + _ErrorLowerName[5338:5368]: ErrTooManyUnexpectedDataParts, + _ErrorName[5368:5385]: ErrNoSuchDataPart, + _ErrorLowerName[5368:5385]: ErrNoSuchDataPart, + _ErrorName[5385:5403]: ErrBadDataPartName, + _ErrorLowerName[5385:5403]: ErrBadDataPartName, + _ErrorName[5403:5422]: ErrNoReplicaHasPart, + _ErrorLowerName[5403:5422]: ErrNoReplicaHasPart, + _ErrorName[5422:5441]: ErrDuplicateDataPart, + _ErrorLowerName[5422:5441]: ErrDuplicateDataPart, + _ErrorName[5441:5448]: ErrAborted, + _ErrorLowerName[5441:5448]: ErrAborted, + _ErrorName[5448:5469]: ErrNoReplicaNameGiven, + _ErrorLowerName[5448:5469]: ErrNoReplicaNameGiven, + _ErrorName[5469:5491]: ErrFormatVersionTooOld, + _ErrorLowerName[5469:5491]: ErrFormatVersionTooOld, + _ErrorName[5491:5504]: ErrCannotMunmap, + _ErrorLowerName[5491:5504]: ErrCannotMunmap, + _ErrorName[5504:5517]: ErrCannotMremap, + _ErrorLowerName[5504:5517]: ErrCannotMremap, + _ErrorName[5517:5538]: ErrMemoryLimitExceeded, + _ErrorLowerName[5517:5538]: ErrMemoryLimitExceeded, + _ErrorName[5538:5556]: ErrTableIsReadOnly, + _ErrorLowerName[5538:5556]: ErrTableIsReadOnly, + _ErrorName[5556:5572]: ErrNotEnoughSpace, + _ErrorLowerName[5556:5572]: ErrNotEnoughSpace, + _ErrorName[5572:5598]: ErrUnexpectedZookeeperError, + _ErrorLowerName[5572:5598]: ErrUnexpectedZookeeperError, + _ErrorName[5598:5612]: ErrCorruptedData, + _ErrorLowerName[5598:5612]: ErrCorruptedData, + _ErrorName[5612:5626]: ErrIncorrectMark, + _ErrorLowerName[5612:5626]: ErrIncorrectMark, + _ErrorName[5626:5649]: ErrInvalidPartitionValue, + _ErrorLowerName[5626:5649]: ErrInvalidPartitionValue, + _ErrorName[5649:5673]: ErrNotEnoughBlockNumbers, + _ErrorLowerName[5649:5673]: ErrNotEnoughBlockNumbers, + 
_ErrorName[5673:5688]: ErrNoSuchReplica, + _ErrorLowerName[5673:5688]: ErrNoSuchReplica, + _ErrorName[5688:5702]: ErrTooManyParts, + _ErrorLowerName[5688:5702]: ErrTooManyParts, + _ErrorName[5702:5726]: ErrReplicaIsAlreadyExist, + _ErrorLowerName[5702:5726]: ErrReplicaIsAlreadyExist, + _ErrorName[5726:5744]: ErrNoActiveReplicas, + _ErrorLowerName[5726:5744]: ErrNoActiveReplicas, + _ErrorName[5744:5775]: ErrTooManyRetriesToFetchParts, + _ErrorLowerName[5744:5775]: ErrTooManyRetriesToFetchParts, + _ErrorName[5775:5799]: ErrPartitionAlreadyExists, + _ErrorLowerName[5775:5799]: ErrPartitionAlreadyExists, + _ErrorName[5799:5821]: ErrPartitionDoesntExist, + _ErrorLowerName[5799:5821]: ErrPartitionDoesntExist, + _ErrorName[5821:5857]: ErrUnionAllResultStructuresMismatch, + _ErrorLowerName[5821:5857]: ErrUnionAllResultStructuresMismatch, + _ErrorName[5857:5887]: ErrClientOutputFormatSpecified, + _ErrorLowerName[5857:5887]: ErrClientOutputFormatSpecified, + _ErrorName[5887:5911]: ErrUnknownBlockInfoField, + _ErrorLowerName[5887:5911]: ErrUnknownBlockInfoField, + _ErrorName[5911:5924]: ErrBadCollation, + _ErrorLowerName[5911:5924]: ErrBadCollation, + _ErrorName[5924:5943]: ErrCannotCompileCode, + _ErrorLowerName[5924:5943]: ErrCannotCompileCode, + _ErrorName[5943:5968]: ErrIncompatibleTypeOfJoin, + _ErrorLowerName[5943:5968]: ErrIncompatibleTypeOfJoin, + _ErrorName[5968:5988]: ErrNoAvailableReplica, + _ErrorLowerName[5968:5988]: ErrNoAvailableReplica, + _ErrorName[5988:6018]: ErrMismatchReplicasDataSources, + _ErrorLowerName[5988:6018]: ErrMismatchReplicasDataSources, + _ErrorName[6018:6058]: ErrStorageDoesntSupportParallelReplicas, + _ErrorLowerName[6018:6058]: ErrStorageDoesntSupportParallelReplicas, + _ErrorName[6058:6069]: ErrCPUIDError, + _ErrorLowerName[6058:6069]: ErrCPUIDError, + _ErrorName[6069:6082]: ErrInfiniteLoop, + _ErrorLowerName[6069:6082]: ErrInfiniteLoop, + _ErrorName[6082:6097]: ErrCannotCompress, + _ErrorLowerName[6082:6097]: ErrCannotCompress, + _ErrorName[6097:6114]: ErrCannotDecompress, + _ErrorLowerName[6097:6114]: ErrCannotDecompress, + _ErrorName[6114:6130]: ErrAioSubmitError, + _ErrorLowerName[6114:6130]: ErrAioSubmitError, + _ErrorName[6130:6150]: ErrAioCompletionError, + _ErrorLowerName[6130:6150]: ErrAioCompletionError, + _ErrorName[6150:6164]: ErrAioReadError, + _ErrorLowerName[6150:6164]: ErrAioReadError, + _ErrorName[6164:6179]: ErrAioWriteError, + _ErrorLowerName[6164:6179]: ErrAioWriteError, + _ErrorName[6179:6193]: ErrIndexNotUsed, + _ErrorLowerName[6179:6193]: ErrIndexNotUsed, + _ErrorName[6193:6208]: ErrLeadershipLost, + _ErrorLowerName[6193:6208]: ErrLeadershipLost, + _ErrorName[6208:6235]: ErrAllConnectionTriesFailed, + _ErrorLowerName[6208:6235]: ErrAllConnectionTriesFailed, + _ErrorName[6235:6252]: ErrNoAvailableData, + _ErrorLowerName[6235:6252]: ErrNoAvailableData, + _ErrorName[6252:6271]: ErrDictionaryIsEmpty, + _ErrorLowerName[6252:6271]: ErrDictionaryIsEmpty, + _ErrorName[6271:6286]: ErrIncorrectIndex, + _ErrorLowerName[6271:6286]: ErrIncorrectIndex, + _ErrorName[6286:6318]: ErrUnknownDistributedProductMode, + _ErrorLowerName[6286:6318]: ErrUnknownDistributedProductMode, + _ErrorName[6318:6350]: ErrUnknownGlobalSubqueriesMethod, + _ErrorLowerName[6318:6350]: ErrUnknownGlobalSubqueriesMethod, + _ErrorName[6350:6372]: ErrTooLessLiveReplicas, + _ErrorLowerName[6350:6372]: ErrTooLessLiveReplicas, + _ErrorName[6372:6409]: ErrUnsatisfiedQuorumForPreviousWrite, + _ErrorLowerName[6372:6409]: ErrUnsatisfiedQuorumForPreviousWrite, + _ErrorName[6409:6431]: 
ErrUnknownFormatVersion, + _ErrorLowerName[6409:6431]: ErrUnknownFormatVersion, + _ErrorName[6431:6466]: ErrDistributedInJoinSubqueryDenied, + _ErrorLowerName[6431:6466]: ErrDistributedInJoinSubqueryDenied, + _ErrorName[6466:6490]: ErrReplicaIsNotInQuorum, + _ErrorLowerName[6466:6490]: ErrReplicaIsNotInQuorum, + _ErrorName[6490:6504]: ErrLimitExceeded, + _ErrorLowerName[6490:6504]: ErrLimitExceeded, + _ErrorName[6504:6526]: ErrDatabaseAccessDenied, + _ErrorLowerName[6504:6526]: ErrDatabaseAccessDenied, + _ErrorName[6526:6544]: ErrLeadershipChanged, + _ErrorLowerName[6526:6544]: ErrLeadershipChanged, + _ErrorName[6544:6571]: ErrMongodbCannotAuthenticate, + _ErrorLowerName[6544:6571]: ErrMongodbCannotAuthenticate, + _ErrorName[6571:6595]: ErrInvalidBlockExtraInfo, + _ErrorLowerName[6571:6595]: ErrInvalidBlockExtraInfo, + _ErrorName[6595:6614]: ErrReceivedEmptyData, + _ErrorLowerName[6595:6614]: ErrReceivedEmptyData, + _ErrorName[6614:6635]: ErrNoRemoteShardFound, + _ErrorLowerName[6614:6635]: ErrNoRemoteShardFound, + _ErrorName[6635:6659]: ErrShardHasNoConnections, + _ErrorLowerName[6635:6659]: ErrShardHasNoConnections, + _ErrorName[6659:6670]: ErrCannotPipe, + _ErrorLowerName[6659:6670]: ErrCannotPipe, + _ErrorName[6670:6681]: ErrCannotFork, + _ErrorLowerName[6670:6681]: ErrCannotFork, + _ErrorName[6681:6693]: ErrCannotDlsym, + _ErrorLowerName[6681:6693]: ErrCannotDlsym, + _ErrorName[6693:6720]: ErrCannotCreateChildProcess, + _ErrorLowerName[6693:6720]: ErrCannotCreateChildProcess, + _ErrorName[6720:6749]: ErrChildWasNotExitedNormally, + _ErrorLowerName[6720:6749]: ErrChildWasNotExitedNormally, + _ErrorName[6749:6762]: ErrCannotSelect, + _ErrorLowerName[6749:6762]: ErrCannotSelect, + _ErrorName[6762:6776]: ErrCannotWaitpid, + _ErrorLowerName[6762:6776]: ErrCannotWaitpid, + _ErrorName[6776:6797]: ErrTableWasNotDropped, + _ErrorLowerName[6776:6797]: ErrTableWasNotDropped, + _ErrorName[6797:6815]: ErrTooDeepRecursion, + _ErrorLowerName[6797:6815]: ErrTooDeepRecursion, + _ErrorName[6815:6829]: ErrTooManyBytes, + _ErrorLowerName[6815:6829]: ErrTooManyBytes, + _ErrorName[6829:6857]: ErrUnexpectedNodeInZookeeper, + _ErrorLowerName[6829:6857]: ErrUnexpectedNodeInZookeeper, + _ErrorName[6857:6888]: ErrFunctionCannotHaveParameters, + _ErrorLowerName[6857:6888]: ErrFunctionCannotHaveParameters, + _ErrorName[6888:6908]: ErrInvalidShardWeight, + _ErrorLowerName[6888:6908]: ErrInvalidShardWeight, + _ErrorName[6908:6932]: ErrInvalidConfigParameter, + _ErrorLowerName[6908:6932]: ErrInvalidConfigParameter, + _ErrorName[6932:6956]: ErrUnknownStatusOfInsert, + _ErrorLowerName[6932:6956]: ErrUnknownStatusOfInsert, + _ErrorName[6956:6990]: ErrValueIsOutOfRangeOfDataType, + _ErrorLowerName[6956:6990]: ErrValueIsOutOfRangeOfDataType, + _ErrorName[6990:7005]: ErrBarrierTimeout, + _ErrorLowerName[6990:7005]: ErrBarrierTimeout, + _ErrorName[7005:7028]: ErrUnknownDatabaseEngine, + _ErrorLowerName[7005:7028]: ErrUnknownDatabaseEngine, + _ErrorName[7028:7047]: ErrDdlGuardIsActive, + _ErrorLowerName[7028:7047]: ErrDdlGuardIsActive, + _ErrorName[7047:7057]: ErrUnfinished, + _ErrorLowerName[7047:7057]: ErrUnfinished, + _ErrorName[7057:7074]: ErrMetadataMismatch, + _ErrorLowerName[7057:7074]: ErrMetadataMismatch, + _ErrorName[7074:7093]: ErrSupportIsDisabled, + _ErrorLowerName[7074:7093]: ErrSupportIsDisabled, + _ErrorName[7093:7115]: ErrTableDiffersTooMuch, + _ErrorLowerName[7093:7115]: ErrTableDiffersTooMuch, + _ErrorName[7115:7137]: ErrCannotConvertCharset, + _ErrorLowerName[7115:7137]: ErrCannotConvertCharset, + 
_ErrorName[7137:7155]: ErrCannotLoadConfig, + _ErrorLowerName[7137:7155]: ErrCannotLoadConfig, + _ErrorName[7155:7192]: ErrCannotInsertNullInOrdinaryColumn, + _ErrorLowerName[7155:7192]: ErrCannotInsertNullInOrdinaryColumn, + _ErrorName[7192:7218]: ErrIncompatibleSourceTables, + _ErrorLowerName[7192:7218]: ErrIncompatibleSourceTables, + _ErrorName[7218:7238]: ErrAmbiguousTableName, + _ErrorLowerName[7218:7238]: ErrAmbiguousTableName, + _ErrorName[7238:7259]: ErrAmbiguousColumnName, + _ErrorLowerName[7238:7259]: ErrAmbiguousColumnName, + _ErrorName[7259:7303]: ErrIndexOfPositionalArgumentIsOutOfRange, + _ErrorLowerName[7259:7303]: ErrIndexOfPositionalArgumentIsOutOfRange, + _ErrorName[7303:7322]: ErrZlibInflateFailed, + _ErrorLowerName[7303:7322]: ErrZlibInflateFailed, + _ErrorName[7322:7341]: ErrZlibDeflateFailed, + _ErrorLowerName[7322:7341]: ErrZlibDeflateFailed, + _ErrorName[7341:7351]: ErrBadLambda, + _ErrorLowerName[7341:7351]: ErrBadLambda, + _ErrorName[7351:7375]: ErrReservedIdentifierName, + _ErrorLowerName[7351:7375]: ErrReservedIdentifierName, + _ErrorName[7375:7399]: ErrIntoOutfileNotAllowed, + _ErrorLowerName[7375:7399]: ErrIntoOutfileNotAllowed, + _ErrorName[7399:7437]: ErrTableSizeExceedsMaxDropSizeLimit, + _ErrorLowerName[7399:7437]: ErrTableSizeExceedsMaxDropSizeLimit, + _ErrorName[7437:7468]: ErrCannotCreateCharsetConverter, + _ErrorLowerName[7437:7468]: ErrCannotCreateCharsetConverter, + _ErrorName[7468:7494]: ErrSeekPositionOutOfBound, + _ErrorLowerName[7468:7494]: ErrSeekPositionOutOfBound, + _ErrorName[7494:7527]: ErrCurrentWriteBufferIsExhausted, + _ErrorLowerName[7494:7527]: ErrCurrentWriteBufferIsExhausted, + _ErrorName[7527:7550]: ErrCannotCreateIoBuffer, + _ErrorLowerName[7527:7550]: ErrCannotCreateIoBuffer, + _ErrorName[7550:7582]: ErrReceivedErrorTooManyRequests, + _ErrorLowerName[7550:7582]: ErrReceivedErrorTooManyRequests, + _ErrorName[7582:7602]: ErrOutputIsNotSorted, + _ErrorLowerName[7582:7602]: ErrOutputIsNotSorted, + _ErrorName[7602:7642]: ErrSizesOfNestedColumnsAreInconsistent, + _ErrorLowerName[7602:7642]: ErrSizesOfNestedColumnsAreInconsistent, + _ErrorName[7642:7658]: ErrTooManyFetches, + _ErrorLowerName[7642:7658]: ErrTooManyFetches, + _ErrorName[7658:7666]: ErrBadCast, + _ErrorLowerName[7658:7666]: ErrBadCast, + _ErrorName[7666:7688]: ErrAllReplicasAreStale, + _ErrorLowerName[7666:7688]: ErrAllReplicasAreStale, + _ErrorName[7688:7722]: ErrDataTypeCannotBeUsedInTables, + _ErrorLowerName[7688:7722]: ErrDataTypeCannotBeUsedInTables, + _ErrorName[7722:7753]: ErrInconsistentClusterDefinition, + _ErrorLowerName[7722:7753]: ErrInconsistentClusterDefinition, + _ErrorName[7753:7770]: ErrSessionNotFound, + _ErrorLowerName[7753:7770]: ErrSessionNotFound, + _ErrorName[7770:7787]: ErrSessionIsLocked, + _ErrorLowerName[7770:7787]: ErrSessionIsLocked, + _ErrorName[7787:7810]: ErrInvalidSessionTimeout, + _ErrorLowerName[7787:7810]: ErrInvalidSessionTimeout, + _ErrorName[7810:7823]: ErrCannotDlopen, + _ErrorLowerName[7810:7823]: ErrCannotDlopen, + _ErrorName[7823:7840]: ErrCannotParseUUID, + _ErrorLowerName[7823:7840]: ErrCannotParseUUID, + _ErrorName[7840:7868]: ErrIllegalSyntaxForDataType, + _ErrorLowerName[7840:7868]: ErrIllegalSyntaxForDataType, + _ErrorName[7868:7899]: ErrDataTypeCannotHaveArguments, + _ErrorLowerName[7868:7899]: ErrDataTypeCannotHaveArguments, + _ErrorName[7899:7937]: ErrUnknownStatusOfDistributedDdlTask, + _ErrorLowerName[7899:7937]: ErrUnknownStatusOfDistributedDdlTask, + _ErrorName[7937:7948]: ErrCannotKill, + _ErrorLowerName[7937:7948]: 
ErrCannotKill, + _ErrorName[7948:7968]: ErrHTTPLengthRequired, + _ErrorLowerName[7948:7968]: ErrHTTPLengthRequired, + _ErrorName[7968:7994]: ErrCannotLoadCatboostModel, + _ErrorLowerName[7968:7994]: ErrCannotLoadCatboostModel, + _ErrorName[7994:8021]: ErrCannotApplyCatboostModel, + _ErrorLowerName[7994:8021]: ErrCannotApplyCatboostModel, + _ErrorName[8021:8047]: ErrPartIsTemporarilyLocked, + _ErrorLowerName[8021:8047]: ErrPartIsTemporarilyLocked, + _ErrorName[8047:8072]: ErrMultipleStreamsRequired, + _ErrorLowerName[8047:8072]: ErrMultipleStreamsRequired, + _ErrorName[8072:8086]: ErrNoCommonType, + _ErrorLowerName[8072:8086]: ErrNoCommonType, + _ErrorName[8086:8118]: ErrExternalLoadableAlreadyExists, + _ErrorLowerName[8086:8118]: ErrExternalLoadableAlreadyExists, + _ErrorName[8118:8140]: ErrCannotAssignOptimize, + _ErrorLowerName[8118:8140]: ErrCannotAssignOptimize, + _ErrorName[8140:8163]: ErrInsertWasDeduplicated, + _ErrorLowerName[8140:8163]: ErrInsertWasDeduplicated, + _ErrorName[8163:8192]: ErrCannotGetCreateTableQuery, + _ErrorLowerName[8163:8192]: ErrCannotGetCreateTableQuery, + _ErrorName[8192:8214]: ErrExternalLibraryError, + _ErrorLowerName[8192:8214]: ErrExternalLibraryError, + _ErrorName[8214:8233]: ErrQueryIsProhibited, + _ErrorLowerName[8214:8233]: ErrQueryIsProhibited, + _ErrorName[8233:8250]: ErrThereIsNoQuery, + _ErrorLowerName[8233:8250]: ErrThereIsNoQuery, + _ErrorName[8250:8269]: ErrQueryWasCancelled, + _ErrorLowerName[8250:8269]: ErrQueryWasCancelled, + _ErrorName[8269:8304]: ErrFunctionThrowIfValueIsNonZero, + _ErrorLowerName[8269:8304]: ErrFunctionThrowIfValueIsNonZero, + _ErrorName[8304:8326]: ErrTooManyRowsOrBytes, + _ErrorLowerName[8304:8326]: ErrTooManyRowsOrBytes, + _ErrorName[8326:8369]: ErrQueryIsNotSupportedInMaterializedView, + _ErrorLowerName[8326:8369]: ErrQueryIsNotSupportedInMaterializedView, + _ErrorName[8369:8406]: ErrCannotParseDomainValueFromString, + _ErrorLowerName[8369:8406]: ErrCannotParseDomainValueFromString, + _ErrorName[8406:8427]: ErrAuthenticationFailed, + _ErrorLowerName[8406:8427]: ErrAuthenticationFailed, + _ErrorName[8427:8443]: ErrKeeperException, + _ErrorLowerName[8427:8443]: ErrKeeperException, + _ErrorName[8443:8457]: ErrPocoException, + _ErrorLowerName[8443:8457]: ErrPocoException, + _ErrorName[8457:8470]: ErrStdException, + _ErrorLowerName[8457:8470]: ErrStdException, + _ErrorName[8470:8487]: ErrUnknownException, + _ErrorLowerName[8470:8487]: ErrUnknownException, + _ErrorName[8487:8520]: ErrConditionalTreeParentNotFound, + _ErrorLowerName[8487:8520]: ErrConditionalTreeParentNotFound, + _ErrorName[8520:8550]: ErrIllegalProjectionManipulator, + _ErrorLowerName[8520:8550]: ErrIllegalProjectionManipulator, +} + +var _ErrorNames = []string{ + _ErrorName[0:18], + _ErrorName[18:39], + _ErrorName[39:61], + _ErrorName[61:81], + _ErrorName[81:98], + _ErrorName[98:125], + _ErrorName[125:143], + _ErrorName[143:172], + _ErrorName[172:197], + _ErrorName[197:218], + _ErrorName[218:240], + _ErrorName[240:278], + _ErrorName[278:294], + _ErrorName[294:317], + _ErrorName[317:357], + _ErrorName[357:399], + _ErrorName[399:432], + _ErrorName[432:462], + _ErrorName[462:507], + _ErrorName[507:554], + _ErrorName[554:578], + _ErrorName[578:601], + _ErrorName[601:629], + _ErrorName[629:655], + _ErrorName[655:690], + _ErrorName[690:725], + _ErrorName[725:745], + _ErrorName[745:781], + _ErrorName[781:809], + _ErrorName[809:834], + _ErrorName[834:854], + _ErrorName[854:885], + _ErrorName[885:916], + _ErrorName[916:929], + _ErrorName[929:951], + 
_ErrorName[951:968], + _ErrorName[968:993], + _ErrorName[993:1014], + _ErrorName[1014:1035], + _ErrorName[1035:1067], + _ErrorName[1067:1091], + _ErrorName[1091:1105], + _ErrorName[1105:1137], + _ErrorName[1137:1153], + _ErrorName[1153:1171], + _ErrorName[1171:1186], + _ErrorName[1186:1199], + _ErrorName[1199:1211], + _ErrorName[1211:1240], + _ErrorName[1240:1269], + _ErrorName[1269:1282], + _ErrorName[1282:1313], + _ErrorName[1313:1339], + _ErrorName[1339:1354], + _ErrorName[1354:1374], + _ErrorName[1374:1403], + _ErrorName[1403:1436], + _ErrorName[1436:1449], + _ErrorName[1449:1476], + _ErrorName[1476:1488], + _ErrorName[1488:1514], + _ErrorName[1514:1554], + _ErrorName[1554:1593], + _ErrorName[1593:1605], + _ErrorName[1605:1631], + _ErrorName[1631:1655], + _ErrorName[1655:1676], + _ErrorName[1676:1695], + _ErrorName[1695:1727], + _ErrorName[1727:1746], + _ErrorName[1746:1760], + _ErrorName[1760:1792], + _ErrorName[1792:1823], + _ErrorName[1823:1839], + _ErrorName[1839:1856], + _ErrorName[1856:1877], + _ErrorName[1877:1896], + _ErrorName[1896:1911], + _ErrorName[1911:1927], + _ErrorName[1927:1950], + _ErrorName[1950:1972], + _ErrorName[1972:1996], + _ErrorName[1996:2028], + _ErrorName[2028:2064], + _ErrorName[2064:2088], + _ErrorName[2088:2108], + _ErrorName[2108:2134], + _ErrorName[2134:2162], + _ErrorName[2162:2199], + _ErrorName[2199:2216], + _ErrorName[2216:2247], + _ErrorName[2247:2294], + _ErrorName[2294:2317], + _ErrorName[2317:2339], + _ErrorName[2339:2378], + _ErrorName[2378:2419], + _ErrorName[2419:2445], + _ErrorName[2445:2471], + _ErrorName[2471:2500], + _ErrorName[2500:2529], + _ErrorName[2529:2561], + _ErrorName[2561:2582], + _ErrorName[2582:2601], + _ErrorName[2601:2622], + _ErrorName[2622:2639], + _ErrorName[2639:2656], + _ErrorName[2656:2675], + _ErrorName[2675:2696], + _ErrorName[2696:2720], + _ErrorName[2720:2742], + _ErrorName[2742:2761], + _ErrorName[2761:2781], + _ErrorName[2781:2796], + _ErrorName[2796:2821], + _ErrorName[2821:2835], + _ErrorName[2835:2850], + _ErrorName[2850:2898], + _ErrorName[2898:2922], + _ErrorName[2922:2942], + _ErrorName[2942:2966], + _ErrorName[2966:2990], + _ErrorName[2990:3025], + _ErrorName[3025:3047], + _ErrorName[3047:3060], + _ErrorName[3060:3080], + _ErrorName[3080:3099], + _ErrorName[3099:3126], + _ErrorName[3126:3147], + _ErrorName[3147:3180], + _ErrorName[3180:3222], + _ErrorName[3222:3272], + _ErrorName[3272:3297], + _ErrorName[3297:3322], + _ErrorName[3322:3349], + _ErrorName[3349:3370], + _ErrorName[3370:3403], + _ErrorName[3403:3425], + _ErrorName[3425:3439], + _ErrorName[3439:3463], + _ErrorName[3463:3498], + _ErrorName[3498:3519], + _ErrorName[3519:3550], + _ErrorName[3550:3590], + _ErrorName[3590:3630], + _ErrorName[3630:3665], + _ErrorName[3665:3685], + _ErrorName[3685:3731], + _ErrorName[3731:3759], + _ErrorName[3759:3775], + _ErrorName[3775:3808], + _ErrorName[3808:3824], + _ErrorName[3824:3851], + _ErrorName[3851:3872], + _ErrorName[3872:3885], + _ErrorName[3885:3901], + _ErrorName[3901:3909], + _ErrorName[3909:3925], + _ErrorName[3925:3944], + _ErrorName[3944:3961], + _ErrorName[3961:3969], + _ErrorName[3969:3995], + _ErrorName[3995:4031], + _ErrorName[4031:4043], + _ErrorName[4043:4054], + _ErrorName[4054:4071], + _ErrorName[4071:4078], + _ErrorName[4078:4109], + _ErrorName[4109:4132], + _ErrorName[4132:4154], + _ErrorName[4154:4168], + _ErrorName[4168:4183], + _ErrorName[4183:4203], + _ErrorName[4203:4234], + _ErrorName[4234:4264], + _ErrorName[4264:4283], + _ErrorName[4283:4296], + _ErrorName[4296:4312], + 
_ErrorName[4312:4333], + _ErrorName[4333:4352], + _ErrorName[4352:4381], + _ErrorName[4381:4409], + _ErrorName[4409:4436], + _ErrorName[4436:4450], + _ErrorName[4450:4484], + _ErrorName[4484:4512], + _ErrorName[4512:4535], + _ErrorName[4535:4547], + _ErrorName[4547:4561], + _ErrorName[4561:4578], + _ErrorName[4578:4600], + _ErrorName[4600:4628], + _ErrorName[4628:4654], + _ErrorName[4654:4663], + _ErrorName[4663:4676], + _ErrorName[4676:4699], + _ErrorName[4699:4712], + _ErrorName[4712:4741], + _ErrorName[4741:4759], + _ErrorName[4759:4771], + _ErrorName[4771:4791], + _ErrorName[4791:4805], + _ErrorName[4805:4825], + _ErrorName[4825:4843], + _ErrorName[4843:4857], + _ErrorName[4857:4870], + _ErrorName[4870:4881], + _ErrorName[4881:4903], + _ErrorName[4903:4922], + _ErrorName[4922:4936], + _ErrorName[4936:4952], + _ErrorName[4952:4989], + _ErrorName[4989:5023], + _ErrorName[5023:5039], + _ErrorName[5039:5057], + _ErrorName[5057:5090], + _ErrorName[5090:5121], + _ErrorName[5121:5154], + _ErrorName[5154:5178], + _ErrorName[5178:5203], + _ErrorName[5203:5215], + _ErrorName[5215:5235], + _ErrorName[5235:5263], + _ErrorName[5263:5292], + _ErrorName[5292:5310], + _ErrorName[5310:5338], + _ErrorName[5338:5368], + _ErrorName[5368:5385], + _ErrorName[5385:5403], + _ErrorName[5403:5422], + _ErrorName[5422:5441], + _ErrorName[5441:5448], + _ErrorName[5448:5469], + _ErrorName[5469:5491], + _ErrorName[5491:5504], + _ErrorName[5504:5517], + _ErrorName[5517:5538], + _ErrorName[5538:5556], + _ErrorName[5556:5572], + _ErrorName[5572:5598], + _ErrorName[5598:5612], + _ErrorName[5612:5626], + _ErrorName[5626:5649], + _ErrorName[5649:5673], + _ErrorName[5673:5688], + _ErrorName[5688:5702], + _ErrorName[5702:5726], + _ErrorName[5726:5744], + _ErrorName[5744:5775], + _ErrorName[5775:5799], + _ErrorName[5799:5821], + _ErrorName[5821:5857], + _ErrorName[5857:5887], + _ErrorName[5887:5911], + _ErrorName[5911:5924], + _ErrorName[5924:5943], + _ErrorName[5943:5968], + _ErrorName[5968:5988], + _ErrorName[5988:6018], + _ErrorName[6018:6058], + _ErrorName[6058:6069], + _ErrorName[6069:6082], + _ErrorName[6082:6097], + _ErrorName[6097:6114], + _ErrorName[6114:6130], + _ErrorName[6130:6150], + _ErrorName[6150:6164], + _ErrorName[6164:6179], + _ErrorName[6179:6193], + _ErrorName[6193:6208], + _ErrorName[6208:6235], + _ErrorName[6235:6252], + _ErrorName[6252:6271], + _ErrorName[6271:6286], + _ErrorName[6286:6318], + _ErrorName[6318:6350], + _ErrorName[6350:6372], + _ErrorName[6372:6409], + _ErrorName[6409:6431], + _ErrorName[6431:6466], + _ErrorName[6466:6490], + _ErrorName[6490:6504], + _ErrorName[6504:6526], + _ErrorName[6526:6544], + _ErrorName[6544:6571], + _ErrorName[6571:6595], + _ErrorName[6595:6614], + _ErrorName[6614:6635], + _ErrorName[6635:6659], + _ErrorName[6659:6670], + _ErrorName[6670:6681], + _ErrorName[6681:6693], + _ErrorName[6693:6720], + _ErrorName[6720:6749], + _ErrorName[6749:6762], + _ErrorName[6762:6776], + _ErrorName[6776:6797], + _ErrorName[6797:6815], + _ErrorName[6815:6829], + _ErrorName[6829:6857], + _ErrorName[6857:6888], + _ErrorName[6888:6908], + _ErrorName[6908:6932], + _ErrorName[6932:6956], + _ErrorName[6956:6990], + _ErrorName[6990:7005], + _ErrorName[7005:7028], + _ErrorName[7028:7047], + _ErrorName[7047:7057], + _ErrorName[7057:7074], + _ErrorName[7074:7093], + _ErrorName[7093:7115], + _ErrorName[7115:7137], + _ErrorName[7137:7155], + _ErrorName[7155:7192], + _ErrorName[7192:7218], + _ErrorName[7218:7238], + _ErrorName[7238:7259], + _ErrorName[7259:7303], + _ErrorName[7303:7322], + 
_ErrorName[7322:7341], + _ErrorName[7341:7351], + _ErrorName[7351:7375], + _ErrorName[7375:7399], + _ErrorName[7399:7437], + _ErrorName[7437:7468], + _ErrorName[7468:7494], + _ErrorName[7494:7527], + _ErrorName[7527:7550], + _ErrorName[7550:7582], + _ErrorName[7582:7602], + _ErrorName[7602:7642], + _ErrorName[7642:7658], + _ErrorName[7658:7666], + _ErrorName[7666:7688], + _ErrorName[7688:7722], + _ErrorName[7722:7753], + _ErrorName[7753:7770], + _ErrorName[7770:7787], + _ErrorName[7787:7810], + _ErrorName[7810:7823], + _ErrorName[7823:7840], + _ErrorName[7840:7868], + _ErrorName[7868:7899], + _ErrorName[7899:7937], + _ErrorName[7937:7948], + _ErrorName[7948:7968], + _ErrorName[7968:7994], + _ErrorName[7994:8021], + _ErrorName[8021:8047], + _ErrorName[8047:8072], + _ErrorName[8072:8086], + _ErrorName[8086:8118], + _ErrorName[8118:8140], + _ErrorName[8140:8163], + _ErrorName[8163:8192], + _ErrorName[8192:8214], + _ErrorName[8214:8233], + _ErrorName[8233:8250], + _ErrorName[8250:8269], + _ErrorName[8269:8304], + _ErrorName[8304:8326], + _ErrorName[8326:8369], + _ErrorName[8369:8406], + _ErrorName[8406:8427], + _ErrorName[8427:8443], + _ErrorName[8443:8457], + _ErrorName[8457:8470], + _ErrorName[8470:8487], + _ErrorName[8487:8520], + _ErrorName[8520:8550], +} + +// ErrorString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ErrorString(s string) (Error, error) { + if val, ok := _ErrorNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ErrorNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Error values", s) +} + +// ErrorValues returns all values of the enum +func ErrorValues() []Error { + return _ErrorValues +} + +// ErrorStrings returns a slice of all String values of the enum +func ErrorStrings() []string { + strs := make([]string, len(_ErrorNames)) + copy(strs, _ErrorNames) + return strs +} + +// IsAError returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Error) IsAError() bool { + _, ok := _ErrorMap[i] + return ok +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/exception.go b/vendor/github.com/ClickHouse/ch-go/proto/exception.go new file mode 100644 index 0000000..ccda6f0 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/exception.go @@ -0,0 +1,59 @@ +package proto + +import "github.com/go-faster/errors" + +// Exception is server-side error. +type Exception struct { + Code Error + Name string + Message string + Stack string + Nested bool +} + +// DecodeAware decodes exception. +func (e *Exception) DecodeAware(r *Reader, _ int) error { + code, err := r.Int32() + if err != nil { + return errors.Wrap(err, "code") + } + e.Code = Error(code) + + { + s, err := r.Str() + if err != nil { + return errors.Wrap(err, "name") + } + e.Name = s + } + { + s, err := r.Str() + if err != nil { + return errors.Wrap(err, "message") + } + e.Message = s + } + { + s, err := r.Str() + if err != nil { + return errors.Wrap(err, "stack trace") + } + e.Stack = s + } + nested, err := r.Bool() + if err != nil { + return errors.Wrap(err, "nested") + } + e.Nested = nested + + return nil +} + +// EncodeAware encodes exception. 
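A minimal sketch of how a caller might render a decoded Exception using the generated Error helpers. It assumes the usual enumer-generated String method on Error; the describe helper and the literal code value below are made up for illustration and are not part of the patched library.

    package main

    import (
    	"fmt"

    	"github.com/ClickHouse/ch-go/proto"
    )

    // describe is a hypothetical helper: it renders a decoded server-side
    // exception as a plain Go error, falling back to the numeric code when
    // it is not a known member of the generated Error enum.
    func describe(e *proto.Exception) error {
    	code := fmt.Sprintf("code %d", int(e.Code))
    	if e.Code.IsAError() {
    		code = e.Code.String() // assumed enumer-generated method, e.g. "ErrTimeoutExceeded"
    	}
    	return fmt.Errorf("%s (%s): %s", e.Name, code, e.Message)
    }

    func main() {
    	e := &proto.Exception{
    		Code:    proto.Error(159), // made-up numeric code for the example
    		Name:    "DB::Exception",
    		Message: "Timeout exceeded",
    	}
    	fmt.Println(describe(e))
    }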
+func (e *Exception) EncodeAware(b *Buffer, _ int) { + b.PutInt32(int32(e.Code)) + b.PutString(e.Name) + b.PutString(e.Message) + b.PutString(e.Stack) + b.PutBool(e.Nested) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/feature.go b/vendor/github.com/ClickHouse/ch-go/proto/feature.go new file mode 100644 index 0000000..b06deac --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/feature.go @@ -0,0 +1,45 @@ +package proto + +//go:generate go run github.com/dmarkham/enumer -type Feature -trimprefix Feature -output feature_enum.go + +// Feature represents server side feature. +type Feature int + +// src/Core/ProtocolDefines.h + +// Possible features. +const ( + FeatureBlockInfo Feature = 51903 + FeatureTimezone Feature = 54058 + FeatureQuotaKeyInClientInfo Feature = 54060 + FeatureDisplayName Feature = 54372 + FeatureVersionPatch Feature = 54401 + FeatureTempTables Feature = 50264 + FeatureServerLogs Feature = 54406 + FeatureColumnDefaultsMetadata Feature = 54410 + FeatureClientWriteInfo Feature = 54420 + FeatureSettingsSerializedAsStrings Feature = 54429 + FeatureInterServerSecret Feature = 54441 + FeatureOpenTelemetry Feature = 54442 + FeatureXForwardedForInClientInfo Feature = 54443 + FeatureRefererInClientInfo Feature = 54447 + FeatureDistributedDepth Feature = 54448 + FeatureQueryStartTime Feature = 54449 + FeatureProfileEvents Feature = 54451 + FeatureParallelReplicas Feature = 54453 + FeatureCustomSerialization Feature = 54454 + FeatureQuotaKey Feature = 54458 + FeatureAddendum Feature = 54458 + FeatureParameters Feature = 54459 + FeatureServerQueryTimeInProgress Feature = 54460 +) + +// Version reports protocol version when Feature was introduced. +func (f Feature) Version() int { + return int(f) +} + +// In reports whether feature is implemented in provided protocol version. +func (f Feature) In(v int) bool { + return v >= f.Version() +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go new file mode 100644 index 0000000..95b453f --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go @@ -0,0 +1,176 @@ +// Code generated by "enumer -type Feature -trimprefix Feature -output feature_enum.go"; DO NOT EDIT. 
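Since each Feature constant is the protocol revision that introduced it, a capability check reduces to an integer comparison against the negotiated revision. A small illustrative sketch; the revision value below is made up:

    package main

    import (
    	"fmt"

    	"github.com/ClickHouse/ch-go/proto"
    )

    func main() {
    	// Made-up revision, as if negotiated during the hello exchange.
    	const serverRevision = 54450

    	// Introduced in revision 54449, so already available.
    	fmt.Println(proto.FeatureQueryStartTime.In(serverRevision)) // true
    	// Introduced in revision 54451, so not yet available.
    	fmt.Println(proto.FeatureProfileEvents.In(serverRevision)) // false
    }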
+ +package proto + +import ( + "fmt" + "strings" +) + +const _FeatureName = "TempTablesBlockInfoTimezoneQuotaKeyInClientInfoDisplayNameVersionPatchServerLogsColumnDefaultsMetadataClientWriteInfoSettingsSerializedAsStringsInterServerSecretOpenTelemetryXForwardedForInClientInfoRefererInClientInfoDistributedDepthQueryStartTimeProfileEventsParallelReplicasCustomSerializationQuotaKeyParametersServerQueryTimeInProgress" +const _FeatureLowerName = "temptablesblockinfotimezonequotakeyinclientinfodisplaynameversionpatchserverlogscolumndefaultsmetadataclientwriteinfosettingsserializedasstringsinterserversecretopentelemetryxforwardedforinclientinforefererinclientinfodistributeddepthquerystarttimeprofileeventsparallelreplicascustomserializationquotakeyparametersserverquerytimeinprogress" + +var _FeatureMap = map[Feature]string{ + 50264: _FeatureName[0:10], + 51903: _FeatureName[10:19], + 54058: _FeatureName[19:27], + 54060: _FeatureName[27:47], + 54372: _FeatureName[47:58], + 54401: _FeatureName[58:70], + 54406: _FeatureName[70:80], + 54410: _FeatureName[80:102], + 54420: _FeatureName[102:117], + 54429: _FeatureName[117:144], + 54441: _FeatureName[144:161], + 54442: _FeatureName[161:174], + 54443: _FeatureName[174:199], + 54447: _FeatureName[199:218], + 54448: _FeatureName[218:234], + 54449: _FeatureName[234:248], + 54451: _FeatureName[248:261], + 54453: _FeatureName[261:277], + 54454: _FeatureName[277:296], + 54458: _FeatureName[296:304], + 54459: _FeatureName[304:314], + 54460: _FeatureName[314:339], +} + +func (i Feature) String() string { + if str, ok := _FeatureMap[i]; ok { + return str + } + return fmt.Sprintf("Feature(%d)", i) +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. 
+func _FeatureNoOp() { + var x [1]struct{} + _ = x[FeatureTempTables-(50264)] + _ = x[FeatureBlockInfo-(51903)] + _ = x[FeatureTimezone-(54058)] + _ = x[FeatureQuotaKeyInClientInfo-(54060)] + _ = x[FeatureDisplayName-(54372)] + _ = x[FeatureVersionPatch-(54401)] + _ = x[FeatureServerLogs-(54406)] + _ = x[FeatureColumnDefaultsMetadata-(54410)] + _ = x[FeatureClientWriteInfo-(54420)] + _ = x[FeatureSettingsSerializedAsStrings-(54429)] + _ = x[FeatureInterServerSecret-(54441)] + _ = x[FeatureOpenTelemetry-(54442)] + _ = x[FeatureXForwardedForInClientInfo-(54443)] + _ = x[FeatureRefererInClientInfo-(54447)] + _ = x[FeatureDistributedDepth-(54448)] + _ = x[FeatureQueryStartTime-(54449)] + _ = x[FeatureProfileEvents-(54451)] + _ = x[FeatureParallelReplicas-(54453)] + _ = x[FeatureCustomSerialization-(54454)] + _ = x[FeatureQuotaKey-(54458)] + _ = x[FeatureParameters-(54459)] + _ = x[FeatureServerQueryTimeInProgress-(54460)] +} + +var _FeatureValues = []Feature{FeatureTempTables, FeatureBlockInfo, FeatureTimezone, FeatureQuotaKeyInClientInfo, FeatureDisplayName, FeatureVersionPatch, FeatureServerLogs, FeatureColumnDefaultsMetadata, FeatureClientWriteInfo, FeatureSettingsSerializedAsStrings, FeatureInterServerSecret, FeatureOpenTelemetry, FeatureXForwardedForInClientInfo, FeatureRefererInClientInfo, FeatureDistributedDepth, FeatureQueryStartTime, FeatureProfileEvents, FeatureParallelReplicas, FeatureCustomSerialization, FeatureQuotaKey, FeatureParameters, FeatureServerQueryTimeInProgress} + +var _FeatureNameToValueMap = map[string]Feature{ + _FeatureName[0:10]: FeatureTempTables, + _FeatureLowerName[0:10]: FeatureTempTables, + _FeatureName[10:19]: FeatureBlockInfo, + _FeatureLowerName[10:19]: FeatureBlockInfo, + _FeatureName[19:27]: FeatureTimezone, + _FeatureLowerName[19:27]: FeatureTimezone, + _FeatureName[27:47]: FeatureQuotaKeyInClientInfo, + _FeatureLowerName[27:47]: FeatureQuotaKeyInClientInfo, + _FeatureName[47:58]: FeatureDisplayName, + _FeatureLowerName[47:58]: FeatureDisplayName, + _FeatureName[58:70]: FeatureVersionPatch, + _FeatureLowerName[58:70]: FeatureVersionPatch, + _FeatureName[70:80]: FeatureServerLogs, + _FeatureLowerName[70:80]: FeatureServerLogs, + _FeatureName[80:102]: FeatureColumnDefaultsMetadata, + _FeatureLowerName[80:102]: FeatureColumnDefaultsMetadata, + _FeatureName[102:117]: FeatureClientWriteInfo, + _FeatureLowerName[102:117]: FeatureClientWriteInfo, + _FeatureName[117:144]: FeatureSettingsSerializedAsStrings, + _FeatureLowerName[117:144]: FeatureSettingsSerializedAsStrings, + _FeatureName[144:161]: FeatureInterServerSecret, + _FeatureLowerName[144:161]: FeatureInterServerSecret, + _FeatureName[161:174]: FeatureOpenTelemetry, + _FeatureLowerName[161:174]: FeatureOpenTelemetry, + _FeatureName[174:199]: FeatureXForwardedForInClientInfo, + _FeatureLowerName[174:199]: FeatureXForwardedForInClientInfo, + _FeatureName[199:218]: FeatureRefererInClientInfo, + _FeatureLowerName[199:218]: FeatureRefererInClientInfo, + _FeatureName[218:234]: FeatureDistributedDepth, + _FeatureLowerName[218:234]: FeatureDistributedDepth, + _FeatureName[234:248]: FeatureQueryStartTime, + _FeatureLowerName[234:248]: FeatureQueryStartTime, + _FeatureName[248:261]: FeatureProfileEvents, + _FeatureLowerName[248:261]: FeatureProfileEvents, + _FeatureName[261:277]: FeatureParallelReplicas, + _FeatureLowerName[261:277]: FeatureParallelReplicas, + _FeatureName[277:296]: FeatureCustomSerialization, + _FeatureLowerName[277:296]: FeatureCustomSerialization, + _FeatureName[296:304]: FeatureQuotaKey, + 
_FeatureLowerName[296:304]: FeatureQuotaKey, + _FeatureName[304:314]: FeatureParameters, + _FeatureLowerName[304:314]: FeatureParameters, + _FeatureName[314:339]: FeatureServerQueryTimeInProgress, + _FeatureLowerName[314:339]: FeatureServerQueryTimeInProgress, +} + +var _FeatureNames = []string{ + _FeatureName[0:10], + _FeatureName[10:19], + _FeatureName[19:27], + _FeatureName[27:47], + _FeatureName[47:58], + _FeatureName[58:70], + _FeatureName[70:80], + _FeatureName[80:102], + _FeatureName[102:117], + _FeatureName[117:144], + _FeatureName[144:161], + _FeatureName[161:174], + _FeatureName[174:199], + _FeatureName[199:218], + _FeatureName[218:234], + _FeatureName[234:248], + _FeatureName[248:261], + _FeatureName[261:277], + _FeatureName[277:296], + _FeatureName[296:304], + _FeatureName[304:314], + _FeatureName[314:339], +} + +// FeatureString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func FeatureString(s string) (Feature, error) { + if val, ok := _FeatureNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _FeatureNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Feature values", s) +} + +// FeatureValues returns all values of the enum +func FeatureValues() []Feature { + return _FeatureValues +} + +// FeatureStrings returns a slice of all String values of the enum +func FeatureStrings() []string { + strs := make([]string, len(_FeatureNames)) + copy(strs, _FeatureNames) + return strs +} + +// IsAFeature returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Feature) IsAFeature() bool { + _, ok := _FeatureMap[i] + return ok +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/gen.go b/vendor/github.com/ClickHouse/ch-go/proto/gen.go new file mode 100644 index 0000000..f67bb41 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/gen.go @@ -0,0 +1,3 @@ +package proto + +//go:generate go run ./cmd/ch-gen-col diff --git a/vendor/github.com/ClickHouse/ch-go/proto/int128.go b/vendor/github.com/ClickHouse/ch-go/proto/int128.go new file mode 100644 index 0000000..f7ec378 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/int128.go @@ -0,0 +1,93 @@ +package proto + +import ( + "encoding/binary" + "math" +) + +// Int128 represents Int128 type. +type Int128 struct { + Low uint64 // first 64 bits + High uint64 // last 64 bits +} + +// Int value of Int128. +// +// Returns math.MaxInt if High is set. +func (i Int128) Int() int { + switch i.High { + case 0, math.MaxUint64: + return int(i.Low) + default: + return math.MaxInt + } +} + +// UInt64 value of Int128. +func (i Int128) UInt64() uint64 { + switch i.High { + case 0, math.MaxUint64: + return uint64(int(i.Low)) + default: + return math.MaxUint64 + } +} + +// Int128FromInt creates new Int128 from int. +func Int128FromInt(v int) Int128 { + var hi uint64 + if v < 0 { + hi = math.MaxUint64 + } + return Int128{ + High: hi, + Low: uint64(v), + } +} + +// Int128FromUInt64 creates new Int128 from uint64. +func Int128FromUInt64(v uint64) Int128 { + return Int128(UInt128FromUInt64(v)) +} + +// UInt128 represents UInt128 type. +type UInt128 struct { + Low uint64 // first 64 bits + High uint64 // last 64 bits +} + +// UInt64 returns UInt64 value of UInt128. +func (i UInt128) UInt64() uint64 { + if i.High > 0 { + return math.MaxUint64 + } + return i.Low +} + +// Int returns Int value of UInt128. 
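Int128 stores the value as two 64-bit limbs: Int128FromInt sign-extends negative values into the high word, and Int converts back only when the high word is zero or all ones, saturating to math.MaxInt otherwise. A brief sketch of that behaviour, for illustration only:

    package main

    import (
    	"fmt"
    	"math"

    	"github.com/ClickHouse/ch-go/proto"
    )

    func main() {
    	neg := proto.Int128FromInt(-7)
    	// Negative values are sign-extended into the high word.
    	fmt.Println(neg.High == math.MaxUint64, neg.Int()) // true -7

    	// A value outside the int range saturates to math.MaxInt.
    	big := proto.Int128{High: 1}
    	fmt.Println(big.Int() == math.MaxInt) // true

    	// The unsigned variant saturates to math.MaxUint64.
    	fmt.Println(proto.UInt128{High: 1}.UInt64() == math.MaxUint64) // true
    }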
+func (i UInt128) Int() int { + return int(i.UInt64()) +} + +// UInt128FromInt creates new UInt128 from int. +func UInt128FromInt(v int) UInt128 { + return UInt128(Int128FromInt(v)) +} + +// UInt128FromUInt64 creates new UInt128 from uint64. +func UInt128FromUInt64(v uint64) UInt128 { + return UInt128{Low: v} +} + +func binUInt128(b []byte) UInt128 { + _ = b[:128/8] // bounds check hint to compiler; see golang.org/issue/14808 + return UInt128{ + Low: binary.LittleEndian.Uint64(b[0 : 64/8]), + High: binary.LittleEndian.Uint64(b[64/8 : 128/8]), + } +} + +func binPutUInt128(b []byte, v UInt128) { + binary.LittleEndian.PutUint64(b[64/8:128/8], v.High) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/int256.go b/vendor/github.com/ClickHouse/ch-go/proto/int256.go new file mode 100644 index 0000000..df81b18 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/int256.go @@ -0,0 +1,68 @@ +package proto + +import ( + "encoding/binary" + "math" +) + +// Int256 is 256-bit signed integer. +type Int256 struct { + Low UInt128 // first 128 bits + High UInt128 // last 128 bits +} + +// Int256FromInt creates new Int256 from int. +func Int256FromInt(v int) Int256 { + var hi UInt128 + lo := UInt128{Low: uint64(v)} + if v < 0 { + hi = UInt128{ + Low: math.MaxUint64, + High: math.MaxUint64, + } + lo.High = math.MaxUint64 + } + return Int256{ + High: hi, + Low: lo, + } +} + +// UInt256 is 256-bit unsigned integer. +type UInt256 struct { + Low UInt128 // first 128 bits + High UInt128 // last 128 bits +} + +// UInt256FromInt creates new UInt256 from int. +func UInt256FromInt(v int) UInt256 { + return UInt256(Int256FromInt(v)) +} + +// UInt256FromUInt64 creates new UInt256 from uint64. +func UInt256FromUInt64(v uint64) UInt256 { + return UInt256{Low: UInt128{Low: v}} +} + +func binUInt256(b []byte) UInt256 { + _ = b[:256/8] // bounds check hint to compiler; see golang.org/issue/14808 + // Calling manually because binUInt128 is not inlining. + return UInt256{ + Low: UInt128{ + Low: binary.LittleEndian.Uint64(b[0 : 64/8]), + High: binary.LittleEndian.Uint64(b[64/8 : 128/8]), + }, + High: UInt128{ + Low: binary.LittleEndian.Uint64(b[128/8 : 192/8]), + High: binary.LittleEndian.Uint64(b[192/8 : 256/8]), + }, + } +} + +func binPutUInt256(b []byte, v UInt256) { + // Calling manually because binPutUInt128 is not inlining. + binary.LittleEndian.PutUint64(b[192/8:256/8], v.High.High) + binary.LittleEndian.PutUint64(b[128/8:192/8], v.High.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.Low.High) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low.Low) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/interval_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/interval_enum.go new file mode 100644 index 0000000..c7cb9f0 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/interval_enum.go @@ -0,0 +1,102 @@ +// Code generated by "enumer -type IntervalScale -output interval_enum.go"; DO NOT EDIT. 
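Int256 and UInt256 nest two UInt128 limbs, and the FromInt constructors sign-extend across all four 64-bit words. A short illustrative sketch of the conversions:

    package main

    import (
    	"fmt"
    	"math"

    	"github.com/ClickHouse/ch-go/proto"
    )

    func main() {
    	n := proto.Int256FromInt(-1)
    	// All four 64-bit limbs are set for a negative value.
    	fmt.Println(n.Low.Low == math.MaxUint64, n.High.High == math.MaxUint64) // true true

    	u := proto.UInt256FromUInt64(42)
    	fmt.Println(u.Low.Low, u.High.High) // 42 0
    }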
+ +package proto + +import ( + "fmt" + "strings" +) + +const _IntervalScaleName = "IntervalSecondIntervalMinuteIntervalHourIntervalDayIntervalWeekIntervalMonthIntervalQuarterIntervalYear" + +var _IntervalScaleIndex = [...]uint8{0, 14, 28, 40, 51, 63, 76, 91, 103} + +const _IntervalScaleLowerName = "intervalsecondintervalminuteintervalhourintervaldayintervalweekintervalmonthintervalquarterintervalyear" + +func (i IntervalScale) String() string { + if i >= IntervalScale(len(_IntervalScaleIndex)-1) { + return fmt.Sprintf("IntervalScale(%d)", i) + } + return _IntervalScaleName[_IntervalScaleIndex[i]:_IntervalScaleIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _IntervalScaleNoOp() { + var x [1]struct{} + _ = x[IntervalSecond-(0)] + _ = x[IntervalMinute-(1)] + _ = x[IntervalHour-(2)] + _ = x[IntervalDay-(3)] + _ = x[IntervalWeek-(4)] + _ = x[IntervalMonth-(5)] + _ = x[IntervalQuarter-(6)] + _ = x[IntervalYear-(7)] +} + +var _IntervalScaleValues = []IntervalScale{IntervalSecond, IntervalMinute, IntervalHour, IntervalDay, IntervalWeek, IntervalMonth, IntervalQuarter, IntervalYear} + +var _IntervalScaleNameToValueMap = map[string]IntervalScale{ + _IntervalScaleName[0:14]: IntervalSecond, + _IntervalScaleLowerName[0:14]: IntervalSecond, + _IntervalScaleName[14:28]: IntervalMinute, + _IntervalScaleLowerName[14:28]: IntervalMinute, + _IntervalScaleName[28:40]: IntervalHour, + _IntervalScaleLowerName[28:40]: IntervalHour, + _IntervalScaleName[40:51]: IntervalDay, + _IntervalScaleLowerName[40:51]: IntervalDay, + _IntervalScaleName[51:63]: IntervalWeek, + _IntervalScaleLowerName[51:63]: IntervalWeek, + _IntervalScaleName[63:76]: IntervalMonth, + _IntervalScaleLowerName[63:76]: IntervalMonth, + _IntervalScaleName[76:91]: IntervalQuarter, + _IntervalScaleLowerName[76:91]: IntervalQuarter, + _IntervalScaleName[91:103]: IntervalYear, + _IntervalScaleLowerName[91:103]: IntervalYear, +} + +var _IntervalScaleNames = []string{ + _IntervalScaleName[0:14], + _IntervalScaleName[14:28], + _IntervalScaleName[28:40], + _IntervalScaleName[40:51], + _IntervalScaleName[51:63], + _IntervalScaleName[63:76], + _IntervalScaleName[76:91], + _IntervalScaleName[91:103], +} + +// IntervalScaleString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func IntervalScaleString(s string) (IntervalScale, error) { + if val, ok := _IntervalScaleNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _IntervalScaleNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to IntervalScale values", s) +} + +// IntervalScaleValues returns all values of the enum +func IntervalScaleValues() []IntervalScale { + return _IntervalScaleValues +} + +// IntervalScaleStrings returns a slice of all String values of the enum +func IntervalScaleStrings() []string { + strs := make([]string, len(_IntervalScaleNames)) + copy(strs, _IntervalScaleNames) + return strs +} + +// IsAIntervalScale returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i IntervalScale) IsAIntervalScale() bool { + for _, v := range _IntervalScaleValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/ipv4.go b/vendor/github.com/ClickHouse/ch-go/proto/ipv4.go new file mode 100644 index 0000000..bdc83f5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/ipv4.go @@ -0,0 +1,31 @@ +package proto + +import ( + "encoding/binary" + "net/netip" +) + +// IPv4 represents IPv4 address as uint32 number. +// +// Not using netip.Addr because uint32 is 5 times faster, +// consumes 6 times less memory and better represents IPv4. +// +// Use ToIP helper for convenience. +type IPv4 uint32 + +func (v IPv4) String() string { + return v.ToIP().String() +} + +// ToIP represents IPv4 as netaddr.IP. +func (v IPv4) ToIP() netip.Addr { + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], uint32(v)) + return netip.AddrFrom4(buf) +} + +// ToIPv4 represents ip as IPv4. Panics if ip is not ipv4. +func ToIPv4(ip netip.Addr) IPv4 { + b := ip.As4() + return IPv4(binary.BigEndian.Uint32(b[:])) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/ipv6.go b/vendor/github.com/ClickHouse/ch-go/proto/ipv6.go new file mode 100644 index 0000000..60898c5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/ipv6.go @@ -0,0 +1,25 @@ +package proto + +import ( + "net/netip" +) + +// IPv6 represents IPv6 address. +// +// Same as FixedString(16) internally in ClickHouse. +type IPv6 [16]byte + +func (v IPv6) String() string { + return v.ToIP().String() +} + +// ToIP represents IPv6 as netip.IP. +func (v IPv6) ToIP() netip.Addr { + return netip.AddrFrom16(v) +} + +// ToIPv6 represents ip as IPv6. +func ToIPv6(ip netip.Addr) IPv6 { return ip.As16() } + +func binIPv6(b []byte) IPv6 { return *(*[16]byte)(b) } +func binPutIPv6(b []byte, v IPv6) { copy(b, v[:]) } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/profile.go b/vendor/github.com/ClickHouse/ch-go/proto/profile.go new file mode 100644 index 0000000..12ca441 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/profile.go @@ -0,0 +1,69 @@ +package proto + +import "github.com/go-faster/errors" + +type Profile struct { + Rows uint64 + Blocks uint64 + Bytes uint64 + AppliedLimit bool + RowsBeforeLimit uint64 + CalculatedRowsBeforeLimit bool +} + +func (p *Profile) DecodeAware(r *Reader, _ int) error { + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "rows") + } + p.Rows = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "blocks") + } + p.Blocks = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "bytes") + } + p.Bytes = v + } + { + v, err := r.Bool() + if err != nil { + return errors.Wrap(err, "applied limit") + } + p.AppliedLimit = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "rows before limit") + } + p.RowsBeforeLimit = v + } + { + v, err := r.Bool() + if err != nil { + return errors.Wrap(err, "calculated rows before limit") + } + p.CalculatedRowsBeforeLimit = v + } + + return nil +} + +func (p Profile) EncodeAware(b *Buffer, _ int) { + ServerCodeProfile.Encode(b) + b.PutUVarInt(p.Rows) + b.PutUVarInt(p.Blocks) + b.PutUVarInt(p.Bytes) + b.PutBool(p.AppliedLimit) + b.PutUVarInt(p.RowsBeforeLimit) + b.PutBool(p.CalculatedRowsBeforeLimit) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go new file mode 100644 index 0000000..47b26fd --- 
/dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go @@ -0,0 +1,109 @@ +// Code generated by "enumer -type ProfileEventType -trimprefix Profile -text -json -output profile_enum.go"; DO NOT EDIT. + +package proto + +import ( + "encoding/json" + "fmt" + "strings" +) + +const _ProfileEventTypeName = "IncrementGauge" + +var _ProfileEventTypeIndex = [...]uint8{0, 9, 14} + +const _ProfileEventTypeLowerName = "incrementgauge" + +func (i ProfileEventType) String() string { + i -= 1 + if i >= ProfileEventType(len(_ProfileEventTypeIndex)-1) { + return fmt.Sprintf("ProfileEventType(%d)", i+1) + } + return _ProfileEventTypeName[_ProfileEventTypeIndex[i]:_ProfileEventTypeIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _ProfileEventTypeNoOp() { + var x [1]struct{} + _ = x[ProfileIncrement-(1)] + _ = x[ProfileGauge-(2)] +} + +var _ProfileEventTypeValues = []ProfileEventType{ProfileIncrement, ProfileGauge} + +var _ProfileEventTypeNameToValueMap = map[string]ProfileEventType{ + _ProfileEventTypeName[0:9]: ProfileIncrement, + _ProfileEventTypeLowerName[0:9]: ProfileIncrement, + _ProfileEventTypeName[9:14]: ProfileGauge, + _ProfileEventTypeLowerName[9:14]: ProfileGauge, +} + +var _ProfileEventTypeNames = []string{ + _ProfileEventTypeName[0:9], + _ProfileEventTypeName[9:14], +} + +// ProfileEventTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ProfileEventTypeString(s string) (ProfileEventType, error) { + if val, ok := _ProfileEventTypeNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ProfileEventTypeNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ProfileEventType values", s) +} + +// ProfileEventTypeValues returns all values of the enum +func ProfileEventTypeValues() []ProfileEventType { + return _ProfileEventTypeValues +} + +// ProfileEventTypeStrings returns a slice of all String values of the enum +func ProfileEventTypeStrings() []string { + strs := make([]string, len(_ProfileEventTypeNames)) + copy(strs, _ProfileEventTypeNames) + return strs +} + +// IsAProfileEventType returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i ProfileEventType) IsAProfileEventType() bool { + for _, v := range _ProfileEventTypeValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for ProfileEventType +func (i ProfileEventType) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for ProfileEventType +func (i *ProfileEventType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("ProfileEventType should be a string, got %s", data) + } + + var err error + *i, err = ProfileEventTypeString(s) + return err +} + +// MarshalText implements the encoding.TextMarshaler interface for ProfileEventType +func (i ProfileEventType) MarshalText() ([]byte, error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for ProfileEventType +func (i *ProfileEventType) UnmarshalText(text []byte) error { + var err error + *i, err = ProfileEventTypeString(string(text)) + return err +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go b/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go new file mode 100644 index 0000000..32ccb46 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go @@ -0,0 +1,70 @@ +package proto + +import ( + "time" + + "github.com/go-faster/errors" +) + +// ProfileEvents is data of ServerProfileEvents packet. +type ProfileEvents struct { + Host ColStr + Time ColDateTime + ThreadID ColUInt64 + Type ColInt8 + Name ColStr + Value ColAuto // UInt64 or Int64 depending on version +} + +func (d *ProfileEvents) All() ([]ProfileEvent, error) { + var out []ProfileEvent + for i := range d.Type { + e := ProfileEvent{ + Time: d.Time.Row(i), + Host: d.Host.Row(i), + ThreadID: d.ThreadID[i], + Type: ProfileEventType(d.Type[i]), + Name: d.Name.Row(i), + } + switch data := d.Value.Data.(type) { + case *ColInt64: + e.Value = (*data)[i] + case *ColUInt64: + e.Value = int64((*data)[i]) + default: + return nil, errors.Errorf("unexpected type %q for metric column", data.Type()) + } + out = append(out, e) + } + return out, nil +} + +func (d *ProfileEvents) Result() Results { + return Results{ + {Name: "host_name", Data: &d.Host}, + {Name: "current_time", Data: &d.Time}, + {Name: "thread_id", Data: &d.ThreadID}, + {Name: "type", Data: &d.Type}, + {Name: "name", Data: &d.Name}, + {Name: "value", Data: &d.Value}, + } +} + +//go:generate go run github.com/dmarkham/enumer -type ProfileEventType -trimprefix Profile -text -json -output profile_enum.go + +type ProfileEventType byte + +const ( + ProfileIncrement ProfileEventType = 1 + ProfileGauge ProfileEventType = 2 +) + +// ProfileEvent is detailed profiling event from Server. +type ProfileEvent struct { + Type ProfileEventType `json:"type"` + Name string `json:"name"` + Value int64 `json:"value"` + Host string `json:"host_name"` + Time time.Time `json:"current_time"` + ThreadID uint64 `json:"thread_id"` +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/progress.go b/vendor/github.com/ClickHouse/ch-go/proto/progress.go new file mode 100644 index 0000000..b4f59f5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/progress.go @@ -0,0 +1,78 @@ +package proto + +import ( + "github.com/go-faster/errors" +) + +// Progress of query execution. 
+type Progress struct { + Rows uint64 + Bytes uint64 + TotalRows uint64 + + WroteRows uint64 + WroteBytes uint64 + ElapsedNs uint64 +} + +func (p Progress) EncodeAware(b *Buffer, version int) { + b.PutUVarInt(p.Rows) + b.PutUVarInt(p.Bytes) + b.PutUVarInt(p.TotalRows) + if FeatureClientWriteInfo.In(version) { + b.PutUVarInt(p.WroteRows) + b.PutUVarInt(p.WroteBytes) + } + if FeatureServerQueryTimeInProgress.In(version) { + b.PutUVarInt(p.ElapsedNs) + } +} + +func (p *Progress) DecodeAware(r *Reader, version int) error { + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "rows") + } + p.Rows = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "bytes") + } + p.Bytes = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "total rows") + } + p.TotalRows = v + } + if FeatureClientWriteInfo.In(version) { + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "wrote rows") + } + p.WroteRows = v + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "wrote bytes") + } + p.WroteBytes = v + } + } + if FeatureServerQueryTimeInProgress.In(version) { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "wrote rows") + } + p.ElapsedNs = v + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/proto.go b/vendor/github.com/ClickHouse/ch-go/proto/proto.go new file mode 100644 index 0000000..a68ce67 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/proto.go @@ -0,0 +1,8 @@ +// Package proto implements ClickHouse wire protocol. +package proto + +// Defaults for ClientHello. +const ( + Version = 54460 + Name = "clickhouse/ch-go" +) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/query.go b/vendor/github.com/ClickHouse/ch-go/proto/query.go new file mode 100644 index 0000000..ce7b6b5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/query.go @@ -0,0 +1,213 @@ +package proto + +import ( + "github.com/go-faster/errors" +) + +type Query struct { + ID string + Body string + Secret string + Stage Stage + Compression Compression + Info ClientInfo + Settings []Setting + Parameters []Parameter +} + +type Parameter struct { + Key string + Value string +} + +func (p Parameter) Encode(b *Buffer) { + Setting{ + Key: p.Key, + Value: p.Value, + Custom: true, + }.Encode(b) +} + +func (p *Parameter) Decode(r *Reader) error { + var s Setting + if err := s.Decode(r); err != nil { + return errors.Wrap(err, "as setting") + } + + p.Key = s.Key + p.Value = s.Value + + return nil +} + +// src/Core/BaseSettings.h:191 (BaseSettingsHelpers.Flags) +const ( + settingFlagImportant = 0x01 + settingFlagCustom = 0x02 + settingFlagObsolete = 0x04 +) + +type Setting struct { + Key string + Value string + + Important bool + Custom bool + Obsolete bool +} + +func (s Setting) Encode(b *Buffer) { + b.PutString(s.Key) + { + var flags uint64 + if s.Important { + flags |= settingFlagImportant + } + if s.Custom { + flags |= settingFlagCustom + } + if s.Obsolete { + flags |= settingFlagObsolete + } + b.PutUVarInt(flags) + } + b.PutString(s.Value) +} + +func (s *Setting) Decode(r *Reader) error { + key, err := r.Str() + if err != nil { + return errors.Wrap(err, "key") + } + + if key == "" { + // End of settings. 
+ return nil + } + + flags, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "flags") + } + + v, err := r.Str() + if err != nil { + return errors.Wrapf(err, "value (%s)", key) + } + + s.Key = key + s.Important = flags&settingFlagImportant != 0 + s.Custom = flags&settingFlagCustom != 0 + s.Obsolete = flags&settingFlagObsolete != 0 + s.Value = v + + return nil +} + +func (q *Query) DecodeAware(r *Reader, version int) error { + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "query id") + } + q.ID = v + } + if FeatureClientWriteInfo.In(version) { + if err := q.Info.DecodeAware(r, version); err != nil { + return errors.Wrap(err, "client info") + } + } + if !FeatureSettingsSerializedAsStrings.In(version) { + return errors.New("unsupported version") + } + for { + var s Setting + if err := s.Decode(r); err != nil { + return errors.Wrap(err, "setting") + } + if s.Key == "" { + break + } + q.Settings = append(q.Settings, s) + } + if FeatureInterServerSecret.In(version) { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "inter-server secret") + } + q.Secret = v + } + + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "stage") + } + q.Stage = Stage(v) + if !q.Stage.IsAStage() { + return errors.Errorf("unknown stage %d", v) + } + } + { + v, err := r.UVarInt() + if err != nil { + return errors.Wrap(err, "compression") + } + q.Compression = Compression(v) + if !q.Compression.IsACompression() { + return errors.Errorf("unknown compression %d", v) + } + } + + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "query body") + } + q.Body = v + } + if FeatureParameters.In(version) { + for { + var p Parameter + if err := p.Decode(r); err != nil { + return errors.Wrap(err, "parameter") + } + if p.Key == "" { + break + } + q.Parameters = append(q.Parameters, p) + } + } + return nil +} + +func (q Query) EncodeAware(b *Buffer, version int) { + ClientCodeQuery.Encode(b) + b.PutString(q.ID) + if FeatureClientWriteInfo.In(version) { + q.Info.EncodeAware(b, version) + } + + if FeatureSettingsSerializedAsStrings.In(version) { + for _, s := range q.Settings { + s.Encode(b) + } + } + b.PutString("") // end of settings + + if FeatureInterServerSecret.In(version) { + b.PutString(q.Secret) + } + + StageComplete.Encode(b) + q.Compression.Encode(b) + + b.PutString(q.Body) + + if FeatureParameters.In(version) { + for _, p := range q.Parameters { + p.Encode(b) + } + b.PutString("") // end of parameters + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/reader.go b/vendor/github.com/ClickHouse/ch-go/proto/reader.go new file mode 100644 index 0000000..fd52126 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/reader.go @@ -0,0 +1,290 @@ +package proto + +import ( + "bufio" + "encoding/binary" + "io" + "math" + + "github.com/go-faster/errors" + + "github.com/ClickHouse/ch-go/compress" +) + +// Decoder implements decoding from Reader. +type Decoder interface { + Decode(r *Reader) error +} + +// AwareDecoder implements encoding to Buffer that depends on version. +type AwareDecoder interface { + DecodeAware(r *Reader, version int) error +} + +// Reader implements ClickHouse protocol decoding from buffered reader. +// Not goroutine-safe. +type Reader struct { + raw *bufio.Reader // raw bytes, e.g. 
on the wire + data io.Reader // data, decompressed or same as raw + b *Buffer // internal buffer + + decompressed io.Reader // decompressed data stream, from raw +} + +func (r *Reader) ReadByte() (byte, error) { + if err := r.readFull(1); err != nil { + return 0, err + } + return r.b.Buf[0], nil +} + +// EnableCompression makes next reads use decompressed source of data. +func (r *Reader) EnableCompression() { + r.data = r.decompressed +} + +// DisableCompression makes next read use raw source of data. +func (r *Reader) DisableCompression() { + r.data = r.raw +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.data.Read(p) +} + +// Decode value. +func (r *Reader) Decode(v Decoder) error { + return v.Decode(r) +} + +func (r *Reader) ReadFull(buf []byte) error { + if _, err := io.ReadFull(r, buf); err != nil { + return errors.Wrap(err, "read") + } + return nil +} + +func (r *Reader) readFull(n int) error { + r.b.Ensure(n) + return r.ReadFull(r.b.Buf) +} + +// ReadRaw reads raw n bytes. +func (r *Reader) ReadRaw(n int) ([]byte, error) { + if err := r.readFull(n); err != nil { + return nil, errors.Wrap(err, "read full") + } + + return r.b.Buf, nil +} + +// UVarInt reads uint64 from internal reader. +func (r *Reader) UVarInt() (uint64, error) { + n, err := binary.ReadUvarint(r) + if err != nil { + return 0, errors.Wrap(err, "read") + } + return n, nil +} + +func (r *Reader) StrLen() (int, error) { + n, err := r.Int() + if err != nil { + return 0, errors.Wrap(err, "read length") + } + + if n < 0 { + return 0, errors.Errorf("size %d is invalid", n) + } + + return n, nil +} + +// StrRaw decodes string to internal buffer and returns it directly. +// +// Do not retain returned slice. +func (r *Reader) StrRaw() ([]byte, error) { + n, err := r.StrLen() + if err != nil { + return nil, errors.Wrap(err, "read length") + } + r.b.Ensure(n) + if _, err := io.ReadFull(r.data, r.b.Buf); err != nil { + return nil, errors.Wrap(err, "read str") + } + + return r.b.Buf, nil +} + +// StrAppend decodes string and appends it to provided buf. +func (r *Reader) StrAppend(buf []byte) ([]byte, error) { + defer r.b.Reset() + + str, err := r.StrRaw() + if err != nil { + return nil, errors.Wrap(err, "raw") + } + + return append(buf, str...), nil +} + +// StrBytes decodes string and allocates new byte slice with result. +func (r *Reader) StrBytes() ([]byte, error) { + return r.StrAppend(nil) +} + +// Str decodes string. +func (r *Reader) Str() (string, error) { + s, err := r.StrBytes() + if err != nil { + return "", errors.Wrap(err, "bytes") + } + + return string(s), err +} + +// Int decodes uvarint as int. +func (r *Reader) Int() (int, error) { + n, err := r.UVarInt() + if err != nil { + return 0, errors.Wrap(err, "uvarint") + } + return int(n), nil +} + +// Int8 decodes int8 value. +func (r *Reader) Int8() (int8, error) { + v, err := r.UInt8() + if err != nil { + return 0, err + } + return int8(v), nil +} + +// Int16 decodes int16 value. +func (r *Reader) Int16() (int16, error) { + v, err := r.UInt16() + if err != nil { + return 0, err + } + return int16(v), nil +} + +// Int32 decodes int32 value. +func (r *Reader) Int32() (int32, error) { + v, err := r.UInt32() + if err != nil { + return 0, err + } + return int32(v), nil +} + +// Int64 decodes int64 value. +func (r *Reader) Int64() (int64, error) { + v, err := r.UInt64() + if err != nil { + return 0, err + } + return int64(v), nil +} + +// Int128 decodes Int128 value. 
+func (r *Reader) Int128() (Int128, error) { + v, err := r.UInt128() + if err != nil { + return Int128{}, err + } + return Int128(v), nil +} + +// Byte decodes byte value. +func (r *Reader) Byte() (byte, error) { + return r.UInt8() +} + +// UInt8 decodes uint8 value. +func (r *Reader) UInt8() (uint8, error) { + if err := r.readFull(1); err != nil { + return 0, errors.Wrap(err, "read") + } + return r.b.Buf[0], nil +} + +// UInt16 decodes uint16 value. +func (r *Reader) UInt16() (uint16, error) { + if err := r.readFull(2); err != nil { + return 0, errors.Wrap(err, "read") + } + return binary.LittleEndian.Uint16(r.b.Buf), nil +} + +// UInt32 decodes uint32 value. +func (r *Reader) UInt32() (uint32, error) { + if err := r.readFull(32 / 8); err != nil { + return 0, errors.Wrap(err, "read") + } + return binary.LittleEndian.Uint32(r.b.Buf), nil +} + +// UInt64 decodes uint64 value. +func (r *Reader) UInt64() (uint64, error) { + if err := r.readFull(64 / 8); err != nil { + return 0, errors.Wrap(err, "read") + } + return binary.LittleEndian.Uint64(r.b.Buf), nil +} + +// UInt128 decodes UInt128 value. +func (r *Reader) UInt128() (UInt128, error) { + if err := r.readFull(128 / 8); err != nil { + return UInt128{}, errors.Wrap(err, "read") + } + return binUInt128(r.b.Buf), nil +} + +// Float32 decodes float32 value. +func (r *Reader) Float32() (float32, error) { + v, err := r.UInt32() + if err != nil { + return 0, errors.Wrap(err, "bits") + } + return math.Float32frombits(v), nil +} + +// Float64 decodes float64 value. +func (r *Reader) Float64() (float64, error) { + v, err := r.UInt64() + if err != nil { + return 0, errors.Wrap(err, "bits") + } + return math.Float64frombits(v), nil +} + +// Bool decodes bool as uint8. +func (r *Reader) Bool() (bool, error) { + v, err := r.UInt8() + if err != nil { + return false, errors.Wrap(err, "uint8") + } + switch v { + case boolTrue: + return true, nil + case boolFalse: + return false, nil + default: + return false, errors.Errorf("unexpected value %d for boolean", v) + } +} + +const defaultReaderSize = 1024 * 128 // 128kb + +// NewReader initializes new Reader from provided io.Reader. +func NewReader(r io.Reader) *Reader { + c := bufio.NewReaderSize(r, defaultReaderSize) + return &Reader{ + raw: c, + data: c, + b: &Buffer{}, + decompressed: compress.NewReader(c), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/reset.go b/vendor/github.com/ClickHouse/ch-go/proto/reset.go new file mode 100644 index 0000000..ca61e80 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/reset.go @@ -0,0 +1,12 @@ +package proto + +type Resettable interface { + Reset() +} + +// Reset is helper to reset columns. +func Reset(columns ...Resettable) { + for _, column := range columns { + column.Reset() + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/results.go b/vendor/github.com/ClickHouse/ch-go/proto/results.go new file mode 100644 index 0000000..d81c1e3 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/results.go @@ -0,0 +1,153 @@ +package proto + +import "github.com/go-faster/errors" + +// Result of Query. +type Result interface { + DecodeResult(r *Reader, version int, b Block) error +} + +// Results wrap []ResultColumn to implement Result. 
+type Results []ResultColumn + +type autoResults struct { + results *Results +} + +func (s autoResults) DecodeResult(r *Reader, version int, b Block) error { + return s.results.decodeAuto(r, version, b) +} + +func (s Results) Rows() int { + if len(s) == 0 { + return 0 + } + return s[0].Data.Rows() +} + +func (s *Results) Auto() Result { + return autoResults{results: s} +} + +func (s *Results) decodeAuto(r *Reader, version int, b Block) error { + if len(*s) > 0 { + // Already inferred. + return s.DecodeResult(r, version, b) + } + for i := 0; i < b.Columns; i++ { + columnName, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] name", i) + } + columnTypeRaw, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] type", i) + } + var customSerialization bool + if FeatureCustomSerialization.In(version) { + if customSerialization, err = r.Bool(); err != nil { + return errors.Wrapf(err, "column [%d] custom serialization", i) + } + if customSerialization { + // Not implemented. + return errors.Wrapf(err, "column [%d] has custom serialization (not supported)", i) + } + } + var ( + colType = ColumnType(columnTypeRaw) + col = &ColAuto{} + ) + if err := col.Infer(colType); err != nil { + return errors.Wrap(err, "column type inference") + } + col.Data.Reset() + if b.Rows != 0 { + if s, ok := col.Data.(Stateful); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrapf(err, "%s state", columnName) + } + } + if err := col.Data.DecodeColumn(r, b.Rows); err != nil { + return errors.Wrap(err, columnName) + } + } + *s = append(*s, ResultColumn{ + Name: columnName, + Data: col.Data, + }) + } + return nil +} + +func (s Results) DecodeResult(r *Reader, version int, b Block) error { + var ( + noTarget = len(s) == 0 + noRows = b.Rows == 0 + columnsMismatch = b.Columns != len(s) + allowMismatch = noTarget && noRows + ) + if columnsMismatch && !allowMismatch { + return errors.Errorf("%d (columns) != %d (target)", b.Columns, len(s)) + } + for i := 0; i < b.Columns; i++ { + columnName, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] name", i) + } + columnType, err := r.Str() + if err != nil { + return errors.Wrapf(err, "column [%d] type", i) + } + if FeatureCustomSerialization.In(version) { + customSerialization, err := r.Bool() + if err != nil { + return errors.Wrapf(err, "column [%d] custom serialization", i) + } + if customSerialization { + // Not implemented. + return errors.Wrapf(err, "column [%d] has custom serialization (not supported)", i) + } + } + if noTarget { + // Just reading types and names. + continue + } + + // Checking column name and type. + t := s[i] + if t.Name == "" { + // Inferring column name. 
+ t.Name = columnName + s[i] = t + } + if t.Name != columnName { + return errors.Errorf("[%d]: unexpected column %q (%q expected)", i, columnName, t.Name) + } + gotType := ColumnType(columnType) + if infer, ok := t.Data.(Inferable); ok { + if err := infer.Infer(gotType); err != nil { + return errors.Wrap(err, "infer") + } + } + hasType := t.Data.Type() + if gotType.Conflicts(hasType) { + return errors.Errorf("[%d]: %s: unexpected type %q (got) instead of %q (has)", + i, columnName, gotType, hasType, + ) + } + t.Data.Reset() + if b.Rows == 0 { + continue + } + if s, ok := t.Data.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrapf(err, "%s state", columnName) + } + } + if err := t.Data.DecodeColumn(r, b.Rows); err != nil { + return errors.Wrap(err, columnName) + } + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/server_code.go b/vendor/github.com/ClickHouse/ch-go/proto/server_code.go new file mode 100644 index 0000000..d9fa55e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/server_code.go @@ -0,0 +1,38 @@ +package proto + +//go:generate go run github.com/dmarkham/enumer -type ServerCode -trimprefix ServerCode -output server_code_enum.go + +// ServerCode is sent by server to client. +type ServerCode byte + +// Possible server codes. +const ( + ServerCodeHello ServerCode = 0 // Server part of "handshake" + ServerCodeData ServerCode = 1 // data block (can be compressed) + ServerCodeException ServerCode = 2 // runtime exception + ServerCodeProgress ServerCode = 3 // query execution progress (bytes, lines) + ServerCodePong ServerCode = 4 // ping response (ClientPing) + ServerCodeEndOfStream ServerCode = 5 // all packets were transmitted + ServerCodeProfile ServerCode = 6 // profiling info + ServerCodeTotals ServerCode = 7 // packet with total values (can be compressed) + ServerCodeExtremes ServerCode = 8 // packet with minimums and maximums (can be compressed) + ServerCodeTablesStatus ServerCode = 9 // response to TablesStatus + ServerCodeLog ServerCode = 10 // query execution system log + ServerCodeTableColumns ServerCode = 11 // columns description + ServerPartUUIDs ServerCode = 12 // list of unique parts ids. + ServerReadTaskRequest ServerCode = 13 // String (UUID) describes a request for which next task is needed + ServerProfileEvents ServerCode = 14 // Packet with profile events from server +) + +// Encode to buffer. +func (c ServerCode) Encode(b *Buffer) { b.PutByte(byte(c)) } + +// Compressible reports whether message can be compressed. +func (c ServerCode) Compressible() bool { + switch c { + case ServerCodeData, ServerCodeTotals, ServerCodeExtremes: + return true + default: + return false + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/server_code_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/server_code_enum.go new file mode 100644 index 0000000..27f6793 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/server_code_enum.go @@ -0,0 +1,130 @@ +// Code generated by "enumer -type ServerCode -trimprefix ServerCode -output server_code_enum.go"; DO NOT EDIT. 
+ +package proto + +import ( + "fmt" + "strings" +) + +const _ServerCodeName = "HelloDataExceptionProgressPongEndOfStreamProfileTotalsExtremesTablesStatusLogTableColumnsServerPartUUIDsServerReadTaskRequestServerProfileEvents" + +var _ServerCodeIndex = [...]uint8{0, 5, 9, 18, 26, 30, 41, 48, 54, 62, 74, 77, 89, 104, 125, 144} + +const _ServerCodeLowerName = "hellodataexceptionprogresspongendofstreamprofiletotalsextremestablesstatuslogtablecolumnsserverpartuuidsserverreadtaskrequestserverprofileevents" + +func (i ServerCode) String() string { + if i >= ServerCode(len(_ServerCodeIndex)-1) { + return fmt.Sprintf("ServerCode(%d)", i) + } + return _ServerCodeName[_ServerCodeIndex[i]:_ServerCodeIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _ServerCodeNoOp() { + var x [1]struct{} + _ = x[ServerCodeHello-(0)] + _ = x[ServerCodeData-(1)] + _ = x[ServerCodeException-(2)] + _ = x[ServerCodeProgress-(3)] + _ = x[ServerCodePong-(4)] + _ = x[ServerCodeEndOfStream-(5)] + _ = x[ServerCodeProfile-(6)] + _ = x[ServerCodeTotals-(7)] + _ = x[ServerCodeExtremes-(8)] + _ = x[ServerCodeTablesStatus-(9)] + _ = x[ServerCodeLog-(10)] + _ = x[ServerCodeTableColumns-(11)] + _ = x[ServerPartUUIDs-(12)] + _ = x[ServerReadTaskRequest-(13)] + _ = x[ServerProfileEvents-(14)] +} + +var _ServerCodeValues = []ServerCode{ServerCodeHello, ServerCodeData, ServerCodeException, ServerCodeProgress, ServerCodePong, ServerCodeEndOfStream, ServerCodeProfile, ServerCodeTotals, ServerCodeExtremes, ServerCodeTablesStatus, ServerCodeLog, ServerCodeTableColumns, ServerPartUUIDs, ServerReadTaskRequest, ServerProfileEvents} + +var _ServerCodeNameToValueMap = map[string]ServerCode{ + _ServerCodeName[0:5]: ServerCodeHello, + _ServerCodeLowerName[0:5]: ServerCodeHello, + _ServerCodeName[5:9]: ServerCodeData, + _ServerCodeLowerName[5:9]: ServerCodeData, + _ServerCodeName[9:18]: ServerCodeException, + _ServerCodeLowerName[9:18]: ServerCodeException, + _ServerCodeName[18:26]: ServerCodeProgress, + _ServerCodeLowerName[18:26]: ServerCodeProgress, + _ServerCodeName[26:30]: ServerCodePong, + _ServerCodeLowerName[26:30]: ServerCodePong, + _ServerCodeName[30:41]: ServerCodeEndOfStream, + _ServerCodeLowerName[30:41]: ServerCodeEndOfStream, + _ServerCodeName[41:48]: ServerCodeProfile, + _ServerCodeLowerName[41:48]: ServerCodeProfile, + _ServerCodeName[48:54]: ServerCodeTotals, + _ServerCodeLowerName[48:54]: ServerCodeTotals, + _ServerCodeName[54:62]: ServerCodeExtremes, + _ServerCodeLowerName[54:62]: ServerCodeExtremes, + _ServerCodeName[62:74]: ServerCodeTablesStatus, + _ServerCodeLowerName[62:74]: ServerCodeTablesStatus, + _ServerCodeName[74:77]: ServerCodeLog, + _ServerCodeLowerName[74:77]: ServerCodeLog, + _ServerCodeName[77:89]: ServerCodeTableColumns, + _ServerCodeLowerName[77:89]: ServerCodeTableColumns, + _ServerCodeName[89:104]: ServerPartUUIDs, + _ServerCodeLowerName[89:104]: ServerPartUUIDs, + _ServerCodeName[104:125]: ServerReadTaskRequest, + _ServerCodeLowerName[104:125]: ServerReadTaskRequest, + _ServerCodeName[125:144]: ServerProfileEvents, + _ServerCodeLowerName[125:144]: ServerProfileEvents, +} + +var _ServerCodeNames = []string{ + _ServerCodeName[0:5], + _ServerCodeName[5:9], + _ServerCodeName[9:18], + _ServerCodeName[18:26], + _ServerCodeName[26:30], + _ServerCodeName[30:41], + _ServerCodeName[41:48], + _ServerCodeName[48:54], + _ServerCodeName[54:62], + _ServerCodeName[62:74], + _ServerCodeName[74:77], + 
_ServerCodeName[77:89], + _ServerCodeName[89:104], + _ServerCodeName[104:125], + _ServerCodeName[125:144], +} + +// ServerCodeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ServerCodeString(s string) (ServerCode, error) { + if val, ok := _ServerCodeNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _ServerCodeNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ServerCode values", s) +} + +// ServerCodeValues returns all values of the enum +func ServerCodeValues() []ServerCode { + return _ServerCodeValues +} + +// ServerCodeStrings returns a slice of all String values of the enum +func ServerCodeStrings() []string { + strs := make([]string, len(_ServerCodeNames)) + copy(strs, _ServerCodeNames) + return strs +} + +// IsAServerCode returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ServerCode) IsAServerCode() bool { + for _, v := range _ServerCodeValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/server_hello.go b/vendor/github.com/ClickHouse/ch-go/proto/server_hello.go new file mode 100644 index 0000000..dbe8106 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/server_hello.go @@ -0,0 +1,120 @@ +package proto + +import ( + "fmt" + "strings" + + "github.com/go-faster/errors" +) + +// ServerHello is answer to ClientHello and represents ServerCodeHello message. +type ServerHello struct { + Name string + Major int + Minor int + Revision int + Timezone string + DisplayName string + Patch int +} + +// Features implemented by server. +func (s ServerHello) Features() []Feature { + var features []Feature + for _, f := range FeatureValues() { + if s.Has(f) { + features = append(features, f) + } + } + return features +} + +// Has reports whether Feature is implemented. +func (s ServerHello) Has(f Feature) bool { + return f.In(s.Revision) +} + +func (s ServerHello) String() string { + var b strings.Builder + b.WriteString(s.Name) + if s.DisplayName != "" { + _, _ = fmt.Fprintf(&b, " (%s", s.DisplayName) + if s.Timezone != "" { + b.WriteString(", ") + b.WriteString(s.Timezone) + } + b.WriteRune(')') + } + + _, _ = fmt.Fprintf(&b, " %d.%d", s.Major, s.Minor) + if s.Has(FeatureVersionPatch) { + _, _ = fmt.Fprintf(&b, ".%d", s.Patch) + } + _, _ = fmt.Fprintf(&b, " (%d)", s.Revision) + return b.String() +} + +// DecodeAware decodes ServerHello message from Reader. 
+func (s *ServerHello) DecodeAware(r *Reader, v int) error { + name, err := r.Str() + if err != nil { + return errors.Wrap(err, "str") + } + s.Name = name + + major, err := r.Int() + if err != nil { + return errors.Wrap(err, "major") + } + minor, err := r.Int() + if err != nil { + return errors.Wrap(err, "minor") + } + revision, err := r.Int() + if err != nil { + return errors.Wrap(err, "revision") + } + + s.Major, s.Minor, s.Revision = major, minor, revision + + if FeatureTimezone.In(v) { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "timezone") + } + s.Timezone = v + } + if FeatureDisplayName.In(v) { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "display name") + } + s.DisplayName = v + } + if FeatureVersionPatch.In(v) { + path, err := r.Int() + if err != nil { + return errors.Wrap(err, "patch") + } + s.Patch = path + } + + return nil +} + +func (s *ServerHello) EncodeAware(b *Buffer, v int) { + ServerCodeHello.Encode(b) + b.PutString(s.Name) + b.PutInt(s.Major) + b.PutInt(s.Minor) + b.PutInt(s.Revision) + if FeatureTimezone.In(v) { + b.PutString(s.Timezone) + } + if FeatureDisplayName.In(v) { + b.PutString(s.DisplayName) + } + if FeatureVersionPatch.In(v) { + b.PutInt(s.Patch) + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/server_log.go b/vendor/github.com/ClickHouse/ch-go/proto/server_log.go new file mode 100644 index 0000000..f6ba412 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/server_log.go @@ -0,0 +1,55 @@ +package proto + +import "time" + +// Log from server. +type Log struct { + QueryID string `json:"query_id"` + Source string `json:"source"` + Text string `json:"text"` + Time time.Time `json:"event_time"` + Host string `json:"host_name"` + ThreadID uint64 `json:"thread_id"` + Priority int8 `json:"priority"` +} + +// Logs from ServerCodeLog packet. +type Logs struct { + Time ColDateTime + TimeMicro ColUInt32 + HostName ColStr + QueryID ColStr + ThreadID ColUInt64 + Priority ColInt8 + Source ColStr + Text ColStr +} + +func (s *Logs) Result() Results { + return Results{ + {Name: "event_time", Data: &s.Time}, + {Name: "event_time_microseconds", Data: &s.TimeMicro}, + {Name: "host_name", Data: &s.HostName}, + {Name: "query_id", Data: &s.QueryID}, + {Name: "thread_id", Data: &s.ThreadID}, + {Name: "priority", Data: &s.Priority}, + {Name: "source", Data: &s.Source}, + {Name: "text", Data: &s.Text}, + } +} + +func (s Logs) All() []Log { + var out []Log + for i := 0; i < s.Source.Rows(); i++ { + out = append(out, Log{ + Time: s.Time.Row(i), + Host: s.HostName.Row(i), + QueryID: s.QueryID.Row(i), + ThreadID: s.ThreadID[i], + Priority: s.Priority[i], + Source: s.Source.Row(i), + Text: s.Text.Row(i), + }) + } + return out +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go new file mode 100644 index 0000000..7d9e90e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go @@ -0,0 +1,17 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +package proto + +import "unsafe" + +// slice represents slice header. +// +// Used in optimizations when we can interpret [N]T as [M]byte, where +// M = sizeof(T) * N. +// +// NB: careful with endianness! 
+type slice struct { + Data unsafe.Pointer + Len uintptr + Cap uintptr +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/stage.go b/vendor/github.com/ClickHouse/ch-go/proto/stage.go new file mode 100644 index 0000000..22ff0a4 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/stage.go @@ -0,0 +1,16 @@ +package proto + +// Stage of query till SELECT should be executed. +type Stage byte + +// Encode to buffer. +func (s Stage) Encode(b *Buffer) { b.PutUVarInt(uint64(s)) } + +//go:generate go run github.com/dmarkham/enumer -type Stage -trimprefix Stage -output stage_enum.go + +// StageComplete is query complete. +const ( + StageFetchColumns Stage = 0 + StageWithMergeableState Stage = 1 + StageComplete Stage = 2 +) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/stage_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/stage_enum.go new file mode 100644 index 0000000..63511bd --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/stage_enum.go @@ -0,0 +1,82 @@ +// Code generated by "enumer -type Stage -trimprefix Stage -output stage_enum.go"; DO NOT EDIT. + +package proto + +import ( + "fmt" + "strings" +) + +const _StageName = "FetchColumnsWithMergeableStateComplete" + +var _StageIndex = [...]uint8{0, 12, 30, 38} + +const _StageLowerName = "fetchcolumnswithmergeablestatecomplete" + +func (i Stage) String() string { + if i >= Stage(len(_StageIndex)-1) { + return fmt.Sprintf("Stage(%d)", i) + } + return _StageName[_StageIndex[i]:_StageIndex[i+1]] +} + +// An "invalid array index" compiler error signifies that the constant values have changed. +// Re-run the stringer command to generate them again. +func _StageNoOp() { + var x [1]struct{} + _ = x[StageFetchColumns-(0)] + _ = x[StageWithMergeableState-(1)] + _ = x[StageComplete-(2)] +} + +var _StageValues = []Stage{StageFetchColumns, StageWithMergeableState, StageComplete} + +var _StageNameToValueMap = map[string]Stage{ + _StageName[0:12]: StageFetchColumns, + _StageLowerName[0:12]: StageFetchColumns, + _StageName[12:30]: StageWithMergeableState, + _StageLowerName[12:30]: StageWithMergeableState, + _StageName[30:38]: StageComplete, + _StageLowerName[30:38]: StageComplete, +} + +var _StageNames = []string{ + _StageName[0:12], + _StageName[12:30], + _StageName[30:38], +} + +// StageString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func StageString(s string) (Stage, error) { + if val, ok := _StageNameToValueMap[s]; ok { + return val, nil + } + + if val, ok := _StageNameToValueMap[strings.ToLower(s)]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Stage values", s) +} + +// StageValues returns all values of the enum +func StageValues() []Stage { + return _StageValues +} + +// StageStrings returns a slice of all String values of the enum +func StageStrings() []string { + strs := make([]string, len(_StageNames)) + copy(strs, _StageNames) + return strs +} + +// IsAStage returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i Stage) IsAStage() bool { + for _, v := range _StageValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/table_columns.go b/vendor/github.com/ClickHouse/ch-go/proto/table_columns.go new file mode 100644 index 0000000..1eaf071 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/table_columns.go @@ -0,0 +1,32 @@ +package proto + +import "github.com/go-faster/errors" + +type TableColumns struct { + First string + Second string +} + +func (c *TableColumns) DecodeAware(r *Reader, _ int) error { + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "first") + } + c.First = v + } + { + v, err := r.Str() + if err != nil { + return errors.Wrap(err, "second") + } + c.Second = v + } + return nil +} + +func (c TableColumns) EncodeAware(b *Buffer, _ int) { + ServerCodeTableColumns.Encode(b) + b.PutString(c.First) + b.PutString(c.Second) +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore index 7f0f42e..2d3f9ae 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore @@ -27,4 +27,15 @@ _testmain.go coverage.txt .idea/** -dev/* \ No newline at end of file +dev/* +.run/** + +vendor + +**.tfstate* +.terraform.lock.hcl +**/.terraform* +pipeline.auto.tfvars +*.tfvars + +.env diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml b/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml new file mode 100644 index 0000000..23e116a --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml @@ -0,0 +1,71 @@ +run: + tests: false + skip-dirs: + - benchmark + - tests + - internal/cmd + +linters-settings: + gocritic: + disabled-checks: + - singleCaseSwitch + - commentFormatting + + decorder: + dec-order: + - type + - const + - var + - func + disable-dec-order-check: false + + revive: + enable-all-rules: true + rules: + - name: cyclomatic + disabled: true + - name: argument-limit + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: line-length-limit + disabled: true + - name: file-header + disabled: true + - name: cognitive-complexity + disabled: true + - name: banned-characters + disabled: true + - name: max-public-structs + disabled: true + - name: add-constant + disabled: true + - name: unhandled-error + disabled: true + - name: deep-exit + disabled: true + - name: nested-structs + disabled: true + + gofmt: + rewrite-rules: + - pattern: 'interface{}' + replacement: 'any' + - pattern: 'a[b:len(a)]' + replacement: 'a[b:]' + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - depguard + - gocritic + - gofmt + - govet + - ineffassign + - importas + - misspell + - staticcheck diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md new file mode 100644 index 0000000..d481942 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md @@ -0,0 +1,673 @@ +# v2.27.1, 2024-08-05 + +## What's Changed +### Fixes 🐛 +* Fix INSERT statement normalization match backtick table name by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1366 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.27.0...v2.27.1 + +# v2.27.0, 2024-08-01 + +## Breaking change notice + +v2.25.0 was released with a breaking change 
in https://github.com/ClickHouse/clickhouse-go/pull/1306. Please review your implementation. + +## What's Changed +### Enhancements 🎉 +* Unpack value of indirect types in array column to support nested structures in interfaced slices/arrays by @jmaicher in https://github.com/ClickHouse/clickhouse-go/pull/1350 +### Fixes 🐛 +* Common HTTP insert query normalization by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1341 +### Other Changes 🛠 +* Update examples std json by @xjeway in https://github.com/ClickHouse/clickhouse-go/pull/1240 +* ClickHouse 24.6 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1352 +* ClickHouse 24.7 release by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1363 +* Update CHANGELOG with a breaking change note by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1364 + +## New Contributors +* @xjeway made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1240 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.26.0...v2.27.0 + +# v2.26.0, 2024-06-25 + +## What's Changed +### Enhancements 🎉 +* Reintroduce the random connection strategy by @larry-cdn77 in https://github.com/ClickHouse/clickhouse-go/pull/1313 +* Make custom debug log function on-par with the built-in one by @vespian in https://github.com/ClickHouse/clickhouse-go/pull/1317 +* Remove date overflow check as it's normalised by ClickHouse server by @gogingersnap777 in https://github.com/ClickHouse/clickhouse-go/pull/1315 +* Batch: impl `Columns() []column.Interface` method by @egsam98 in https://github.com/ClickHouse/clickhouse-go/pull/1277 +### Fixes 🐛 +* Fix rows.Close do not return too early by @yujiarista in https://github.com/ClickHouse/clickhouse-go/pull/1314 +* Setting `X-Clickhouse-SSL-Certificate-Auth` header correctly given `X-ClickHouse-Key` by @gogingersnap777 in https://github.com/ClickHouse/clickhouse-go/pull/1316 +* Retry on network errors and fix retries on async inserts with `database/sql` interface by @tommyzli in https://github.com/ClickHouse/clickhouse-go/pull/1330 +* BatchInsert parentheses issue fix by @ramzes642 in https://github.com/ClickHouse/clickhouse-go/pull/1327 +### Other Changes 🛠 +* ClickHouse 24.5 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1319 +* Align `allow_suspicious_low_cardinality_types` and `allow_suspicious_low_cardinality_types ` settings in tests due to ClickHouse Cloud incompatibility by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1331 +* Use HTTPs scheme in std connection failover tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1332 + +## New Contributors +* @larry-cdn77 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1313 +* @vespian made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1317 +* @gogingersnap777 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1315 +* @yujiarista made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1314 +* @egsam98 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1277 +* @tommyzli made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1330 +* @ramzes642 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1327 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.25.0...v2.26.0 + +# v2.25.0, 2024-05-28 + +## What's Changed +### Breaking 
Changes 🚨 +* Add a compatibility layer for a database/sql driver to work with sql.NullString and ClickHouse nullable column by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1306 +### Other Changes 🛠 +* Use Go 1.22 in head tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1305 +* Skip flaky 1127 test by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1307 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.24.0...v2.25.0 + +# v2.24.0, 2024-05-08 + +## What's Changed +### Enhancements 🎉 +* Always compress responses when the client compression is on by @zhkvia in https://github.com/ClickHouse/clickhouse-go/pull/1286 +* Optional flag to close query with flush by @hongker in https://github.com/ClickHouse/clickhouse-go/pull/1276 +### Fixes 🐛 +* Fix prepare batch does not break on `values` substring in table name by @Wang in https://github.com/ClickHouse/clickhouse-go/pull/1290 +* Fix nil checks when appending slices of pointers by @markandrus in https://github.com/ClickHouse/clickhouse-go/pull/1283 +### Other Changes 🛠 +* Don't recreate keys from LC columns from direct stream by @genzgd in https://github.com/ClickHouse/clickhouse-go/pull/1291 + +## New Contributors +* @zhkvia made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1286 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.2...v2.24.0 + +# v2.23.2, 2024-04-25 + +## What's Changed +### Fixes 🐛 +* Fixed panic on concurrent context key map write by @Wang in https://github.com/ClickHouse/clickhouse-go/pull/1284 +### Other Changes 🛠 +* Fix ClickHouse Terraform provider version by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1285 + +## New Contributors +* @Wang made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1284 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.1...v2.23.2 + +# v2.23.1, 2024-04-15 + +## What's Changed +### Fixes 🐛 +* Zero-value timestamp to be formatted as toDateTime(0) in bind by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1260 +### Other Changes 🛠 +* Update #1127 test case to reproduce a progress handle when exception is thrown by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1259 +* Set max parallel for GH jobs by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1261 +* Ensure test container termination by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1274 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.0...v2.23.1 + +# v2.23.0, 2024-03-27 + +## What's Changed +### Enhancements 🎉 +* Implement `ConnBeginTx` as replacement for deprecated `Begin` by @FelipeLema in https://github.com/ClickHouse/clickhouse-go/pull/1255 +### Other Changes 🛠 +* Align error message assertion to new missing custom setting error formatting by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1256 +* CI chores by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1258 + +## New Contributors +* @FelipeLema made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1255 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.4...v2.23.0 + +# v2.22.4, 2024-03-25 + +## What's Changed +### Fixes 🐛 +* Fix column name with parantheses handle in prepare batch by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1252 +### Other Changes 🛠 +* Fix TestBatchAppendRows work 
different on cloud by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1251 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.3...v2.22.4 + +# v2.22.3, 2024-03-25 + +## What's Changed +### Fixes 🐛 +* Fix panic on tuple scan on []any by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1249 +### Other Changes 🛠 +* Error channel deadlock fix test case by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1239 +* Add a test case for #1127 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1242 +* Run cloud/head jobs when label by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1250 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.2...v2.22.3 + +# v2.22.2, 2024-03-18 + +## What's Changed +### Fixes 🐛 +* Fix for Map columns with Enums by @leklund in https://github.com/ClickHouse/clickhouse-go/pull/1236 + +## New Contributors +* @leklund made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1236 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.1...v2.22.2 + +# v2.22.1, 2024-03-18 + +## What's Changed +### Fixes 🐛 +* Make errors channel buffered inside query() by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1237 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.0...v2.22.1 + +# v2.20.0, 2024-02-28 + +## What's Changed +### Enhancements 🎉 +* Support [n]byte/[]byte type Scan/Append to FixedString column by @rogeryk in https://github.com/ClickHouse/clickhouse-go/pull/1205 +### Other Changes 🛠 +* Enable cloud tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1202 +* Removed LowCardinality(UInt64) tests that caused allow_suspicious_low_cardinality_types related error by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1206 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.19.0...v2.20.0 + +# v2.19.0, 2024-02-26 + +## What's Changed +### Enhancements 🎉 +* handle ctx.Done() in acquire by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1199 +### Fixes 🐛 +* Fix panic on format nil *fmt.Stringer type value by @zaneli in https://github.com/ClickHouse/clickhouse-go/pull/1200 +### Other Changes 🛠 +* Update Go/ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1201 + +## New Contributors +* @threadedstream made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1199 +* @zaneli made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1200 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.18.0...v2.19.0 + +# v2.18.0, 2024-02-01 + +## What's Changed +### Enhancements 🎉 +* Add WithAllocBufferColStrProvider string column allocator for batch insert performance boost by @hongker in https://github.com/ClickHouse/clickhouse-go/pull/1181 +### Fixes 🐛 +* Fix bind for seconds scale DateTime by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1184 +### Other Changes 🛠 +* resolves #1163 debugF function is not respected by @omurbekjk in https://github.com/ClickHouse/clickhouse-go/pull/1166 + +## New Contributors +* @omurbekjk made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1166 +* @hongker made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1181 + +**Full Changelog**: 
https://github.com/ClickHouse/clickhouse-go/compare/v2.17.1...v2.18.0 + +# v2.17.1, 2023-12-27 + +## What's Changed +### Fixes 🐛 +* fix panic in contextWatchDog nil pointer check by @nityanandagohain in https://github.com/ClickHouse/clickhouse-go/pull/1168 + +## New Contributors +* @nityanandagohain made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1168 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.17.0...v2.17.1 + +# v2.17.0, 2023-12-21 + +## What's Changed +### Enhancements 🎉 +* Iterable ordered map alternative with improved performance by @hanjm in https://github.com/ClickHouse/clickhouse-go/pull/1152 +* Support bool alias type by @yogasw in https://github.com/ClickHouse/clickhouse-go/pull/1156 +### Fixes 🐛 +* Update README - mention HTTP protocol usable only with `database/sql` interface by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1160 +* Fix README example for Debugf by @aramperes in https://github.com/ClickHouse/clickhouse-go/pull/1153 + +## New Contributors +* @yogasw made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1156 +* @aramperes made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1153 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.16.0...v2.17.0 + +# v2.16.0, 2023-12-01 + +## What's Changed +### Enhancements 🎉 +* Add sql.Valuer support for all types by @deankarn in https://github.com/ClickHouse/clickhouse-go/pull/1144 +### Fixes 🐛 +* Fix DateTime64 range to actual supported range per ClickHouse documentation by @phil-schreiber in https://github.com/ClickHouse/clickhouse-go/pull/1148 + +## New Contributors +* @phil-schreiber made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1148 +* @deankarn made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1144 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.15.0...v2.16.0 + +# v2.14.3, 2023-10-12 + +## What's Changed +### Fixes 🐛 +* Fix insertion of empty map into JSON column by using _dummy subcolumn by @leodido in https://github.com/ClickHouse/clickhouse-go/pull/1116 +### Other Changes 🛠 +* chore: specify method field on compression in example by @rdaniels6813 in https://github.com/ClickHouse/clickhouse-go/pull/1111 +* chore: remove extra error checks by @rutaka-n in https://github.com/ClickHouse/clickhouse-go/pull/1095 + +## New Contributors +* @leodido made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1116 +* @rdaniels6813 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1111 +* @rutaka-n made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1095 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.2...v2.14.3 + +# v2.14.2, 2023-10-04 + +## What's Changed +### Fixes 🐛 +* Fix: Block stream read process would be terminated by empty block with zero rows by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/1104 +* Free compressor's buffer when FreeBufOnConnRelease enabled by @cergxx in https://github.com/ClickHouse/clickhouse-go/pull/1100 +* Fix truncate ` for HTTP adapter by @beck917 in https://github.com/ClickHouse/clickhouse-go/pull/1103 +### Other Changes 🛠 +* docs: update readme.md by @rfyiamcool in https://github.com/ClickHouse/clickhouse-go/pull/1068 +* Remove dependency on github.com/satori/go.uuid by @srikanthccv in 
https://github.com/ClickHouse/clickhouse-go/pull/1085 + +## New Contributors +* @rfyiamcool made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1068 +* @beck917 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1103 +* @srikanthccv made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1085 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.1...v2.14.2 + +# v2.14.1, 2023-09-14 + +## What's Changed +### Enhancements 🎉 +* parseDSN: support connection pool settings (#1082) by @hanjm in https://github.com/ClickHouse/clickhouse-go/pull/1084 + +## New Contributors +* @hanjm made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1084 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.0...v2.14.1 + +# v2.14.0, 2023-09-12 + +## What's Changed +### Enhancements 🎉 +* Add FreeBufOnConnRelease to clickhouse.Options by @cergxx in https://github.com/ClickHouse/clickhouse-go/pull/1091 +* Improving object allocation for (positional) parameter binding by @mdonkers in https://github.com/ClickHouse/clickhouse-go/pull/1092 +### Fixes 🐛 +* Fix escaping double quote in SQL statement in prepare batch by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1083 +### Other Changes 🛠 +* Update Go & ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1079 +* Return status code from any http error by @RoryCrispin in https://github.com/ClickHouse/clickhouse-go/pull/1090 +* tests: fix dropped error by @alrs in https://github.com/ClickHouse/clickhouse-go/pull/1081 +* chore: unnecessary use of fmt.Sprintf by @testwill in https://github.com/ClickHouse/clickhouse-go/pull/1080 +* Run CI on self hosted runner by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1094 + +## New Contributors +* @cergxx made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1091 +* @alrs made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1081 +* @testwill made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1080 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.4...v2.14 + +# v2.13.4, 2023-08-30 + +## What's Changed +### Fixes 🐛 +* fix(proto): add TCP protocol version in query packet by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1077 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.3...v2.13.4 + +# v2.13.3, 2023-08-23 + +## What's Changed +### Fixes 🐛 +* fix(column.json): fix bool type handling by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1073 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.2...v2.13.3 + +# v2.13.2, 2023-08-18 + +## What's Changed +### Fixes 🐛 +* fix: update ch-go to remove string length limit by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1071 +### Other Changes 🛠 +* Test against latest and head CH by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1060 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.1...v2.13.2 + +# v2.13.1, 2023-08-17 + +## What's Changed +### Fixes 🐛 +* fix: native format Date32 representation by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1069 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.0...v2.13.1 + +# v2.13.0, 2023-08-10 + +## What's Changed 
+### Enhancements 🎉 +* Support scan from uint8 to bool by @ValManP in https://github.com/ClickHouse/clickhouse-go/pull/1051 +* Binding arguments for AsyncInsert interface by @mdonkers in https://github.com/ClickHouse/clickhouse-go/pull/1052 +* Batch rows count API by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1063 +* Implement release connection in batch by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1062 +### Other Changes 🛠 +* Restore test against CH 23.7 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1059 + +## New Contributors +* @ValManP made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1051 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.1...v2.13.0 + +# v2.12.1, 2023-08-02 + +## What's Changed +### Fixes 🐛 +* Fix InsertAsync typo in docs by @et in https://github.com/ClickHouse/clickhouse-go/pull/1044 +* Fix panic and releasing in batch column by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1055 +* Docs/changelog fixes by @jmaicher in https://github.com/ClickHouse/clickhouse-go/pull/1046 +* Clarify error message re custom serializaion support by @RoryCrispin in https://github.com/ClickHouse/clickhouse-go/pull/1056 +* Fix send query on batch retry by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1045 +### Other Changes 🛠 +* Update ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1054 + +## New Contributors +* @et made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1044 +* @EpicStep made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1055 +* @jmaicher made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1046 +* @RoryCrispin made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1056 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.0...v2.12.1 + +# v2.12.0, 2023-07-27 + +## What's Changed +### Enhancements 🎉 +* Implement elapsed time in query progress by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1039 +### Fixes 🐛 +* Release connection slot on connection acquire timeout by @sentanos in https://github.com/ClickHouse/clickhouse-go/pull/1042 + +## New Contributors +* @sentanos made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1042 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.11.0...v2.12.0 + +# v2.11.0, 2023-07-20 + +## What's Changed +### Enhancements 🎉 +* Retry for batch API by @djosephsen in https://github.com/ClickHouse/clickhouse-go/pull/941 +### Fixes 🐛 +* Fix startAutoCloseIdleConnections cause goroutine leak by @YenchangChan in https://github.com/ClickHouse/clickhouse-go/pull/1011 +* Fix netip.Addr pointer panic by @anjmao in https://github.com/ClickHouse/clickhouse-go/pull/1029 +### Other Changes 🛠 +* Git actions terraform by @gingerwizard in https://github.com/ClickHouse/clickhouse-go/pull/1023 + +## New Contributors +* @YenchangChan made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1011 +* @djosephsen made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/941 +* @anjmao made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1029 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.1...v2.11.0 + +# v2.10.1, 2023-06-06 + +## What's Changed +### Other Changes 
🛠 +* Update outdated README.md by @kokizzu in https://github.com/ClickHouse/clickhouse-go/pull/1006 +* Remove incorrect usage of KeepAlive in DialContext by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1009 + +## New Contributors +* @kokizzu made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1006 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.0...v2.10.1 + +# v2.10.0, 2023-05-17 + +## What's Changed +### Enhancements 🎉 +* Support [16]byte/[]byte typed scan/append for IPv6 column by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/996 +* Add custom dialer option to http protocol by @stephaniehingtgen in https://github.com/ClickHouse/clickhouse-go/pull/998 +### Fixes 🐛 +* Tuple scan respects both value and pointer variable by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/971 +* Auto close idle connections in native protocol in respect of ConnMaxLifetime option by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/999 + +## New Contributors +* @stephaniehingtgen made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/998 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.3...v2.10.0 + +# v2.9.2, 2023-05-08 + +## What's Changed +### Fixes 🐛 +* Pass http.ProxyFromEnvironment configuration to http.Transport by @slvrtrn in https://github.com/ClickHouse/clickhouse-go/pull/987 +### Other Changes 🛠 +* Use `any` instead of `interface{}` by @candiduslynx in https://github.com/ClickHouse/clickhouse-go/pull/984 + +## New Contributors +* @candiduslynx made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/984 +* @slvrtrn made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/987 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.1...v2.9.2 + +# v2.9.1, 2023-04-24 + +## What's Changed +### Enhancements 🎉 +* Do not return hard error on unparsable version in HTTP proto by @hexchain in https://github.com/ClickHouse/clickhouse-go/pull/975 +### Fixes 🐛 +* Return ErrBadConn in stdDriver Prepare if connection is broken by @czubocha in https://github.com/ClickHouse/clickhouse-go/pull/977 + +## New Contributors +* @czubocha made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/977 +* @hexchain made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/975 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.0...v2.9.1 + +# v2.9.0, 2023-04-13 + +## What's Changed +### Enhancements 🎉 +* External tables support for HTTP protocol by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/942 +* Support driver.Valuer in String and FixedString columns by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/946 +* Support boolean and pointer type parameter binding by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/963 +* Support insert/scan IPv4 using UInt32/*UInt32 types by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/966 +### Fixes 🐛 +* Reset the pointer to the nullable field by @xiaochaoren1 in https://github.com/ClickHouse/clickhouse-go/pull/964 +* Enable to use ternary operator with named arguments by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/965 +### Other Changes 🛠 +* chore: explain async insert in docs by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/969 + +## New Contributors +* 
@xiaochaoren1 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/964 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.8.3...v2.9.0 + +## 2.8.3, 2023-04-03 + +### Bug fixes + +- Revert: Expire idle connections no longer acquired during lifetime [#958](https://github.com/ClickHouse/clickhouse-go/pull/958) by @jkaflik + +## 2.8.2, 2023-03-31 + +### Bug fixes + +- Expire idle connections no longer acquired during lifetime [#945](https://github.com/ClickHouse/clickhouse-go/pull/945) by @jkaflik + +## 2.8.1, 2023-03-29 + +### Bug fixes + +- Fix idle connection check for TLS connections [#951](https://github.com/ClickHouse/clickhouse-go/pull/951) by @jkaflik & @alekar + +## 2.8.0, 2023-03-27 + +### New features + +- Support customized "url path" in http connection [#938](https://github.com/ClickHouse/clickhouse-go/pull/938) by @crisismaple +- Allow Auth.Database option to be empty [#926](https://github.com/ClickHouse/clickhouse-go/pull/938) by @v4run + +### Chores + +- Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 [#933](https://github.com/ClickHouse/clickhouse-go/pull/933) +- fix: small typo in the text of an error [#936](https://github.com/ClickHouse/clickhouse-go/pull/936) by @lspgn +- Improved bug template [#916](https://github.com/ClickHouse/clickhouse-go/pull/916) by @mshustov + +## 2.7.0, 2023-03-08 + +### New features + +- Date type with user location [#923](https://github.com/ClickHouse/clickhouse-go/pull/923) by @jkaflik +- Add AppendRow function to BatchColumn [#927](https://github.com/ClickHouse/clickhouse-go/pull/927) by @pikot + +### Bug fixes + +- fix: fix connect.compression's format verb [#924](https://github.com/ClickHouse/clickhouse-go/pull/924) by @mind1949 +- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik + +### Chore + +- Bump github.com/andybalholm/brotli from 1.0.4 to 1.0.5 [#911](https://github.com/ClickHouse/clickhouse-go/pull/911) +- Bump github.com/paulmach/orb from 0.8.0 to 0.9.0 [#912](https://github.com/ClickHouse/clickhouse-go/pull/912) +- Bump golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 [#928](https://github.com/ClickHouse/clickhouse-go/pull/928) + +## 2.6.5, 2023-02-28 + +### Bug fixes + +- Fix array parameter formatting in binding mechanism [#921](https://github.com/ClickHouse/clickhouse-go/pull/921) by @genzgd + +## 2.6.4, 2023-02-23 + +### Bug fixes + +- Fixed concurrency issue in stdConnOpener [#918](https://github.com/ClickHouse/clickhouse-go/pull/918) by @jkaflik + +## 2.6.3, 2023-02-22 + +### Bug fixes + +- Fixed `lib/binary/string_safe.go` for non 64bit arch [#914](https://github.com/ClickHouse/clickhouse-go/pull/914) by @atoulme + +## 2.6.2, 2023-02-20 + +### Bug fixes + +- Fix decimal encoding with non-standard exponential representation [#909](https://github.com/ClickHouse/clickhouse-go/pull/909) by @vogrelord +- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik + +### Chore + +- Remove Yandex ClickHouse image from Makefile [#895](https://github.com/ClickHouse/clickhouse-go/pull/895) by @alexey-milovidov +- Remove duplicate of error handling [#898](https://github.com/ClickHouse/clickhouse-go/pull/898) by @Astemirdum +- Bump github.com/ClickHouse/ch-go from 0.51.2 to 0.52.1 [#901](https://github.com/ClickHouse/clickhouse-go/pull/901) + +## 2.6.1, 2023-02-13 + +### Bug fixes + +- Do not reuse 
expired connections (`ConnMaxLifetime`) [#892](https://github.com/ClickHouse/clickhouse-go/pull/892) by @iamluc +- Extend default dial timeout value to 30s [#893](https://github.com/ClickHouse/clickhouse-go/pull/893) by @jkaflik +- Compression name fixed in sendQuery log [#884](https://github.com/ClickHouse/clickhouse-go/pull/884) by @fredngr + +## 2.6.0, 2023-01-27 + +### New features + +- Client info specification implementation [#876](https://github.com/ClickHouse/clickhouse-go/pull/876) by @jkaflik + +### Bug fixes + +- Better handling for broken connection errors in the std interface [#879](https://github.com/ClickHouse/clickhouse-go/pull/879) by @n-oden + +### Chore + +- Document way to provide table or database identifier with query parameters [#875](https://github.com/ClickHouse/clickhouse-go/pull/875) by @jkaflik +- Bump github.com/ClickHouse/ch-go from 0.51.0 to 0.51.2 [#881](https://github.com/ClickHouse/clickhouse-go/pull/881) + +## 2.5.1, 2023-01-10 + +### Bug fixes + +- Flag connection as closed on broken pipe [#871](https://github.com/ClickHouse/clickhouse-go/pull/871) by @n-oden + +## 2.5.0, 2023-01-10 + +### New features + +- Buffered compression column by column for a native protocol. Introduces the `MaxCompressionBuffer` option - max size (bytes) of compression buffer during column-by-column compression (default 10MiB) [#808](https://github.com/ClickHouse/clickhouse-go/pull/808) by @gingerwizard and @jkaflik +- Support custom types that implement `sql.Scanner` interface (e.g. `type customString string`) [#850](https://github.com/ClickHouse/clickhouse-go/pull/850) by @DarkDrim +- Append query options to the context instead of overwriting [#860](https://github.com/ClickHouse/clickhouse-go/pull/860) by @aaron276h +- Query parameters support [#854](https://github.com/ClickHouse/clickhouse-go/pull/854) by @jkaflik +- Expose `DialStrategy` function to the user for custom connection routing. [#855](https://github.com/ClickHouse/clickhouse-go/pull/855) by @jkaflik + +### Bug fixes + +- Close connection on `Cancel`. This is to make sure context timed out/canceled connection is not reused further [#764](https://github.com/ClickHouse/clickhouse-go/pull/764) by @gingerwizard +- Fully parse `secure` and `skip_verify` in DSN query parameters. [#862](https://github.com/ClickHouse/clickhouse-go/pull/862) by @n-oden + +### Chore + +- Added tests covering read-only user queries [#837](https://github.com/ClickHouse/clickhouse-go/pull/837) by @jkaflik +- Agreed on a batch append fail semantics [#853](https://github.com/ClickHouse/clickhouse-go/pull/853) by @jkaflik + +## 2.4.3, 2022-11-30 +### Bug Fixes +* Fix in batch concurrency - batch could panic if used in separate go routines.
+The issue was originally detected when a batch was used in a goroutine and Abort was called after the batch's connection had already been released. This invalidated a connection that had since been reassigned.
+The issue could occur as soon as the connection was released (which can happen in a number of places, e.g. after Send or an Append error) and potentially returned to the pool for reuse in another goroutine. A subsequent release could then occur, typically because the user called Abort, but Send would also trigger it. The result was the connection being closed in the release function while another batch or query was potentially using it.
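A minimal sketch of the kind of usage that could hit this, assuming a `conn` obtained from `clickhouse.Open`, a context `ctx` and a hypothetical `example` table; this is illustrative only and is not the driver's internal release logic:

```go
batch, err := conn.PrepareBatch(ctx, "INSERT INTO example (id)")
if err != nil {
	return err
}
go func() {
	// An Append error in this goroutine leads to Abort, which releases
	// the batch's connection back to the pool.
	if err := batch.Append(uint64(1)); err != nil {
		_ = batch.Abort()
	}
}()
// A later Send (or another Abort) releases the same connection again, while
// another goroutine may already have acquired and be using it.
if err := batch.Send(); err != nil {
	return err
}
```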
+This release includes a guard to prevent release from being called more than once on a batch. It assumes that batches are not thread-safe - they aren't (only connections are). +## 2.4.2, 2022-11-24 +### Bug Fixes +- Don't panic on `Send()` on batch after invalid `Append`. [#830](https://github.com/ClickHouse/clickhouse-go/pull/830) +- Fix JSON issue with `nil` if column order is inconsisent. [#824](https://github.com/ClickHouse/clickhouse-go/pull/824) + +## 2.4.1, 2022-11-23 +### Bug Fixes +- Patch release to fix "Regression - escape character was not considered when comparing column names". [#828](https://github.com/ClickHouse/clickhouse-go/issues/828) + +## 2.4.0, 2022-11-22 +### New Features +- Support for Nullables in Tuples. [#821](https://github.com/ClickHouse/clickhouse-go/pull/821) [#817](https://github.com/ClickHouse/clickhouse-go/pull/817) +- Use headers for auth and not url if SSL. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) +- Support additional headers. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) +- Support int64 for DateTime. [#807](https://github.com/ClickHouse/clickhouse-go/pull/807) +- Support inserting Enums as int8/int16/int. [#802](https://github.com/ClickHouse/clickhouse-go/pull/802) +- Print error if unsupported server. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) +- Allow block buffer size to tuned for performance - see `BlockBufferSize`. [#776](https://github.com/ClickHouse/clickhouse-go/pull/776) +- Support custom datetime in Scan. [#767](https://github.com/ClickHouse/clickhouse-go/pull/767) +- Support insertion of an orderedmap. [#763](https://github.com/ClickHouse/clickhouse-go/pull/763) + +### Bug Fixes +- Decompress errors over HTTP. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) +- Use `timezone` vs `timeZone` so we work on older versions. [#781](https://github.com/ClickHouse/clickhouse-go/pull/781) +- Ensure only columns specified in INSERT are required in batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) +- Respect order of columns in insert for batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) +- Handle double pointers for Nullable columns when batch inserting. [#774](https://github.com/ClickHouse/clickhouse-go/pull/774) +- Use nil for `LowCardinality(Nullable(X))`. [#768](https://github.com/ClickHouse/clickhouse-go/pull/768) + +### Breaking Changes +- Align timezone handling with spec. [#776](https://github.com/ClickHouse/clickhouse-go/pull/766), specifically: + - If parsing strings for datetime, datetime64 or dates we assume the locale is Local (i.e. the client) if not specified in the string. + - The server (or column tz) is used for datetime and datetime64 rendering. For date/date32, these have no tz info in the server. For now, they will be rendered as UTC - consistent with the clickhouse-client + - Addresses bind when no location is set diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/LICENSE b/vendor/github.com/ClickHouse/clickhouse-go/v2/LICENSE index 7a4a3ea..65c5df8 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/LICENSE +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/LICENSE @@ -1,3 +1,4 @@ +Copyright 2016-2023 ClickHouse, Inc. Apache License Version 2.0, January 2004 @@ -187,7 +188,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016-2023 ClickHouse, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -199,4 +200,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile b/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile index e925c29..acd3a74 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile @@ -1,19 +1,21 @@ +CLICKHOUSE_VERSION ?= latest +CLICKHOUSE_TEST_TIMEOUT ?= 120s +CLICKHOUSE_QUORUM_INSERT ?= 1 + up: @docker compose up -d down: @docker compose down cli: - docker run -it --rm --net clickhouse-go_clickhouse --link clickhouse:clickhouse-server yandex/clickhouse-client --host clickhouse-server + docker run -it --rm --net clickhouse-go_clickhouse --link clickhouse:clickhouse-server --host clickhouse-server test: @go install -race -v - @go test -race -timeout 30s -count=1 -v . - @go test -race -timeout 30s -count=1 -v ./tests/... + @CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) CLICKHOUSE_QUORUM_INSERT=$(CLICKHOUSE_QUORUM_INSERT) go test -race -timeout $(CLICKHOUSE_TEST_TIMEOUT) -count=1 -v ./... lint: golangci-lint run || : - gocritic check -disable=singleCaseSwitch ./... || : contributors: @git log --pretty="%an <%ae>%n%cn <%ce>" | sort -u -t '<' -k 2,2 | LC_ALL=C sort | \ @@ -24,7 +26,7 @@ staticcheck: staticcheck ./... codegen: contributors - @cd lib/column && go run codegen/main.go + @go run lib/column/codegen/main.go @go-licenser -licensor "ClickHouse, Inc." -.PHONY: contributors \ No newline at end of file +.PHONY: contributors diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md index 1cbcd10..0cef001 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md @@ -1,38 +1,53 @@ # ClickHouse [![run-tests](https://github.com/ClickHouse/clickhouse-go/actions/workflows/run-tests.yml/badge.svg?branch=v2)](https://github.com/ClickHouse/clickhouse-go/actions/workflows/run-tests.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/ClickHouse/clickhouse-go/v2.svg)](https://pkg.go.dev/github.com/ClickHouse/clickhouse-go/v2) -Golang SQL database driver for [ClickHouse](https://clickhouse.com/). +Golang SQL database client for [ClickHouse](https://clickhouse.com/). ## Versions -There are two version of this driver, v1 and v2, available as separate branches. +There are two version of this client, v1 and v2, available as separate branches. **v1 is now in a state of a maintenance - we will only accept PRs for bug and security fixes.** Users should use v2 which is production ready and [significantly faster than v1](#benchmark). +v2 has breaking changes for users migrating from v1. These were not properly tracked prior to this client being officially supported. We endeavour to track known differences [here](https://github.com/ClickHouse/clickhouse-go/blob/main/v1_v2_CHANGES.md) and resolve where possible. 
+ ## Supported ClickHouse Versions -The driver is tested against the currently [supported versions](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md) of ClickHouse +The client is tested against the currently [supported versions](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md) of ClickHouse + +## Supported Golang Versions + +| Client Version | Golang Versions | +|----------------|-----------------| +| => 2.0 <= 2.2 | 1.17, 1.18 | +| >= 2.3 | 1.18.4+, 1.19 | +| >= 2.14 | 1.20, 1.21 | ## Key features -* Uses native ClickHouse TCP client-server protocol +* Uses ClickHouse native format for optimal performance. Utilises low level [ch-go](https://github.com/ClickHouse/ch-go) client for encoding/decoding and compression (versions >= 2.3.0). +* Supports native ClickHouse TCP client-server protocol * Compatibility with [`database/sql`](#std-databasesql-interface) ([slower](#benchmark) than [native interface](#native-interface)!) -* Marshal rows into structs ([ScanStruct](tests/scan_struct_test.go), [Select](examples/native/scan_struct/main.go)) +* [`database/sql`](#std-databasesql-interface) supports http protocol for transport. (Experimental) +* Marshal rows into structs ([ScanStruct](examples/clickhouse_api/scan_struct.go), [Select](examples/clickhouse_api/select_struct.go)) * Unmarshal struct to row ([AppendStruct](benchmark/v2/write-native-struct/main.go)) * Connection pool * Failover and load balancing -* [Bulk write support](examples/native/batch/main.go) (for `database/sql` [use](examples/std/batch/main.go) `begin->prepare->(in loop exec)->commit`) -* [AsyncInsert](benchmark/v2/write-async/main.go) +* [Bulk write support](examples/clickhouse_api/batch.go) (for `database/sql` [use](examples/std/batch.go) `begin->prepare->(in loop exec)->commit`) +* [PrepareBatch options](#preparebatch-options) +* [AsyncInsert](benchmark/v2/write-async/main.go) (more details in [Async insert](#async-insert) section) * Named and numeric placeholders support -* LZ4 compression support +* LZ4/ZSTD compression support * External data +* [Query parameters](examples/std/query_parameters.go) Support for the ClickHouse protocol advanced features using `Context`: * Query ID * Quota Key * Settings +* [Query parameters](examples/clickhouse_api/query_parameters.go) * OpenTelemetry * Execution events: * Logs @@ -40,6 +55,57 @@ Support for the ClickHouse protocol advanced features using `Context`: * Profile info * Profile events +## Documentation + +[https://clickhouse.com/docs/en/integrations/go](https://clickhouse.com/docs/en/integrations/go) + +# `clickhouse` interface (formally `native` interface) + +```go + conn, err := clickhouse.Open(&clickhouse.Options{ + Addr: []string{"127.0.0.1:9000"}, + Auth: clickhouse.Auth{ + Database: "default", + Username: "default", + Password: "", + }, + DialContext: func(ctx context.Context, addr string) (net.Conn, error) { + dialCount++ + var d net.Dialer + return d.DialContext(ctx, "tcp", addr) + }, + Debug: true, + Debugf: func(format string, v ...any) { + fmt.Printf(format+"\n", v...) 
+ }, + Settings: clickhouse.Settings{ + "max_execution_time": 60, + }, + Compression: &clickhouse.Compression{ + Method: clickhouse.CompressionLZ4, + }, + DialTimeout: time.Second * 30, + MaxOpenConns: 5, + MaxIdleConns: 5, + ConnMaxLifetime: time.Duration(10) * time.Minute, + ConnOpenStrategy: clickhouse.ConnOpenInOrder, + BlockBufferSize: 10, + MaxCompressionBuffer: 10240, + ClientInfo: clickhouse.ClientInfo{ // optional, please see Client info section in the README.md + Products: []struct { + Name string + Version string + }{ + {Name: "my-app", Version: "0.1"}, + }, + }, + }) + if err != nil { + return err + } + return conn.Ping(context.Background()) +``` + # `database/sql` interface ## OpenDB @@ -58,27 +124,48 @@ conn := clickhouse.OpenDB(&clickhouse.Options{ Settings: clickhouse.Settings{ "max_execution_time": 60, }, - DialTimeout: 5 * time.Second, + DialTimeout: time.Second * 30, Compression: &clickhouse.Compression{ - clickhouse.CompressionLZ4, + Method: clickhouse.CompressionLZ4, }, Debug: true, + BlockBufferSize: 10, + MaxCompressionBuffer: 10240, + ClientInfo: clickhouse.ClientInfo{ // optional, please see Client info section in the README.md + Products: []struct { + Name string + Version string + }{ + {Name: "my-app", Version: "0.1"}, + }, + }, }) conn.SetMaxIdleConns(5) conn.SetMaxOpenConns(10) conn.SetConnMaxLifetime(time.Hour) ``` + ## DSN * hosts - comma-separated list of single address hosts for load-balancing and failover * username/password - auth credentials * database - select the current default database -* dial_timeout - a duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix such as "300ms", "1s". Valid time units are "ms", "s", "m". -* connection_open_strategy - random/in_order (default random). - * round-robin - choose a round-robin server from the set +* dial_timeout - a duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix such as "300ms", "1s". Valid time units are "ms", "s", "m". (default 30s) +* connection_open_strategy - random/round_robin/in_order (default in_order). + * random - choose random server from the set + * round_robin - choose a round-robin server from the set * in_order - first live server is chosen in specified order * debug - enable debug output (boolean value) -* compress - enable lz4 compression (boolean value) +* compress - compress - specify the compression algorithm - “none” (default), `zstd`, `lz4`, `gzip`, `deflate`, `br`. If set to `true`, `lz4` will be used. +* compress_level - Level of compression (default is 0). This is algorithm specific: + - `gzip` - `-2` (Best Speed) to `9` (Best Compression) + - `deflate` - `-2` (Best Speed) to `9` (Best Compression) + - `br` - `0` (Best Speed) to `11` (Best Compression) + - `zstd`, `lz4` - ignored +* block_buffer_size - size of block buffer (default 2) +* read_timeout - a duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix such as "300ms", "1s". Valid time units are "ms", "s", "m" (default 5m). +* max_compression_buffer - max size (bytes) of compression buffer during column by column compression (default 10MiB) +* client_info_product - optional list (comma separated) of product name and version pair separated with `/`. This value will be pass a part of client info. e.g. `client_info_product=my_app/1.0,my_module/0.1` More details in [Client info](#client-info) section. 
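For instance, several of these parameters can be combined into a single DSN and opened through the std interface; the host, credentials and parameter values below are placeholders, not recommendations:

```go
import (
	"database/sql"

	_ "github.com/ClickHouse/clickhouse-go/v2" // registers the "clickhouse" driver
)

func openFromDSN() (*sql.DB, error) {
	dsn := "clickhouse://default:password@127.0.0.1:9000/default" +
		"?dial_timeout=200ms&read_timeout=5m&compress=lz4&block_buffer_size=10&debug=true"
	return sql.Open("clickhouse", dsn)
}
```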
SSL/TLS parameters: @@ -91,47 +178,50 @@ Example: clickhouse://username:password@host1:9000,host2:9000/database?dial_timeout=200ms&max_execution_time=60 ``` -## Benchmark - -| [V1 (READ)](benchmark/v1/read/main.go) | [V2 (READ) std](benchmark/v2/read/main.go) | [V2 (READ) native](benchmark/v2/read-native/main.go) | -| -------------------------------------- | ------------------------------------------ | ---------------------------------------------------- | -| 1.218s | 924.390ms | 675.721ms | - - -| [V1 (WRITE)](benchmark/v1/write/main.go) | [V2 (WRITE) std](benchmark/v2/write/main.go) | [V2 (WRITE) native](benchmark/v2/write-native/main.go) | [V2 (WRITE) by column](benchmark/v2/write-native-columnar/main.go) | -| ---------------------------------------- | -------------------------------------------- | ------------------------------------------------------ | ------------------------------------------------------------------ | -| 1.899s | 1.177s | 699.203ms | 661.973ms | +### HTTP Support (Experimental) +The native format can be used over the HTTP protocol. This is useful in scenarios where users need to proxy traffic e.g. using [ChProxy](https://www.chproxy.org/) or via load balancers. - -## Install +This can be achieved by modifying the DSN to specify the HTTP protocol. ```sh -go get -u github.com/ClickHouse/clickhouse-go/v2 +http://host1:8123,host2:8123/database?dial_timeout=200ms&max_execution_time=60 ``` -## Examples +Alternatively, use `OpenDB` and specify the interface type. -### native interface +```go +conn := clickhouse.OpenDB(&clickhouse.Options{ + Addr: []string{"127.0.0.1:8123"}, + Auth: clickhouse.Auth{ + Database: "default", + Username: "default", + Password: "", + }, + Settings: clickhouse.Settings{ + "max_execution_time": 60, + }, + DialTimeout: 30 * time.Second, + Compression: &clickhouse.Compression{ + Method: clickhouse.CompressionLZ4, + }, + Protocol: clickhouse.HTTP, +}) +``` -* [batch](examples/native/batch/main.go) -* [async insert](examples/native/write-async) -* [batch struct](examples/native/write-struct/main.go) -* [columnar](examples/native/write-columnar/main.go) -* [scan struct](examples/native/scan_struct/main.go) -* [bind params](examples/native/bind/main.go) +**Note**: using HTTP protocol is possible only with `database/sql` interface. -### std `database/sql` interface +## Compression + +ZSTD/LZ4 compression is supported over native and http protocols. This is performed column by column at a block level and is only used for inserts. Compression buffer size is set as `MaxCompressionBuffer` option. -* [batch](examples/std/batch/main.go) -* [async insert](examples/std/write-async) -* [open db](examples/std/open_db/main.go) -* [bind params](examples/std/bind/main.go) +If using `Open` via the std interface and specifying a DSN, compression can be enabled via the `compress` flag. Currently, this is a boolean flag which enables `LZ4` compression. +Other compression methods will be added in future PRs. -#### A Note on TLS/SSL +## TLS/SSL -At a low level all driver connect methods (DSN/OpenDB/Open) will use the [Go tls package](https://pkg.go.dev/crypto/tls) to establish a secure connection. The driver knows to use TLS if the Options struct contains a non-nil tls.Config pointer. +At a low level all client connect methods (DSN/OpenDB/Open) will use the [Go tls package](https://pkg.go.dev/crypto/tls) to establish a secure connection. The client knows to use TLS if the Options struct contains a non-nil tls.Config pointer. 
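For example, a minimal sketch of enabling TLS over the native interface by supplying a non-nil `tls.Config`; the address, port and verification settings here are assumptions for illustration:

```go
import (
	"crypto/tls"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

func openSecure() (driver.Conn, error) {
	return clickhouse.Open(&clickhouse.Options{
		Addr: []string{"127.0.0.1:9440"}, // secure native port; adjust to your deployment
		Auth: clickhouse.Auth{
			Database: "default",
			Username: "default",
			Password: "",
		},
		// A non-nil tls.Config is what tells the client to negotiate TLS.
		TLS: &tls.Config{
			InsecureSkipVerify: false,
		},
	})
}
```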
Setting secure in the DSN creates a minimal tls.Config struct with only the InsecureSkipVerify field set (either true or false). It is equivalent to this code: @@ -148,14 +238,114 @@ This minimal tls.Config is normally all that is necessary to connect to the secu If additional TLS parameters are necessary the application code should set the desired fields in the tls.Config struct. That can include specific cipher suites, forcing a particular TLS version (like 1.2 or 1.3), adding an internal CA certificate chain, adding a client certificate (and private key) if required by the ClickHouse server, and most of the other options that come with a more specialized security setup. +### HTTPS (Experimental) + +To connect using HTTPS either: + +- Use `https` in your dsn string e.g. + + ```sh + https://host1:8443,host2:8443/database?dial_timeout=200ms&max_execution_time=60 + ``` + +- Specify the interface type as `HttpsInterface` e.g. + +```go +conn := clickhouse.OpenDB(&clickhouse.Options{ + Addr: []string{"127.0.0.1:8443"}, + Auth: clickhouse.Auth{ + Database: "default", + Username: "default", + Password: "", + }, + Protocol: clickhouse.HTTP, +}) +``` + +## Client info + + +Clickhouse-go implements [client info](https://docs.google.com/document/d/1924Dvy79KXIhfqKpi1EBVY3133pIdoMwgCQtZ-uhEKs/edit#heading=h.ah33hoz5xei2) as a part of language client specification. `client_name` for native protocol and HTTP `User-Agent` header values are provided with the exact client info string. + +Users can extend client options with additional product information included in client info. This might be useful for analysis [on a server side](https://clickhouse.com/docs/en/operations/system-tables/query_log/). + +Order is the highest abstraction to the lowest level implementation left to right. + +Usage examples for [native API](examples/clickhouse_api/client_info.go) and [database/sql](examples/std/client_info.go) are provided. + +## Async insert + +[Asynchronous insert](https://clickhouse.com/docs/en/optimize/asynchronous-inserts#enabling-asynchronous-inserts) is supported via dedicated `AsyncInsert` method. This allows to insert data with a non-blocking call. +Effectively, it controls a `async_insert` setting for the query. + +### Using with batch API + +Using native protocol, asynchronous insert does not support batching. It means, only inline query data is supported. Please see an example [here](examples/std/async.go). + +HTTP protocol supports batching. It can be enabled by setting `async_insert` when using standard `Prepare` method. + +For more details please see [asynchronous inserts](https://clickhouse.com/docs/en/optimize/asynchronous-inserts#enabling-asynchronous-inserts) documentation. + +## PrepareBatch options + +Available options: +- [WithReleaseConnection](examples/clickhouse_api/batch_release_connection.go) - after PrepareBatch connection will be returned to the pool. It can help you make a long-lived batch. 
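As a minimal sketch (assuming an existing native `conn`, a context `ctx`, a hypothetical `example` table, and `driver` referring to `github.com/ClickHouse/clickhouse-go/v2/lib/driver`), the option is passed directly to `PrepareBatch`:

```go
batch, err := conn.PrepareBatch(ctx, "INSERT INTO example (id)",
	driver.WithReleaseConnection())
if err != nil {
	return err
}
// The underlying connection is returned to the pool right after PrepareBatch,
// so the batch can be kept alive and appended to over a longer period.
if err := batch.Append(uint64(1)); err != nil {
	return err
}
return batch.Send()
```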
+ +## Benchmark + +| [V1 (READ)](benchmark/v1/read/main.go) | [V2 (READ) std](benchmark/v2/read/main.go) | [V2 (READ) clickhouse API](benchmark/v2/read-native/main.go) | +| -------------------------------------- | ------------------------------------------ |--------------------------------------------------------------| +| 1.218s | 924.390ms | 675.721ms | + + +| [V1 (WRITE)](benchmark/v1/write/main.go) | [V2 (WRITE) std](benchmark/v2/write/main.go) | [V2 (WRITE) clickhouse API](benchmark/v2/write-native/main.go) | [V2 (WRITE) by column](benchmark/v2/write-native-columnar/main.go) | +| ---------------------------------------- | -------------------------------------------- | ------------------------------------------------------ | ------------------------------------------------------------------ | +| 1.899s | 1.177s | 699.203ms | 661.973ms | + + + +## Install + +```sh +go get -u github.com/ClickHouse/clickhouse-go/v2 +``` + +## Examples + +### native interface + +* [batch](examples/clickhouse_api/batch.go) +* [batch with release connection](examples/clickhouse_api/batch_release_connection.go) +* [async insert](examples/clickhouse_api/async.go) +* [batch struct](examples/clickhouse_api/append_struct.go) +* [columnar](examples/clickhouse_api/columnar_insert.go) +* [scan struct](examples/clickhouse_api/scan_struct.go) +* [query parameters](examples/clickhouse_api/query_parameters.go) (deprecated in favour of native query parameters) +* [bind params](examples/clickhouse_api/bind.go) (deprecated in favour of native query parameters) +* [client info](examples/clickhouse_api/client_info.go) + +### std `database/sql` interface + +* [batch](examples/std/batch.go) +* [async insert](examples/std/async.go) +* [open db](examples/std/connect.go) +* [query parameters](examples/std/query_parameters.go) +* [bind params](examples/std/bind.go) (deprecated in favour of native query parameters) +* [client info](examples/std/client_info.go) + +## ClickHouse alternatives - ch-go + +Versions of this client >=2.3.x utilise [ch-go](https://github.com/ClickHouse/ch-go) for their low level encoding/decoding. This low lever client provides a high performance columnar interface and should be used in performance critical use cases. This client provides more familar row orientated and `database/sql` semantics at the cost of some performance. + +Both clients are supported by ClickHouse. + ## Third-party alternatives -* Database drivers: +* Database client/clients: * [mailru/go-clickhouse](https://github.com/mailru/go-clickhouse) (uses the HTTP protocol) * [uptrace/go-clickhouse](https://github.com/uptrace/go-clickhouse) (uses the native TCP protocol with `database/sql`-like API) * Drivers with columnar interface: * [vahid-sohrabloo/chconn](https://github.com/vahid-sohrabloo/chconn) - * [go-faster/ch](https://github.com/go-faster/ch) * Insert collectors: * [KittenHouse](https://github.com/YuriyNasretdinov/kittenhouse) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md new file mode 100644 index 0000000..6712539 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md @@ -0,0 +1,82 @@ +The following table aims to capture the Golang types supported for each ClickHouse Column Type. 
+ +Whilst each ClickHouse type often has a logical Golang type, we aim to support implicit conversions where possible and provided no precision loss will be incurred - thus alleviating the need for users to ensure their data aligns perfectly with ClickHouse types. + +This effort is ongoing and can be seperated in to insertion (`Append`/`AppendRow`) and read time (via a `Scan`). Should you need support for a specific conversion, please raise an issue. + +## Append Support + +All types can be inserted as a value or pointer. + +| | **ClickHouse Type** | String | Decimal | Bool | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | +|---------------|---------------------|--------|---------|------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| +| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | +| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | +| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | +| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | +| int | | | | | | | | | | | | | | | | | | | | | | | | | X | X | | | | | +| int64 | | | | | | | | | | | | | | | X | | | | | | | | X | X | | | | | | | +| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | +| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | | +| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | +| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| string | | X | | | X | | | | | | | | | | | | | | | X | X | X | X | X | X | X | | | | | +| bool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | +| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | +| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | +| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | +| orb.Polygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | +| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | +| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | +| []byte | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | + | fmt.Stringer | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| 
sql.NullBool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | + +## Scan Support + +All types can be read into a pointer or pointer to a pointer. + +| | **ClickHouse Type** | String | Decimal | Bool | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | +|---------------|---------------------|--------|---------|------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| +| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | +| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | +| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | +| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | +| int | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| int64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | | +| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | +| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| string | | X | | | X | | | | | | | | | | | | | | | X | | | | | X | X | | | | | +| bool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | +| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | +| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | +| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | +| orb.Polygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | +| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | +| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | +| sql.Scan | | | | | | | | | | | | | | | | | | | | X | X | X | X | X | | | | | | | +| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| sql.NullBool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go new file mode 100644 index 0000000..15d1186 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go @@ -0,0 +1,58 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "fmt" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var normalizeInsertQueryMatch = regexp.MustCompile(`(?i)(INSERT\s+INTO\s+([^(]+)(?:\s*\([^()]*(?:\([^()]*\)[^()]*)*\))?)(?:\s*VALUES)?`) +var truncateFormat = regexp.MustCompile(`\sFORMAT\s+[^\s]+`) +var truncateValues = regexp.MustCompile(`\sVALUES\s.*$`) +var extractInsertColumnsMatch = regexp.MustCompile(`INSERT INTO .+\s\((?P.+)\)$`) + +func extractNormalizedInsertQueryAndColumns(query string) (normalizedQuery string, tableName string, columns []string, err error) { + query = truncateFormat.ReplaceAllString(query, "") + query = truncateValues.ReplaceAllString(query, "") + + matches := normalizeInsertQueryMatch.FindStringSubmatch(query) + if len(matches) == 0 { + err = errors.Errorf("invalid INSERT query: %s", query) + return + } + + normalizedQuery = fmt.Sprintf("%s FORMAT Native", matches[1]) + tableName = strings.TrimSpace(matches[2]) + + columns = make([]string, 0) + matches = extractInsertColumnsMatch.FindStringSubmatch(matches[1]) + if len(matches) == 2 { + columns = strings.Split(matches[1], ",") + for i := range columns { + // refers to https://clickhouse.com/docs/en/sql-reference/syntax#identifiers + // we can use identifiers with double quotes or backticks, for example: "id", `id`, but not both, like `"id"`. 
+ columns[i] = strings.Trim(strings.Trim(strings.TrimSpace(columns[i]), "\""), "`") + } + } + + return +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go index 07930dc..f637307 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go @@ -25,10 +25,11 @@ import ( "strings" "time" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) -func Named(name string, value interface{}) driver.NamedValue { +func Named(name string, value any) driver.NamedValue { return driver.NamedValue{ Name: name, Value: value, @@ -45,9 +46,11 @@ const ( ) type GroupSet struct { - Value []interface{} + Value []any } +type ArraySet []any + func DateNamed(name string, value time.Time, scale TimeUnit) driver.NamedDateValue { return driver.NamedDateValue{ Name: name, @@ -56,80 +59,120 @@ func DateNamed(name string, value time.Time, scale TimeUnit) driver.NamedDateVal } } -var bindNumericRe = regexp.MustCompile(`\$[0-9]+`) -var bindPositionalRe = regexp.MustCompile(`[^\\][?]`) +var ( + bindNumericRe = regexp.MustCompile(`\$[0-9]+`) + bindPositionalRe = regexp.MustCompile(`[^\\][?]`) +) -func bind(tz *time.Location, query string, args ...interface{}) (string, error) { +func bind(tz *time.Location, query string, args ...any) (string, error) { if len(args) == 0 { return query, nil } var ( - haveNamed bool haveNumeric bool havePositional bool ) + + allArgumentsNamed, err := checkAllNamedArguments(args...) + if err != nil { + return "", err + } + + if allArgumentsNamed { + return bindNamed(tz, query, args...) + } + haveNumeric = bindNumericRe.MatchString(query) havePositional = bindPositionalRe.MatchString(query) if haveNumeric && havePositional { return "", ErrBindMixedParamsFormats } + if haveNumeric { + return bindNumeric(tz, query, args...) + } + return bindPositional(tz, query, args...) +} + +func checkAllNamedArguments(args ...any) (bool, error) { + var ( + haveNamed bool + haveAnonymous bool + ) for _, v := range args { switch v.(type) { case driver.NamedValue, driver.NamedDateValue: haveNamed = true default: + haveAnonymous = true } - if haveNamed && (haveNumeric || havePositional) { - return "", ErrBindMixedParamsFormats + if haveNamed && haveAnonymous { + return haveNamed, ErrBindMixedParamsFormats } } - if haveNamed { - return bindNamed(tz, query, args...) - } - if haveNumeric { - return bindNumeric(tz, query, args...) - } - return bindPositional(tz, query, args...) + return haveNamed, nil } -var bindPositionCharRe = regexp.MustCompile(`[?]`) - -func bindPositional(tz *time.Location, query string, args ...interface{}) (_ string, err error) { +func bindPositional(tz *time.Location, query string, args ...any) (_ string, err error) { var ( - unbind = make(map[int]struct{}) - params = make([]string, len(args)) + lastMatchIndex = -1 // Position of previous match for copying + argIndex = 0 // Index for the argument at current position + buf = make([]byte, 0, len(query)) + unbindCount = 0 // Number of positional arguments that couldn't be matched ) - for i, v := range args { - if fn, ok := v.(std_driver.Valuer); ok { - if v, err = fn.Value(); err != nil { - return "", nil + + for i := 0; i < len(query); i++ { + // It's fine looping through the query string as bytes, because the (fixed) characters we're looking for + // are in the ASCII range to won't take up more than one byte. + if query[i] == '?' 
{ + if i > 0 && query[i-1] == '\\' { + // Copy all previous index to here characters + buf = append(buf, query[lastMatchIndex+1:i-1]...) + buf = append(buf, '?') + } else { + // Copy all previous index to here characters + buf = append(buf, query[lastMatchIndex+1:i]...) + + // Append the argument value + if argIndex < len(args) { + v := args[argIndex] + if fn, ok := v.(std_driver.Valuer); ok { + if v, err = fn.Value(); err != nil { + return "", nil + } + } + + value, err := format(tz, Seconds, v) + if err != nil { + return "", err + } + + buf = append(buf, value...) + argIndex++ + } else { + unbindCount++ + } } - } - params[i], err = format(tz, Seconds, v) - if err != nil { - return "", err + + lastMatchIndex = i } } - i := 0 - query = bindPositionalRe.ReplaceAllStringFunc(query, func(n string) string { - if i >= len(params) { - unbind[i] = struct{}{} - return "" - } - val := params[i] - i++ - return bindPositionCharRe.ReplaceAllStringFunc(n, func(m string) string { - return val - }) - }) - for param := range unbind { - return "", fmt.Errorf("have no arg for param ? at position %d", param) + + // If there were no replacements, quick return without copying the string + if lastMatchIndex < 0 { + return query, nil } - // replace \? escape sequence - return strings.ReplaceAll(query, "\\?", "?"), nil + + // Append the remainder + buf = append(buf, query[lastMatchIndex+1:]...) + + if unbindCount > 0 { + return "", fmt.Errorf("have no arg for param ? at last %d positions", unbindCount) + } + + return string(buf), nil } -func bindNumeric(tz *time.Location, query string, args ...interface{}) (_ string, err error) { +func bindNumeric(tz *time.Location, query string, args ...any) (_ string, err error) { var ( unbind = make(map[string]struct{}) params = make(map[string]string) @@ -161,7 +204,7 @@ func bindNumeric(tz *time.Location, query string, args ...interface{}) (_ string var bindNamedRe = regexp.MustCompile(`@[a-zA-Z0-9\_]+`) -func bindNamed(tz *time.Location, query string, args ...interface{}) (_ string, err error) { +func bindNamed(tz *time.Location, query string, args ...any) (_ string, err error) { var ( unbind = make(map[string]struct{}) params = make(map[string]string) @@ -203,7 +246,13 @@ func bindNamed(tz *time.Location, query string, args ...interface{}) (_ string, func formatTime(tz *time.Location, scale TimeUnit, value time.Time) (string, error) { switch value.Location().String() { - case "Local": + case "Local", "": + // It's required to pass timestamp as string due to decimal overflow for higher precision, + // but zero-value string "toDateTime('0')" will be not parsed by ClickHouse. 
+ if value.Unix() == 0 { + return "toDateTime(0)", nil + } + switch scale { case Seconds: return fmt.Sprintf("toDateTime('%d')", value.Unix()), nil @@ -221,14 +270,16 @@ func formatTime(tz *time.Location, scale TimeUnit, value time.Time) (string, err return fmt.Sprintf("toDateTime64('%s', %d)", value.Format(fmt.Sprintf("2006-01-02 15:04:05.%0*d", int(scale*3), 0)), int(scale*3)), nil } if scale == Seconds { - return value.Format(fmt.Sprintf("toDateTime('2006-01-02 15:04:05', '%s')", value.Location().String())), nil + return fmt.Sprintf("toDateTime('%s', '%s')", value.Format("2006-01-02 15:04:05"), value.Location().String()), nil } return fmt.Sprintf("toDateTime64('%s', %d, '%s')", value.Format(fmt.Sprintf("2006-01-02 15:04:05.%0*d", int(scale*3), 0)), int(scale*3), value.Location().String()), nil } -func format(tz *time.Location, scale TimeUnit, v interface{}) (string, error) { +var stringQuoteReplacer = strings.NewReplacer(`\`, `\\`, `'`, `\'`) + +func format(tz *time.Location, scale TimeUnit, v any) (string, error) { quote := func(v string) string { - return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v) + "'" + return "'" + stringQuoteReplacer.Replace(v) + "'" } switch v := v.(type) { case nil: @@ -237,33 +288,74 @@ func format(tz *time.Location, scale TimeUnit, v interface{}) (string, error) { return quote(v), nil case time.Time: return formatTime(tz, scale, v) + case bool: + if v { + return "1", nil + } + return "0", nil case GroupSet: - elements := make([]string, 0, len(v.Value)) - for _, e := range v.Value { - val, err := format(tz, scale, e) + val, err := join(tz, scale, v.Value) + if err != nil { + return "", err + } + return fmt.Sprintf("(%s)", val), nil + case []GroupSet: + val, err := join(tz, scale, v) + if err != nil { + return "", err + } + return val, err + case ArraySet: + val, err := join(tz, scale, v) + if err != nil { + return "", err + } + return fmt.Sprintf("[%s]", val), nil + case fmt.Stringer: + if v := reflect.ValueOf(v); v.Kind() == reflect.Pointer && + v.IsNil() && + v.Type().Elem().Implements(reflect.TypeOf((*fmt.Stringer)(nil)).Elem()) { + return "NULL", nil + } + return quote(v.String()), nil + case column.OrderedMap: + values := make([]string, 0) + for key := range v.Keys() { + name, err := format(tz, scale, key) if err != nil { return "", err } - elements = append(elements, val) + value, _ := v.Get(key) + val, err := format(tz, scale, value) + if err != nil { + return "", err + } + values = append(values, fmt.Sprintf("%s, %s", name, val)) } - return fmt.Sprintf("(%s)", strings.Join(elements, ", ")), nil - case []GroupSet: - items := make([]string, 0, len(v)) - for _, t := range v { - val, err := format(tz, scale, t) + + return "map(" + strings.Join(values, ", ") + ")", nil + case column.IterableOrderedMap: + values := make([]string, 0) + iter := v.Iterator() + for iter.Next() { + key, value := iter.Key(), iter.Value() + name, err := format(tz, scale, key) if err != nil { return "", err } - items = append(items, fmt.Sprintf("%s", val)) + val, err := format(tz, scale, value) + if err != nil { + return "", err + } + values = append(values, fmt.Sprintf("%s, %s", name, val)) } - return strings.Join(items, ", "), nil - case fmt.Stringer: - return quote(v.String()), nil + + return "map(" + strings.Join(values, ", ") + ")", nil } switch v := reflect.ValueOf(v); v.Kind() { case reflect.String: return quote(v.String()), nil - case reflect.Slice: + case reflect.Slice, reflect.Array: values := make([]string, 0, v.Len()) for i := 0; i < v.Len(); i++ { val, 
err := format(tz, scale, v.Index(i).Interface()) @@ -272,7 +364,7 @@ func format(tz *time.Location, scale TimeUnit, v interface{}) (string, error) { } values = append(values, val) } - return strings.Join(values, ", "), nil + return fmt.Sprintf("[%s]", strings.Join(values, ", ")), nil case reflect.Map: // map values := make([]string, 0, len(v.MapKeys())) for _, key := range v.MapKeys() { @@ -284,20 +376,32 @@ func format(tz *time.Location, scale TimeUnit, v interface{}) (string, error) { if err != nil { return "", err } - if v.MapIndex(key).Kind() == reflect.Slice { - // assume slices in maps are arrays - val = fmt.Sprintf("[%s]", val) - } - values = append(values, fmt.Sprintf("%s : %s", name, val)) + values = append(values, fmt.Sprintf("%s, %s", name, val)) } - return "{" + strings.Join(values, ", ") + "}", nil - + return "map(" + strings.Join(values, ", ") + ")", nil + case reflect.Ptr: + if v.IsNil() { + return "NULL", nil + } + return format(tz, scale, v.Elem().Interface()) } return fmt.Sprint(v), nil } -func rebind(in []std_driver.NamedValue) []interface{} { - args := make([]interface{}, 0, len(in)) +func join[E any](tz *time.Location, scale TimeUnit, values []E) (string, error) { + items := make([]string, len(values), len(values)) + for i := range values { + val, err := format(tz, scale, values[i]) + if err != nil { + return "", err + } + items[i] = val + } + return strings.Join(items, ", "), nil +} + +func rebind(in []std_driver.NamedValue) []any { + args := make([]any, 0, len(in)) for _, v := range in { switch { case len(v.Name) != 0: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go index 351cc6c..d7ad40e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go @@ -21,9 +21,13 @@ import ( "context" "errors" "fmt" + "math/rand" "sync/atomic" "time" + _ "time/tzdata" + + chproto "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/contributors" "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" @@ -40,11 +44,14 @@ type ( ) var ( + ErrBatchInvalid = errors.New("clickhouse: batch is invalid. check appended data is correct") ErrBatchAlreadySent = errors.New("clickhouse: batch has already been sent") + ErrBatchNotSent = errors.New("clickhouse: invalid retry, batch not sent yet") ErrAcquireConnTimeout = errors.New("clickhouse: acquire conn timeout. 
you can increase the number of max open conn or the dial timeout") ErrUnsupportedServerRevision = errors.New("clickhouse: unsupported server revision") ErrBindMixedParamsFormats = errors.New("clickhouse [bind]: mixed named, numeric or positional parameters") ErrAcquireConnNoAddress = errors.New("clickhouse: no valid address supplied") + ErrServerUnexpectedData = errors.New("code: 101, message: Unexpected packet Data received from client") ) type OpError struct { @@ -72,18 +79,25 @@ func (e *OpError) Error() string { } func Open(opt *Options) (driver.Conn, error) { - opt.setDefaults() - return &clickhouse{ - opt: opt, - idle: make(chan *connect, opt.MaxIdleConns), - open: make(chan struct{}, opt.MaxOpenConns), - }, nil + if opt == nil { + opt = &Options{} + } + o := opt.setDefaults() + conn := &clickhouse{ + opt: o, + idle: make(chan *connect, o.MaxIdleConns), + open: make(chan struct{}, o.MaxOpenConns), + exit: make(chan struct{}), + } + go conn.startAutoCloseIdleConnections() + return conn, nil } type clickhouse struct { opt *Options idle chan *connect open chan struct{} + exit chan struct{} connID int64 } @@ -108,25 +122,27 @@ func (ch *clickhouse) ServerVersion() (*driver.ServerVersion, error) { return &conn.server, nil } -func (ch *clickhouse) Query(ctx context.Context, query string, args ...interface{}) (rows driver.Rows, err error) { +func (ch *clickhouse) Query(ctx context.Context, query string, args ...any) (rows driver.Rows, err error) { conn, err := ch.acquire(ctx) if err != nil { return nil, err } + conn.debugf("[acquired] connection [%d]", conn.id) return conn.query(ctx, ch.release, query, args...) } -func (ch *clickhouse) QueryRow(ctx context.Context, query string, args ...interface{}) (rows driver.Row) { +func (ch *clickhouse) QueryRow(ctx context.Context, query string, args ...any) (rows driver.Row) { conn, err := ch.acquire(ctx) if err != nil { return &row{ err: err, } } + conn.debugf("[acquired] connection [%d]", conn.id) return conn.queryRow(ctx, ch.release, query, args...) 
} -func (ch *clickhouse) Exec(ctx context.Context, query string, args ...interface{}) error { +func (ch *clickhouse) Exec(ctx context.Context, query string, args ...any) error { conn, err := ch.acquire(ctx) if err != nil { return err @@ -139,24 +155,34 @@ func (ch *clickhouse) Exec(ctx context.Context, query string, args ...interface{ return nil } -func (ch *clickhouse) PrepareBatch(ctx context.Context, query string) (driver.Batch, error) { +func (ch *clickhouse) PrepareBatch(ctx context.Context, query string, opts ...driver.PrepareBatchOption) (driver.Batch, error) { conn, err := ch.acquire(ctx) if err != nil { return nil, err } - batch, err := conn.prepareBatch(ctx, query, ch.release) + batch, err := conn.prepareBatch(ctx, query, getPrepareBatchOptions(opts...), ch.release, ch.acquire) if err != nil { return nil, err } return batch, nil } -func (ch *clickhouse) AsyncInsert(ctx context.Context, query string, wait bool) error { +func getPrepareBatchOptions(opts ...driver.PrepareBatchOption) driver.PrepareBatchOptions { + var options driver.PrepareBatchOptions + + for _, opt := range opts { + opt(&options) + } + + return options +} + +func (ch *clickhouse) AsyncInsert(ctx context.Context, query string, wait bool, args ...any) error { conn, err := ch.acquire(ctx) if err != nil { return err } - if err := conn.asyncInsert(ctx, query, wait); err != nil { + if err := conn.asyncInsert(ctx, query, wait, args...); err != nil { ch.release(conn, err) return err } @@ -188,22 +214,48 @@ func (ch *clickhouse) Stats() driver.Stats { func (ch *clickhouse) dial(ctx context.Context) (conn *connect, err error) { connID := int(atomic.AddInt64(&ch.connID, 1)) - for i := range ch.opt.Addr { + + dialFunc := func(ctx context.Context, addr string, opt *Options) (DialResult, error) { + conn, err := dial(ctx, addr, connID, opt) + + return DialResult{conn}, err + } + + dialStrategy := DefaultDialStrategy + if ch.opt.DialStrategy != nil { + dialStrategy = ch.opt.DialStrategy + } + + result, err := dialStrategy(ctx, connID, ch.opt, dialFunc) + if err != nil { + return nil, err + } + return result.conn, nil +} + +func DefaultDialStrategy(ctx context.Context, connID int, opt *Options, dial Dial) (r DialResult, err error) { + random := rand.Int() + for i := range opt.Addr { var num int - switch ch.opt.ConnOpenStrategy { + switch opt.ConnOpenStrategy { case ConnOpenInOrder: num = i case ConnOpenRoundRobin: - num = (int(connID) + i) % len(ch.opt.Addr) + num = (int(connID) + i) % len(opt.Addr) + case ConnOpenRandom: + num = (random + i) % len(opt.Addr) } - if conn, err = dial(ctx, ch.opt.Addr[num], connID, ch.opt); err == nil { - return conn, nil + + if r, err = dial(ctx, opt.Addr[num], opt); err == nil { + return r, nil } } + if err == nil { err = ErrAcquireConnNoAddress } - return nil, err + + return r, err } func (ch *clickhouse) acquire(ctx context.Context) (conn *connect, err error) { @@ -217,10 +269,16 @@ func (ch *clickhouse) acquire(ctx context.Context) (conn *connect, err error) { select { case <-timer.C: return nil, ErrAcquireConnTimeout + case <-ctx.Done(): + return nil, ctx.Err() case ch.open <- struct{}{}: } select { case <-timer.C: + select { + case <-ch.open: + default: + } return nil, ErrAcquireConnTimeout case conn := <-ch.idle: if conn.isBad() { @@ -247,6 +305,41 @@ func (ch *clickhouse) acquire(ctx context.Context) (conn *connect, err error) { return conn, nil } +func (ch *clickhouse) startAutoCloseIdleConnections() { + ticker := time.NewTicker(ch.opt.ConnMaxLifetime) + defer ticker.Stop() + + for { + 
select { + case <-ticker.C: + ch.closeIdleExpired() + case <-ch.exit: + return + } + } +} + +func (ch *clickhouse) closeIdleExpired() { + cutoff := time.Now().Add(-ch.opt.ConnMaxLifetime) + for { + select { + case conn := <-ch.idle: + if conn.connectedAt.Before(cutoff) { + conn.close() + } else { + select { + case ch.idle <- conn: + default: + conn.close() + } + return + } + default: + return + } + } +} + func (ch *clickhouse) release(conn *connect, err error) { if conn.released { return @@ -260,6 +353,10 @@ func (ch *clickhouse) release(conn *connect, err error) { conn.close() return } + if ch.opt.FreeBufOnConnRelease { + conn.buffer = new(chproto.Buffer) + conn.compressor.Data = nil + } select { case ch.idle <- conn: default: @@ -273,6 +370,7 @@ func (ch *clickhouse) Close() error { case c := <-ch.idle: c.close() default: + ch.exit <- struct{}{} return nil } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go index 17a27e9..5737602 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go @@ -27,10 +27,48 @@ import ( "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/compress" + "github.com/ClickHouse/ch-go/compress" + "github.com/pkg/errors" ) -var CompressionLZ4 compress.Method = compress.LZ4 +type CompressionMethod byte + +func (c CompressionMethod) String() string { + switch c { + case CompressionNone: + return "none" + case CompressionZSTD: + return "zstd" + case CompressionLZ4: + return "lz4" + case CompressionGZIP: + return "gzip" + case CompressionDeflate: + return "deflate" + case CompressionBrotli: + return "br" + default: + return "" + } +} + +const ( + CompressionNone = CompressionMethod(compress.None) + CompressionLZ4 = CompressionMethod(compress.LZ4) + CompressionZSTD = CompressionMethod(compress.ZSTD) + CompressionGZIP = CompressionMethod(0x95) + CompressionDeflate = CompressionMethod(0x96) + CompressionBrotli = CompressionMethod(0x97) +) + +var compressionMap = map[string]CompressionMethod{ + "none": CompressionNone, + "zstd": CompressionZSTD, + "lz4": CompressionLZ4, + "gzip": CompressionGZIP, + "deflate": CompressionDeflate, + "br": CompressionBrotli, +} type Auth struct { // has_control_character Database string @@ -39,7 +77,9 @@ type Auth struct { // has_control_character } type Compression struct { - Method compress.Method + Method CompressionMethod + // this only applies to zlib and brotli compression algorithms + Level int } type ConnOpenStrategy uint8 @@ -47,15 +87,27 @@ type ConnOpenStrategy uint8 const ( ConnOpenInOrder ConnOpenStrategy = iota ConnOpenRoundRobin + ConnOpenRandom ) -type InterfaceType int +type Protocol int const ( - NativeInterface InterfaceType = iota - HttpInterface + Native Protocol = iota + HTTP ) +func (p Protocol) String() string { + switch p { + case Native: + return "native" + case HTTP: + return "http" + default: + return "" + } +} + func ParseDSN(dsn string) (*Options, error) { opt := &Options{} if err := opt.fromDSN(dsn); err != nil { @@ -64,23 +116,36 @@ func ParseDSN(dsn string) (*Options, error) { return opt, nil } +type Dial func(ctx context.Context, addr string, opt *Options) (DialResult, error) +type DialResult struct { + conn *connect +} + type Options struct { - Interface InterfaceType - - TLS *tls.Config - Addr []string - Auth Auth - DialContext func(ctx context.Context, addr string) (net.Conn, error) - Debug bool - 
Debugf func(format string, v ...interface{}) // only works when Debug is true - Settings Settings - Compression *Compression - DialTimeout time.Duration // default 1 second - MaxOpenConns int // default MaxIdleConns + 5 - MaxIdleConns int // default 5 - ConnMaxLifetime time.Duration // default 1 hour - ConnOpenStrategy ConnOpenStrategy + Protocol Protocol + ClientInfo ClientInfo + TLS *tls.Config + Addr []string + Auth Auth + DialContext func(ctx context.Context, addr string) (net.Conn, error) + DialStrategy func(ctx context.Context, connID int, options *Options, dial Dial) (DialResult, error) + Debug bool + Debugf func(format string, v ...any) // only works when Debug is true + Settings Settings + Compression *Compression + DialTimeout time.Duration // default 30 second + MaxOpenConns int // default MaxIdleConns + 5 + MaxIdleConns int // default 5 + ConnMaxLifetime time.Duration // default 1 hour + ConnOpenStrategy ConnOpenStrategy + FreeBufOnConnRelease bool // drop preserved memory buffer after each query + HttpHeaders map[string]string // set additional headers on HTTP requests + HttpUrlPath string // set additional URL path for HTTP requests + BlockBufferSize uint8 // default 2 - can be overwritten on query + MaxCompressionBuffer int // default 10485760 - measured in bytes i.e. 10MiB + + scheme string ReadTimeout time.Duration } @@ -89,6 +154,11 @@ func (o *Options) fromDSN(in string) error { if err != nil { return err } + + if dsn.Host == "" { + return errors.New("parse dsn address failed") + } + if o.Settings == nil { o.Settings = make(Settings) } @@ -103,40 +173,135 @@ func (o *Options) fromDSN(in string) error { skipVerify bool ) o.Auth.Database = strings.TrimPrefix(dsn.Path, "/") + for v := range params { switch v { case "debug": o.Debug, _ = strconv.ParseBool(params.Get(v)) case "compress": if on, _ := strconv.ParseBool(params.Get(v)); on { + if o.Compression == nil { + o.Compression = &Compression{} + } + + o.Compression.Method = CompressionLZ4 + continue + } + if compressMethod, ok := compressionMap[params.Get(v)]; ok { + if o.Compression == nil { + o.Compression = &Compression{ + // default for now same as Clickhouse - https://clickhouse.com/docs/en/operations/settings/settings#settings-http_zlib_compression_level + Level: 3, + } + } + + o.Compression.Method = compressMethod + } + case "compress_level": + level, err := strconv.ParseInt(params.Get(v), 10, 8) + if err != nil { + return errors.Wrap(err, "compress_level invalid value") + } + + if o.Compression == nil { o.Compression = &Compression{ - Method: CompressionLZ4, + // a level alone doesn't enable compression + Method: CompressionNone, + Level: int(level), } + continue } + + o.Compression.Level = int(level) + case "max_compression_buffer": + max, err := strconv.Atoi(params.Get(v)) + if err != nil { + return errors.Wrap(err, "max_compression_buffer invalid value") + } + o.MaxCompressionBuffer = max case "dial_timeout": duration, err := time.ParseDuration(params.Get(v)) if err != nil { return fmt.Errorf("clickhouse [dsn parse]: dial timeout: %s", err) } o.DialTimeout = duration + case "block_buffer_size": + if blockBufferSize, err := strconv.ParseUint(params.Get(v), 10, 8); err == nil { + if blockBufferSize <= 0 { + return fmt.Errorf("block_buffer_size must be greater than 0") + } + o.BlockBufferSize = uint8(blockBufferSize) + } else { + return err + } case "read_timeout": duration, err := time.ParseDuration(params.Get(v)) if err != nil { - return fmt.Errorf("clickhouse [dsn parse]: http timeout: %s", err) + return 
fmt.Errorf("clickhouse [dsn parse]:read timeout: %s", err) } o.ReadTimeout = duration case "secure": - secure = true + secureParam := params.Get(v) + if secureParam == "" { + secure = true + } else { + secure, err = strconv.ParseBool(secureParam) + if err != nil { + return fmt.Errorf("clickhouse [dsn parse]:secure: %s", err) + } + } case "skip_verify": - skipVerify = true + skipVerifyParam := params.Get(v) + if skipVerifyParam == "" { + skipVerify = true + } else { + skipVerify, err = strconv.ParseBool(skipVerifyParam) + if err != nil { + return fmt.Errorf("clickhouse [dsn parse]:verify: %s", err) + } + } case "connection_open_strategy": switch params.Get(v) { case "in_order": o.ConnOpenStrategy = ConnOpenInOrder case "round_robin": o.ConnOpenStrategy = ConnOpenRoundRobin + case "random": + o.ConnOpenStrategy = ConnOpenRandom + } + case "max_open_conns": + maxOpenConns, err := strconv.Atoi(params.Get(v)) + if err != nil { + return errors.Wrap(err, "max_open_conns invalid value") + } + o.MaxOpenConns = maxOpenConns + case "max_idle_conns": + maxIdleConns, err := strconv.Atoi(params.Get(v)) + if err != nil { + return errors.Wrap(err, "max_idle_conns invalid value") } + o.MaxIdleConns = maxIdleConns + case "conn_max_lifetime": + connMaxLifetime, err := time.ParseDuration(params.Get(v)) + if err != nil { + return errors.Wrap(err, "conn_max_lifetime invalid value") + } + o.ConnMaxLifetime = connMaxLifetime + case "username": + o.Auth.Username = params.Get(v) + case "password": + o.Auth.Password = params.Get(v) + case "client_info_product": + chunks := strings.Split(params.Get(v), ",") + + for _, chunk := range chunks { + name, version, _ := strings.Cut(chunk, "/") + o.ClientInfo.Products = append(o.ClientInfo.Products, struct{ Name, Version string }{ + name, + version, + }) + } default: switch p := strings.ToLower(params.Get(v)); p { case "true": @@ -146,6 +311,8 @@ func (o *Options) fromDSN(in string) error { default: if n, err := strconv.Atoi(p); err == nil { o.Settings[v] = n + } else { + o.Settings[v] = p } } } @@ -155,32 +322,34 @@ func (o *Options) fromDSN(in string) error { InsecureSkipVerify: skipVerify, } } + o.scheme = dsn.Scheme switch dsn.Scheme { case "http": if secure { - o.TLS = nil + return fmt.Errorf("clickhouse [dsn parse]: http with TLS specify") } - o.Interface = HttpInterface + o.Protocol = HTTP case "https": if !secure { - return fmt.Errorf("clickhouse [dsn parse]: https without TLS specify") + return fmt.Errorf("clickhouse [dsn parse]: https without TLS") } - o.Interface = HttpInterface + o.Protocol = HTTP default: - o.Interface = NativeInterface + o.Protocol = Native } return nil } -func (o *Options) setDefaults() { - if len(o.Auth.Database) == 0 { - o.Auth.Database = "default" - } +// receive copy of Options, so we don't modify original - so its reusable +func (o Options) setDefaults() *Options { if len(o.Auth.Username) == 0 { o.Auth.Username = "default" } if o.DialTimeout == 0 { - o.DialTimeout = time.Second + o.DialTimeout = time.Second * 30 + } + if o.ReadTimeout == 0 { + o.ReadTimeout = time.Second * time.Duration(300) } if o.MaxIdleConns <= 0 { o.MaxIdleConns = 5 @@ -191,4 +360,19 @@ func (o *Options) setDefaults() { if o.ConnMaxLifetime == 0 { o.ConnMaxLifetime = time.Hour } + if o.BlockBufferSize <= 0 { + o.BlockBufferSize = 2 + } + if o.MaxCompressionBuffer <= 0 { + o.MaxCompressionBuffer = 10485760 + } + if o.Addr == nil || len(o.Addr) == 0 { + switch o.Protocol { + case Native: + o.Addr = []string{"localhost:9000"} + case HTTP: + o.Addr = 
[]string{"localhost:8123"} + } + } + return &o } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go index 2b0bd37..1686e90 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go @@ -46,13 +46,15 @@ func (r *rows) Next() (result bool) { } next: if r.row >= r.block.Rows() { + if r.stream == nil { + return false + } select { case err := <-r.errors: if err != nil { r.err = err return false } - goto next case block := <-r.stream: if block == nil { return false @@ -63,19 +65,20 @@ next: } r.row, r.block = 0, block } + goto next } r.row++ return r.row <= r.block.Rows() } -func (r *rows) Scan(dest ...interface{}) error { +func (r *rows) Scan(dest ...any) error { if r.block == nil || (r.row == 0 && r.row >= r.block.Rows()) { // call without next when result is empty return io.EOF } return scan(r.block, r.row, dest...) } -func (r *rows) ScanStruct(dest interface{}) error { +func (r *rows) ScanStruct(dest any) error { values, err := r.structMap.Map("ScanStruct", r.columns, dest, true) if err != nil { return err @@ -83,7 +86,7 @@ func (r *rows) ScanStruct(dest interface{}) error { return r.Scan(values...) } -func (r *rows) Totals(dest ...interface{}) error { +func (r *rows) Totals(dest ...any) error { if r.totals == nil { return sql.ErrNoRows } @@ -95,27 +98,43 @@ func (r *rows) Columns() []string { } func (r *rows) Close() error { - active := 2 + if r.errors == nil && r.stream == nil { + return r.err + } + + if r.errors == nil { + for range r.stream { + } + return nil + } + + if r.stream == nil { + for err := range r.errors { + r.err = err + } + return r.err + } + + errorsClosed := false + streamClosed := false for { select { case _, ok := <-r.stream: if !ok { - active-- - if active == 0 { - return r.err - } + streamClosed = true } case err, ok := <-r.errors: if err != nil { r.err = err } if !ok { - active-- - if active == 0 { - return r.err - } + errorsClosed = true } } + + if errorsClosed && streamClosed { + return r.err + } } } @@ -132,7 +151,7 @@ func (r *row) Err() error { return r.err } -func (r *row) ScanStruct(dest interface{}) error { +func (r *row) ScanStruct(dest any) error { if r.err != nil { return r.err } @@ -143,7 +162,7 @@ func (r *row) ScanStruct(dest interface{}) error { return r.Scan(values...) 
} -func (r *row) Scan(dest ...interface{}) error { +func (r *row) Scan(dest ...any) error { if r.err != nil { return r.err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go index cff3846..7b18480 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go @@ -24,57 +24,66 @@ import ( "errors" "fmt" "io" + "log" + "math/rand" + "net" + "os" "reflect" "strings" "sync/atomic" + "syscall" "github.com/ClickHouse/clickhouse-go/v2/lib/column" + ldriver "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) var globalConnID int64 type stdConnOpener struct { - err error - opt *Options + err error + opt *Options + debugf func(format string, v ...any) } func (o *stdConnOpener) Driver() driver.Driver { - return &stdDriver{} + var debugf = func(format string, v ...any) {} + if o.opt.Debug { + if o.opt.Debugf != nil { + debugf = o.opt.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std] ", 0).Printf + } + } + return &stdDriver{debugf: debugf} } func (o *stdConnOpener) Connect(ctx context.Context) (_ driver.Conn, err error) { if o.err != nil { + o.debugf("[connect] opener error: %v\n", o.err) return nil, o.err } var ( - runtimeTransport transport - connID = int(atomic.AddInt64(&globalConnID, 1)) - dialFunc func(ctx context.Context, addr string, num int, opt *Options) (transport, error) + conn stdConnect + connID = int(atomic.AddInt64(&globalConnID, 1)) + dialFunc func(ctx context.Context, addr string, num int, opt *Options) (stdConnect, error) ) - switch o.opt.Interface { - case HttpInterface: - dialFunc = func(ctx context.Context, addr string, num int, opt *Options) (transport, error) { - var conn *httpConnect - if conn, err = dialHttp(ctx, addr, num, o.opt); err == nil { - return &httpTransport{ - conn: conn, - }, nil - } - return nil, err + switch o.opt.Protocol { + case HTTP: + dialFunc = func(ctx context.Context, addr string, num int, opt *Options) (stdConnect, error) { + return dialHttp(ctx, addr, num, opt) } default: - dialFunc = func(ctx context.Context, addr string, num int, opt *Options) (transport, error) { - var conn *connect - if conn, err = dial(ctx, addr, num, o.opt); err == nil { - return &nativeTransport{ - conn: conn, - }, nil - } - return nil, err + dialFunc = func(ctx context.Context, addr string, num int, opt *Options) (stdConnect, error) { + return dial(ctx, addr, num, opt) } } + if o.opt.Addr == nil || len(o.opt.Addr) == 0 { + return nil, ErrAcquireConnNoAddress + } + + random := rand.Int() for i := range o.opt.Addr { var num int switch o.opt.ConnOpenStrategy { @@ -82,21 +91,75 @@ func (o *stdConnOpener) Connect(ctx context.Context) (_ driver.Conn, err error) num = i case ConnOpenRoundRobin: num = (int(connID) + i) % len(o.opt.Addr) + case ConnOpenRandom: + num = (random + i) % len(o.opt.Addr) } - if runtimeTransport, err = dialFunc(ctx, o.opt.Addr[num], connID, o.opt); err == nil { + if conn, err = dialFunc(ctx, o.opt.Addr[num], connID, o.opt); err == nil { + var debugf = func(format string, v ...any) {} + if o.opt.Debug { + if o.opt.Debugf != nil { + debugf = o.opt.Debugf + } else { + debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][conn=%d][%s] ", num, o.opt.Addr[num]), 0).Printf + } + } return &stdDriver{ - transport: runtimeTransport, + conn: conn, + debugf: debugf, }, nil + } else { + o.debugf("[connect] error connecting to %s on connection %d: %v\n", o.opt.Addr[num], connID, 
err) } } + return nil, err } +var _ driver.Connector = (*stdConnOpener)(nil) + func init() { - sql.Register("clickhouse", &stdDriver{}) + var debugf = func(format string, v ...any) {} + sql.Register("clickhouse", &stdDriver{debugf: debugf}) +} + +// isConnBrokenError returns true if the error class indicates that the +// db connection is no longer usable and should be marked bad +func isConnBrokenError(err error) bool { + if errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + return true + } + if _, ok := err.(*net.OpError); ok { + return true + } + return false +} + +func Connector(opt *Options) driver.Connector { + if opt == nil { + opt = &Options{} + } + + o := opt.setDefaults() + + var debugf = func(format string, v ...any) {} + if o.Debug { + if o.Debugf != nil { + debugf = o.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + } + } + return &stdConnOpener{ + opt: o, + debugf: debugf, + } } func OpenDB(opt *Options) *sql.DB { + var debugf = func(format string, v ...any) {} + if opt == nil { + opt = &Options{} + } var settings []string if opt.MaxIdleConns > 0 { settings = append(settings, "SetMaxIdleConns") @@ -107,182 +170,196 @@ func OpenDB(opt *Options) *sql.DB { if opt.ConnMaxLifetime > 0 { settings = append(settings, "SetConnMaxLifetime") } + if opt.Debug { + if opt.Debugf != nil { + debugf = opt.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + } + } if len(settings) != 0 { return sql.OpenDB(&stdConnOpener{ - err: fmt.Errorf("cannot connect. invalid settings. use %s (see https://pkg.go.dev/database/sql)", strings.Join(settings, ",")), + err: fmt.Errorf("cannot connect. invalid settings. use %s (see https://pkg.go.dev/database/sql)", strings.Join(settings, ",")), + debugf: debugf, }) } - opt.setDefaults() + o := opt.setDefaults() return sql.OpenDB(&stdConnOpener{ - opt: opt, + opt: o, + debugf: debugf, }) } -type transport interface { - ResetSession(ctx context.Context) error - Ping(ctx context.Context) error - Begin() (driver.Tx, error) - Commit() error - Rollback() error - CheckNamedValue(nv *driver.NamedValue) error - ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) - QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) - Prepare(query string) (driver.Stmt, error) - Close() error +type stdConnect interface { + isBad() bool + close() error + query(ctx context.Context, release func(*connect, error), query string, args ...any) (*rows, error) + exec(ctx context.Context, query string, args ...any) error + ping(ctx context.Context) (err error) + prepareBatch(ctx context.Context, query string, options ldriver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (ldriver.Batch, error) + asyncInsert(ctx context.Context, query string, wait bool, args ...any) error } type stdDriver struct { - transport + conn stdConnect + commit func() error + debugf func(format string, v ...any) } -func (d *stdDriver) Open(dsn string) (_ driver.Conn, err error) { +var _ driver.Conn = (*stdDriver)(nil) +var _ driver.ConnBeginTx = (*stdDriver)(nil) +var _ driver.ExecerContext = (*stdDriver)(nil) +var _ driver.QueryerContext = (*stdDriver)(nil) +var _ driver.ConnPrepareContext = (*stdDriver)(nil) + +func (std *stdDriver) Open(dsn string) (_ driver.Conn, err error) { var opt Options if err := opt.fromDSN(dsn); err != nil { + std.debugf("Open dsn error: 
%v\n", err) return nil, err } - opt.setDefaults() - return (&stdConnOpener{opt: &opt}).Connect(context.Background()) -} - -var ErrHttpNotSupported = errors.New("HTTP: not supported") - -type httpTransport struct { - conn *httpConnect -} - -func (h httpTransport) ResetSession(ctx context.Context) error { - //TODO implement me - return ErrHttpNotSupported -} - -func (h httpTransport) Ping(ctx context.Context) error { - return h.conn.ping(ctx) -} - -func (h httpTransport) Begin() (driver.Tx, error) { - //TODO implement me - return nil, ErrHttpNotSupported -} - -func (h httpTransport) Commit() error { - //TODO implement me - return ErrHttpNotSupported -} - -func (h httpTransport) Rollback() error { - //TODO implement me - return ErrHttpNotSupported -} - -func (h httpTransport) CheckNamedValue(nv *driver.NamedValue) error { - //TODO implement me - return ErrHttpNotSupported -} - -func (h httpTransport) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - //TODO implement me - return nil, ErrHttpNotSupported -} - -func (h httpTransport) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - //TODO implement me - return nil, ErrHttpNotSupported -} - -func (h httpTransport) Prepare(query string) (driver.Stmt, error) { - //TODO implement me - return nil, ErrHttpNotSupported + o := opt.setDefaults() + var debugf = func(format string, v ...any) {} + if o.Debug { + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + } + o.ClientInfo.comment = []string{"database/sql"} + return (&stdConnOpener{opt: o, debugf: debugf}).Connect(context.Background()) } -func (h httpTransport) Close() error { - return h.conn.close() -} +var _ driver.Driver = (*stdDriver)(nil) -type nativeTransport struct { - conn *connect - commit func() error -} - -func (std *nativeTransport) ResetSession(ctx context.Context) error { +func (std *stdDriver) ResetSession(ctx context.Context) error { if std.conn.isBad() { + std.debugf("Resetting session because connection is bad") return driver.ErrBadConn } return nil } -func (std *nativeTransport) Ping(ctx context.Context) error { return std.conn.ping(ctx) } +var _ driver.SessionResetter = (*stdDriver)(nil) + +func (std *stdDriver) Ping(ctx context.Context) error { return std.conn.ping(ctx) } -func (std *nativeTransport) Begin() (driver.Tx, error) { return std, nil } +var _ driver.Pinger = (*stdDriver)(nil) -func (std *nativeTransport) Commit() error { +func (std *stdDriver) Begin() (driver.Tx, error) { return std, nil } +func (std *stdDriver) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + return std, nil +} + +func (std *stdDriver) Commit() error { if std.commit == nil { return nil } defer func() { std.commit = nil }() - return std.commit() + + if err := std.commit(); err != nil { + if isConnBrokenError(err) { + std.debugf("Commit got EOF error: resetting connection") + return driver.ErrBadConn + } + std.debugf("Commit error: %v\n", err) + return err + } + return nil } -func (std *nativeTransport) Rollback() error { +func (std *stdDriver) Rollback() error { std.commit = nil std.conn.close() return nil } -func (std *nativeTransport) CheckNamedValue(nv *driver.NamedValue) error { return nil } +var _ driver.Tx = (*stdDriver)(nil) + +func (std *stdDriver) CheckNamedValue(nv *driver.NamedValue) error { return nil } -func (std *nativeTransport) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { +var _ 
driver.NamedValueChecker = (*stdDriver)(nil) + +func (std *stdDriver) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + var err error if options := queryOptions(ctx); options.async.ok { - if len(args) != 0 { - return nil, errors.New("clickhouse: you can't use parameters in an asynchronous insert") - } - return driver.RowsAffected(0), std.conn.asyncInsert(ctx, query, options.async.wait) + err = std.conn.asyncInsert(ctx, query, options.async.wait, rebind(args)...) + } else { + err = std.conn.exec(ctx, query, rebind(args)...) } - if err := std.conn.exec(ctx, query, rebind(args)...); err != nil { + + if err != nil { + if isConnBrokenError(err) { + std.debugf("ExecContext got a fatal error, resetting connection: %v\n", err) + return nil, driver.ErrBadConn + } + std.debugf("ExecContext error: %v\n", err) return nil, err } return driver.RowsAffected(0), nil } -func (std *nativeTransport) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { +func (std *stdDriver) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { r, err := std.conn.query(ctx, func(*connect, error) {}, query, rebind(args)...) + if isConnBrokenError(err) { + std.debugf("QueryContext got a fatal error, resetting connection: %v\n", err) + return nil, driver.ErrBadConn + } if err != nil { + std.debugf("QueryContext error: %v\n", err) return nil, err } return &stdRows{ - rows: r, + rows: r, + debugf: std.debugf, }, nil } -func (std *nativeTransport) Prepare(query string) (driver.Stmt, error) { +func (std *stdDriver) Prepare(query string) (driver.Stmt, error) { return std.PrepareContext(context.Background(), query) } -func (std *nativeTransport) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - batch, err := std.conn.prepareBatch(ctx, query, func(*connect, error) {}) +func (std *stdDriver) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + batch, err := std.conn.prepareBatch(ctx, query, ldriver.PrepareBatchOptions{}, func(*connect, error) {}, func(context.Context) (*connect, error) { return nil, nil }) if err != nil { + if isConnBrokenError(err) { + std.debugf("PrepareContext got a fatal error, resetting connection: %v\n", err) + return nil, driver.ErrBadConn + } + std.debugf("PrepareContext error: %v\n", err) return nil, err } std.commit = batch.Send return &stdBatch{ - batch: batch, + batch: batch, + debugf: std.debugf, }, nil } -func (std *nativeTransport) Close() error { return std.conn.close() } +func (std *stdDriver) Close() error { + err := std.conn.close() + if err != nil { + if isConnBrokenError(err) { + std.debugf("Close got a fatal error, resetting connection: %v\n", err) + return driver.ErrBadConn + } + std.debugf("Close error: %v\n", err) + } + return err +} type stdBatch struct { - batch *batch + batch ldriver.Batch + debugf func(format string, v ...any) } func (s *stdBatch) NumInput() int { return -1 } func (s *stdBatch) Exec(args []driver.Value) (driver.Result, error) { - values := make([]interface{}, 0, len(args)) + values := make([]any, 0, len(args)) for _, v := range args { values = append(values, v) } if err := s.batch.Append(values...); err != nil { + s.debugf("[batch][exec] append error: %v", err) return nil, err } return driver.RowsAffected(0), nil @@ -296,14 +373,18 @@ func (s *stdBatch) ExecContext(ctx context.Context, args []driver.NamedValue) (d return s.Exec(values) } +var _ driver.StmtExecContext = (*stdBatch)(nil) + func 
(s *stdBatch) Query(args []driver.Value) (driver.Rows, error) { + // Note: not implementing driver.StmtQueryContext accordingly return nil, errors.New("only Exec method supported in batch mode") } func (s *stdBatch) Close() error { return nil } type stdRows struct { - rows *rows + rows *rows + debugf func(format string, v ...any) } func (r *stdRows) Columns() []string { @@ -314,6 +395,8 @@ func (r *stdRows) ColumnTypeScanType(idx int) reflect.Type { return r.rows.block.Columns[idx].ScanType() } +var _ driver.RowsColumnTypeScanType = (*stdRows)(nil) + func (r *stdRows) ColumnTypeDatabaseTypeName(idx int) string { return string(r.rows.block.Columns[idx].Type()) } @@ -336,11 +419,19 @@ func (r *stdRows) ColumnTypePrecisionScale(idx int) (precision, scale int64, ok return 0, 0, false } +var _ driver.Rows = (*stdRows)(nil) +var _ driver.RowsNextResultSet = (*stdRows)(nil) +var _ driver.RowsColumnTypeDatabaseTypeName = (*stdRows)(nil) +var _ driver.RowsColumnTypeNullable = (*stdRows)(nil) +var _ driver.RowsColumnTypePrecisionScale = (*stdRows)(nil) + func (r *stdRows) Next(dest []driver.Value) error { if len(r.rows.block.Columns) != len(dest) { + err := fmt.Errorf("expected %d destination arguments in Next, not %d", len(r.rows.block.Columns), len(dest)) + r.debugf("Next length error: %v\n", err) return &OpError{ Op: "Next", - Err: fmt.Errorf("expected %d destination arguments in Next, not %d", len(r.rows.block.Columns), len(dest)), + Err: err, } } if r.rows.Next() { @@ -350,16 +441,33 @@ func (r *stdRows) Next(dest []driver.Value) error { case driver.Valuer: v, err := value.Value() if err != nil { + r.debugf("Next row error: %v\n", err) return err } dest[i] = v default: + // We don't know what is the destination type at this stage, + // but destination type might be a sql.Null* type that expects to receive a value + // instead of a pointer to a value. ClickHouse-go returns pointers to values for nullable columns. + // + // This is a compatibility layer to make sure that the driver works with the standard library. + // Due to reflection used it has a performance cost. + if nullable { + if value == nil { + dest[i] = nil + continue + } + rv := reflect.ValueOf(value) + value = rv.Elem().Interface() + } + dest[i] = value } } return nil } if err := r.rows.Err(); err != nil { + r.debugf("Next rows error: %v\n", err) return err } return io.EOF @@ -380,6 +488,14 @@ func (r *stdRows) NextResultSet() error { return nil } +var _ driver.RowsNextResultSet = (*stdRows)(nil) + func (r *stdRows) Close() error { - return r.rows.Close() + err := r.rows.Close() + if err != nil { + r.debugf("Rows Close error: %v\n", err) + } + return err } + +var _ driver.Rows = (*stdRows)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go new file mode 100644 index 0000000..7140069 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go @@ -0,0 +1,86 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "fmt" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" + "runtime" + "sort" + "strings" +) + +const ClientName = "clickhouse-go" + +const ( + ClientVersionMajor = 2 + ClientVersionMinor = 27 + ClientVersionPatch = 1 + ClientTCPProtocolVersion = proto.DBMS_TCP_PROTOCOL_VERSION +) + +type ClientInfo struct { + Products []struct { + Name string + Version string + } + + comment []string +} + +func (o ClientInfo) String() string { + var s strings.Builder + + info := o + + info.Products = append(info.Products, struct{ Name, Version string }{ + Name: ClientName, + Version: fmt.Sprintf("%d.%d.%d", ClientVersionMajor, ClientVersionMinor, ClientVersionPatch), + }) + + encodedProducts := make([]string, len(info.Products)) + for i, product := range info.Products { + encodedProducts[i] = fmt.Sprintf("%s/%s", product.Name, product.Version) + } + s.WriteString(strings.Join(encodedProducts, " ")) + + lvMeta := "lv:go/" + runtime.Version()[2:] + osMeta := "os:" + runtime.GOOS + + chunks := append(info.comment, lvMeta, osMeta) // nolint:gocritic + + s.WriteByte(' ') + s.WriteByte('(') + s.WriteString(strings.Join(chunks, "; ")) + s.WriteByte(')') + + return s.String() +} + +func mapKeysInOrder[V any](m map[string]V) []string { + keys := make([]string, 0, len(m)) + for key := range m { + keys = append(keys, key) + } + + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + return keys +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go index a38548d..6d831a0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go @@ -21,13 +21,18 @@ import ( "context" "crypto/tls" "fmt" + "io" "log" "net" "os" + "syscall" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" - "github.com/ClickHouse/clickhouse-go/v2/lib/io" + "github.com/ClickHouse/clickhouse-go/v2/resources" + "github.com/pkg/errors" + + "github.com/ClickHouse/ch-go/compress" + chproto "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) @@ -35,7 +40,7 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er var ( err error conn net.Conn - debugf = func(format string, v ...interface{}) {} + debugf = func(format string, v ...any) {} ) switch { case opt.DialContext != nil: @@ -53,67 +58,103 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er } if opt.Debug { if opt.Debugf != nil { - debugf = opt.Debugf + debugf = func(format string, v ...any) { + opt.Debugf( + "[clickhouse][conn=%d][%s] "+format, + append([]interface{}{num, conn.RemoteAddr()}, v...)..., + ) + } } else { debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse][conn=%d][%s]", num, conn.RemoteAddr()), 0).Printf } } - var compression bool + compression := CompressionNone if opt.Compression != nil { - compression = opt.Compression.Method == CompressionLZ4 + switch opt.Compression.Method { + case CompressionLZ4, CompressionZSTD, CompressionNone: + compression = 
opt.Compression.Method + default: + return nil, fmt.Errorf("unsupported compression method for native protocol") + } } + var ( - stream = io.NewStream(conn) connect = &connect{ - opt: opt, - conn: conn, - debugf: debugf, - stream: stream, - encoder: binary.NewEncoder(stream), - decoder: binary.NewDecoder(stream), - revision: proto.ClientTCPProtocolVersion, - structMap: &structMap{}, - compression: compression, - connectedAt: time.Now(), + id: num, + opt: opt, + conn: conn, + debugf: debugf, + buffer: new(chproto.Buffer), + reader: chproto.NewReader(conn), + revision: ClientTCPProtocolVersion, + structMap: &structMap{}, + compression: compression, + connectedAt: time.Now(), + compressor: compress.NewWriter(), + readTimeout: opt.ReadTimeout, + blockBufferSize: opt.BlockBufferSize, + maxCompressionBuffer: opt.MaxCompressionBuffer, } ) if err := connect.handshake(opt.Auth.Database, opt.Auth.Username, opt.Auth.Password); err != nil { return nil, err } + if connect.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM { + if err := connect.sendAddendum(); err != nil { + return nil, err + } + } + + // warn only on the first connection in the pool + if num == 1 && !resources.ClientMeta.IsSupportedClickHouseVersion(connect.server.Version) { + debugf("[handshake] WARNING: version %v of ClickHouse is not supported by this client - client supports %v", connect.server.Version, resources.ClientMeta.SupportedVersions()) + } return connect, nil } // https://github.com/ClickHouse/ClickHouse/blob/master/src/Client/Connection.cpp type connect struct { - opt *Options - conn net.Conn - debugf func(format string, v ...interface{}) - server ServerVersion - stream *io.Stream - closed bool - encoder *binary.Encoder - decoder *binary.Decoder - released bool - revision uint64 - structMap *structMap - compression bool - // lastUsedIn time.Time - connectedAt time.Time + id int + opt *Options + conn net.Conn + debugf func(format string, v ...any) + server ServerVersion + closed bool + buffer *chproto.Buffer + reader *chproto.Reader + released bool + revision uint64 + structMap *structMap + compression CompressionMethod + connectedAt time.Time + compressor *compress.Writer + readTimeout time.Duration + blockBufferSize uint8 + maxCompressionBuffer int } func (c *connect) settings(querySettings Settings) []proto.Setting { + settingToProtoSetting := func(k string, v any) proto.Setting { + isCustom := false + if cv, ok := v.(CustomSetting); ok { + v = cv.Value + isCustom = true + } + + return proto.Setting{ + Key: k, + Value: v, + Important: !isCustom, + Custom: isCustom, + } + } + settings := make([]proto.Setting, 0, len(c.opt.Settings)+len(querySettings)) for k, v := range c.opt.Settings { - settings = append(settings, proto.Setting{ - Key: k, - Value: v, - }) + settings = append(settings, settingToProtoSetting(k, v)) } for k, v := range querySettings { - settings = append(settings, proto.Setting{ - Key: k, - Value: v, - }) + settings = append(settings, settingToProtoSetting(k, v)) } return settings } @@ -123,6 +164,11 @@ func (c *connect) isBad() bool { case c.closed: return true } + + if time.Since(c.connectedAt) >= c.opt.ConnMaxLifetime { + return true + } + if err := c.connCheck(); err != nil { return true } @@ -134,9 +180,8 @@ func (c *connect) close() error { return nil } c.closed = true - c.encoder = nil - c.decoder = nil - c.stream.Close() + c.buffer = nil + c.reader = nil if err := c.conn.Close(); err != nil { return err } @@ -145,7 +190,7 @@ func (c *connect) close() error { func (c *connect) progress() 
(*Progress, error) { var progress proto.Progress - if err := progress.Decode(c.decoder, c.revision); err != nil { + if err := progress.Decode(c.reader, c.revision); err != nil { return nil, err } c.debugf("[progress] %s", &progress) @@ -154,44 +199,110 @@ func (c *connect) progress() (*Progress, error) { func (c *connect) exception() error { var e Exception - if err := e.Decode(c.decoder); err != nil { + if err := e.Decode(c.reader); err != nil { return err } c.debugf("[exception] %s", e.Error()) return &e } +func (c *connect) compressBuffer(start int) error { + if c.compression != CompressionNone && len(c.buffer.Buf) > 0 { + data := c.buffer.Buf[start:] + if err := c.compressor.Compress(compress.Method(c.compression), data); err != nil { + return errors.Wrap(err, "compress") + } + c.buffer.Buf = append(c.buffer.Buf[:start], c.compressor.Data...) + } + return nil +} + func (c *connect) sendData(block *proto.Block, name string) error { - c.debugf("[send data] compression=%t", c.compression) - if err := c.encoder.Byte(proto.ClientData); err != nil { + c.debugf("[send data] compression=%q", c.compression) + c.buffer.PutByte(proto.ClientData) + c.buffer.PutString(name) + + compressionOffset := len(c.buffer.Buf) + + if err := block.EncodeHeader(c.buffer, c.revision); err != nil { return err } - if err := c.encoder.String(name); err != nil { + for i := range block.Columns { + if err := block.EncodeColumn(c.buffer, c.revision, i); err != nil { + return err + } + if len(c.buffer.Buf) >= c.maxCompressionBuffer { + if err := c.compressBuffer(compressionOffset); err != nil { + return err + } + c.debugf("[buff compress] buffer size: %d", len(c.buffer.Buf)) + if err := c.flush(); err != nil { + return err + } + compressionOffset = 0 + } + } + if err := c.compressBuffer(compressionOffset); err != nil { return err } - if c.compression { - c.stream.Compress(true) - defer func() { - c.stream.Compress(false) - c.encoder.Flush() - }() + if err := c.flush(); err != nil { + switch { + case errors.Is(err, syscall.EPIPE): + c.debugf("[send data] pipe is broken, closing connection") + c.closed = true + case errors.Is(err, io.EOF): + c.debugf("[send data] unexpected EOF, closing connection") + c.closed = true + default: + c.debugf("[send data] unexpected error: %v", err) + } + return err } - return block.Encode(c.encoder, c.revision) + defer func() { + c.buffer.Reset() + }() + return nil } -func (c *connect) readData(packet byte, compressible bool) (*proto.Block, error) { - if _, err := c.decoder.String(); err != nil { +func (c *connect) readData(ctx context.Context, packet byte, compressible bool) (*proto.Block, error) { + if _, err := c.reader.Str(); err != nil { + c.debugf("[read data] str error: %v", err) return nil, err } - if compressible && c.compression { - c.stream.Compress(true) - defer c.stream.Compress(false) + if compressible && c.compression != CompressionNone { + c.reader.EnableCompression() + defer c.reader.DisableCompression() + } + + opts := queryOptions(ctx) + location := c.server.Timezone + if opts.userLocation != nil { + location = opts.userLocation } - var block proto.Block - if err := block.Decode(c.decoder, c.revision); err != nil { + + block := proto.Block{Timezone: location} + if err := block.Decode(c.reader, c.revision); err != nil { + c.debugf("[read data] decode error: %v", err) return nil, err } block.Packet = packet - c.debugf("[read data] compression=%t. block: columns=%d, rows=%d", c.compression, len(block.Columns), block.Rows()) + c.debugf("[read data] compression=%q. 
block: columns=%d, rows=%d", c.compression, len(block.Columns), block.Rows()) return &block, nil } + +func (c *connect) flush() error { + if len(c.buffer.Buf) == 0 { + // Nothing to flush. + return nil + } + n, err := c.conn.Write(c.buffer.Buf) + if err != nil { + return errors.Wrap(err, "write") + } + if n != len(c.buffer.Buf) { + return errors.New("wrote less than expected") + } + + c.buffer.Reset() + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go index a324bd1..f37e6e6 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go @@ -19,9 +19,10 @@ package clickhouse import ( "context" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -func (c *connect) asyncInsert(ctx context.Context, query string, wait bool) error { +func (c *connect) asyncInsert(ctx context.Context, query string, wait bool, args ...any) error { options := queryOptions(ctx) { options.settings["async_insert"] = 1 @@ -30,6 +31,16 @@ func (c *connect) asyncInsert(ctx context.Context, query string, wait bool) erro options.settings["wait_for_async_insert"] = 1 } } + + if len(args) > 0 { + queryParamsProtocolSupport := c.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS + var err error + query, err = bindQueryOrAppendParameters(queryParamsProtocolSupport, &options, query, c.server.Timezone, args...) + if err != nil { + return err + } + } + if err := c.sendQuery(query, &options); err != nil { return err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go index c2af620..a729b2d 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go @@ -22,21 +22,25 @@ import ( "fmt" "os" "regexp" - "strings" + "slices" "time" + "github.com/pkg/errors" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -var splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`) +var insertMatch = regexp.MustCompile(`(?i)(INSERT\s+INTO\s+[^( ]+(?:\s*\([^()]*(?:\([^()]*\)[^()]*)*\))?)(?:\s*VALUES)?`) +var columnMatch = regexp.MustCompile(`INSERT INTO .+\s\((?P.+)\)$`) -func (c *connect) prepareBatch(ctx context.Context, query string, release func(*connect, error)) (*batch, error) { - query = splitInsertRe.Split(query, -1)[0] - if !strings.HasSuffix(strings.TrimSpace(strings.ToUpper(query)), "VALUES") { - query += " VALUES" +func (c *connect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { + query, _, queryColumns, verr := extractNormalizedInsertQueryAndColumns(query) + if verr != nil { + return nil, verr } + options := queryOptions(ctx) if deadline, ok := ctx.Deadline(); ok { c.conn.SetDeadline(deadline) @@ -54,25 +58,49 @@ func (c *connect) prepareBatch(ctx context.Context, query string, release func(* release(c, err) return nil, err } - return &batch{ - ctx: ctx, - conn: c, - block: block, - release: func(err error) { - release(c, err) - }, - onProcess: onProcess, - }, nil + // resort batch to specified columns + if err = block.SortColumns(queryColumns); err != nil { + return nil, err + } + + b := &batch{ + ctx: ctx, + query: query, + 
conn: c, + block: block, + released: false, + connRelease: release, + connAcquire: acquire, + onProcess: onProcess, + closeOnFlush: opts.CloseOnFlush, + } + + if opts.ReleaseConnection { + b.release(b.closeQuery()) + } + + return b, nil } type batch struct { - err error - ctx context.Context - conn *connect - sent bool - block *proto.Block - release func(error) - onProcess *onProcess + err error + ctx context.Context + query string + conn *connect + sent bool // sent signalize that batch is send to ClickHouse. + released bool // released signalize that conn was returned to pool and can't be used. + closeOnFlush bool // closeOnFlush signalize that batch should close query and release conn when use Flush + block *proto.Block + connRelease func(*connect, error) + connAcquire func(context.Context) (*connect, error) + onProcess *onProcess +} + +func (b *batch) release(err error) { + if !b.released { + b.released = true + b.connRelease(b.conn, err) + } } func (b *batch) Abort() error { @@ -86,18 +114,62 @@ func (b *batch) Abort() error { return nil } -func (b *batch) Append(v ...interface{}) error { +func (b *batch) Append(v ...any) error { if b.sent { return ErrBatchAlreadySent } + if b.err != nil { + return b.err + } + + if len(v) > 0 { + if r, ok := v[0].(*rows); ok { + return b.appendRowsBlocks(r) + } + } + if err := b.block.Append(v...); err != nil { + b.err = errors.Wrap(ErrBatchInvalid, err.Error()) b.release(err) return err } return nil } -func (b *batch) AppendStruct(v interface{}) error { +// appendRowsBlocks is an experimental feature that allows rows blocks be appended directly to the batch. +// This API is not stable and may be changed in the future. +// See: tests/batch_block_test.go +func (b *batch) appendRowsBlocks(r *rows) error { + var lastReadLock *proto.Block + var blockNum int + + for r.Next() { + if lastReadLock == nil { // make sure the first block is logged + b.conn.debugf("[batch.appendRowsBlocks] blockNum = %d", blockNum) + } + + // rows.Next() will read the next block from the server only if the current block is empty + // only if new block is available we should flush the current block + // the last block will be handled by the batch.Send() method + if lastReadLock != nil && lastReadLock != r.block { + if err := b.Flush(); err != nil { + return err + } + blockNum++ + b.conn.debugf("[batch.appendRowsBlocks] blockNum = %d", blockNum) + } + + b.block = r.block + lastReadLock = r.block + } + + return nil +} + +func (b *batch) AppendStruct(v any) error { + if b.err != nil { + return b.err + } values, err := b.conn.structMap.Map("AppendStruct", b.block.ColumnsNames(), v, false) if err != nil { return err @@ -105,14 +177,21 @@ func (b *batch) AppendStruct(v interface{}) error { return b.Append(values...) } +func (b *batch) IsSent() bool { + return b.sent +} + func (b *batch) Column(idx int) driver.BatchColumn { if len(b.block.Columns) <= idx { - b.release(nil) + err := &OpError{ + Op: "batch.Column", + Err: fmt.Errorf("invalid column index %d", idx), + } + + b.release(err) + return &batchColumn{ - err: &OpError{ - Op: "batch.Column", - Err: fmt.Errorf("invalid column index %d", idx), - }, + err: err, } } return &batchColumn{ @@ -126,49 +205,146 @@ func (b *batch) Column(idx int) driver.BatchColumn { } func (b *batch) Send() (err error) { + stopCW := contextWatchdog(b.ctx, func() { + // close TCP connection on context cancel. There is no other way simple way to interrupt underlying operations. 
+ // as verified in the test, this is safe to do and cleanups resources later on + if b.conn != nil { + _ = b.conn.conn.Close() + } + }) + defer func() { + stopCW() b.sent = true b.release(err) }() - if b.sent { - return ErrBatchAlreadySent - } if b.err != nil { return b.err } + if b.sent || b.released { + if err = b.resetConnection(); err != nil { + return err + } + } if b.block.Rows() != 0 { if err = b.conn.sendData(b.block, ""); err != nil { + // there might be an error caused by context cancellation + // in this case we should return context error instead of net.OpError + if ctxErr := b.ctx.Err(); ctxErr != nil { + return ctxErr + } + return err } } - if err = b.conn.sendData(&proto.Block{}, ""); err != nil { + if err = b.closeQuery(); err != nil { return err } - if err = b.conn.encoder.Flush(); err != nil { + return nil +} + +func (b *batch) resetConnection() (err error) { + // acquire a new conn + if b.conn, err = b.connAcquire(b.ctx); err != nil { return err } - if err = b.conn.process(b.ctx, b.onProcess); err != nil { + + defer func() { + b.released = false + }() + + options := queryOptions(b.ctx) + if deadline, ok := b.ctx.Deadline(); ok { + b.conn.conn.SetDeadline(deadline) + defer b.conn.conn.SetDeadline(time.Time{}) + } + + if err = b.conn.sendQuery(b.query, &options); err != nil { + b.release(err) + return err + } + + if _, err = b.conn.firstBlock(b.ctx, b.onProcess); err != nil { + b.release(err) return err } + + return nil +} + +func (b *batch) Flush() error { + if b.sent { + return ErrBatchAlreadySent + } + if b.err != nil { + return b.err + } + if b.released { + if err := b.resetConnection(); err != nil { + return err + } + } + if b.block.Rows() != 0 { + if err := b.conn.sendData(b.block, ""); err != nil { + return err + } + if b.closeOnFlush { + b.release(b.closeQuery()) + } + } + b.block.Reset() + return nil +} + +func (b *batch) Rows() int { + return b.block.Rows() +} + +func (b *batch) Columns() []column.Interface { + return slices.Clone(b.block.Columns) +} + +func (b *batch) closeQuery() error { + if err := b.conn.sendData(&proto.Block{}, ""); err != nil { + return err + } + + if err := b.conn.process(b.ctx, b.onProcess); err != nil { + return err + } + return nil } type batchColumn struct { err error - batch *batch + batch driver.Batch column column.Interface release func(error) } -func (b *batchColumn) Append(v interface{}) (err error) { - if b.batch.sent { +func (b *batchColumn) Append(v any) (err error) { + if b.err != nil { + return b.err + } + if b.batch.IsSent() { return ErrBatchAlreadySent } + if _, err = b.column.Append(v); err != nil { + b.release(err) + return err + } + return nil +} + +func (b *batchColumn) AppendRow(v any) (err error) { if b.err != nil { - b.release(b.err) return b.err } - if _, err = b.column.Append(v); err != nil { + if b.batch.IsSent() { + return ErrBatchAlreadySent + } + if err = b.column.AppendRow(v); err != nil { b.release(err) return err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_check.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_check.go index f30194f..bf85a19 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_check.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_check.go @@ -21,14 +21,20 @@ package clickhouse import ( + "crypto/tls" "errors" "io" "syscall" ) func (c *connect) connCheck() error { + conn := c.conn + if tlsConn, ok := c.conn.(*tls.Conn); ok { + conn = tlsConn.NetConn() + } + var sysErr error - sysConn, ok := c.conn.(syscall.Conn) + sysConn, ok := 
conn.(syscall.Conn) if !ok { return nil } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_exec.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_exec.go index 433aba7..6295491 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_exec.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_exec.go @@ -19,17 +19,23 @@ package clickhouse import ( "context" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" "time" ) -func (c *connect) exec(ctx context.Context, query string, args ...interface{}) error { +func (c *connect) exec(ctx context.Context, query string, args ...any) error { var ( - options = queryOptions(ctx) - body, err = bind(c.server.Timezone, query, args...) + options = queryOptions(ctx) + queryParamsProtocolSupport = c.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS + body, err = bindQueryOrAppendParameters(queryParamsProtocolSupport, &options, query, c.server.Timezone, args...) ) if err != nil { return err } + // set a read deadline - alternative to context.Read operation will fail if no data is received after deadline. + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + defer c.conn.SetReadDeadline(time.Time{}) + // context level deadlines override any read deadline if deadline, ok := ctx.Deadline(); ok { c.conn.SetDeadline(deadline) defer c.conn.SetDeadline(time.Time{}) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_handshake.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_handshake.go index 09af51c..265c52c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_handshake.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_handshake.go @@ -18,6 +18,7 @@ package clickhouse import ( + _ "embed" "fmt" "time" @@ -25,31 +26,33 @@ import ( ) func (c *connect) handshake(database, username, password string) error { + defer c.buffer.Reset() c.debugf("[handshake] -> %s", proto.ClientHandshake{}) + // set a read deadline - alternative to context.Read operation will fail if no data is received after deadline. 
+ c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + defer c.conn.SetReadDeadline(time.Time{}) + // context level deadlines override any read deadline c.conn.SetDeadline(time.Now().Add(c.opt.DialTimeout)) defer c.conn.SetDeadline(time.Time{}) { - c.encoder.Byte(proto.ClientHello) - if err := (&proto.ClientHandshake{}).Encode(c.encoder); err != nil { - return err + c.buffer.PutByte(proto.ClientHello) + handshake := &proto.ClientHandshake{ + ProtocolVersion: ClientTCPProtocolVersion, + ClientName: c.opt.ClientInfo.String(), + ClientVersion: proto.Version{ClientVersionMajor, ClientVersionMinor, ClientVersionPatch}, //nolint:govet } + handshake.Encode(c.buffer) { - if err := c.encoder.String(database); err != nil { - return err - } - if err := c.encoder.String(username); err != nil { - return err - } - if err := c.encoder.String(password); err != nil { - return err - } + c.buffer.PutString(database) + c.buffer.PutString(username) + c.buffer.PutString(password) } - if err := c.encoder.Flush(); err != nil { + if err := c.flush(); err != nil { return err } } { - packet, err := c.decoder.ReadByte() + packet, err := c.reader.ReadByte() if err != nil { return err } @@ -57,7 +60,7 @@ func (c *connect) handshake(database, username, password string) error { case proto.ServerException: return c.exception() case proto.ServerHello: - if err := c.server.Decode(c.decoder); err != nil { + if err := c.server.Decode(c.reader); err != nil { return err } case proto.ServerEndOfStream: @@ -70,6 +73,7 @@ func (c *connect) handshake(database, username, password string) error { if c.server.Revision < proto.DBMS_MIN_REVISION_WITH_CLIENT_INFO { return ErrUnsupportedServerRevision } + if c.revision > c.server.Revision { c.revision = c.server.Revision c.debugf("[handshake] downgrade client proto") @@ -77,3 +81,11 @@ func (c *connect) handshake(database, username, password string) error { c.debugf("[handshake] <- %s", c.server) return nil } + +func (c *connect) sendAddendum() error { + if c.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY { + c.buffer.PutString("") // todo quota key support + } + + return c.flush() +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go index 51fab0f..2084fbd 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go @@ -18,114 +18,551 @@ package clickhouse import ( + "bytes" + "compress/flate" + "compress/gzip" + "compress/zlib" "context" "database/sql/driver" "fmt" "io" + "log" + "mime/multipart" "net" "net/http" "net/url" + "os" "strings" + "sync" "time" + + "github.com/ClickHouse/clickhouse-go/v2/resources" + + "github.com/ClickHouse/ch-go/compress" + chproto "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" + "github.com/andybalholm/brotli" + "github.com/pkg/errors" ) +const ( + quotaKeyParamName = "quota_key" + queryIDParamName = "query_id" +) + +type Pool[T any] struct { + pool *sync.Pool +} + +func NewPool[T any](fn func() T) Pool[T] { + return Pool[T]{ + pool: &sync.Pool{New: func() any { return fn() }}, + } +} + +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} + +type HTTPReaderWriter struct { + reader io.Reader + writer io.WriteCloser + err error + method CompressionMethod +} + +// NewReader will return a reader that will decompress data if needed. 
+func (rw *HTTPReaderWriter) NewReader(res *http.Response) (io.Reader, error) { + enc := res.Header.Get("Content-Encoding") + if !res.Uncompressed && rw.method.String() == enc { + switch rw.method { + case CompressionGZIP: + reader := rw.reader.(*gzip.Reader) + if err := reader.Reset(res.Body); err != nil { + return nil, err + } + return reader, nil + case CompressionDeflate: + reader := rw.reader + if err := reader.(flate.Resetter).Reset(res.Body, nil); err != nil { + return nil, err + } + return reader, nil + case CompressionBrotli: + reader := rw.reader.(*brotli.Reader) + if err := reader.Reset(res.Body); err != nil { + return nil, err + } + return reader, nil + } + } + return res.Body, nil +} + +func (rw *HTTPReaderWriter) reset(pw *io.PipeWriter) io.WriteCloser { + switch rw.method { + case CompressionGZIP: + rw.writer.(*gzip.Writer).Reset(pw) + return rw.writer + case CompressionDeflate: + rw.writer.(*zlib.Writer).Reset(pw) + return rw.writer + case CompressionBrotli: + rw.writer.(*brotli.Writer).Reset(pw) + return rw.writer + default: + return pw + } +} + func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpConnect, error) { - url := &url.URL{ - Scheme: "http", + var debugf = func(format string, v ...any) {} + if opt.Debug { + if opt.Debugf != nil { + debugf = func(format string, v ...any) { + opt.Debugf( + "[clickhouse][conn=%d][%s] "+format, + append([]interface{}{num, addr}, v...)..., + ) + } + } else { + debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse][conn=%d][%s]", num, addr), 0).Printf + } + } + + if opt.scheme == "" { + switch opt.Protocol { + case HTTP: + opt.scheme = opt.Protocol.String() + if opt.TLS != nil { + opt.scheme = fmt.Sprintf("%ss", opt.scheme) + } + default: + return nil, errors.New("invalid interface type for http") + } + } + u := &url.URL{ + Scheme: opt.scheme, Host: addr, + Path: opt.HttpUrlPath, } - if opt.TLS != nil { - url.Scheme = "https" + headers := make(map[string]string) + for k, v := range opt.HttpHeaders { + headers[k] = v } - connect := &httpConnect{ - opt: opt, - transport: &http.Transport{ - DialContext: (&net.Dialer{ - Timeout: opt.DialTimeout, - KeepAlive: opt.ConnMaxLifetime, - }).DialContext, - MaxIdleConns: 1, - IdleConnTimeout: opt.ConnMaxLifetime, - ResponseHeaderTimeout: opt.ReadTimeout, - TLSClientConfig: opt.TLS, - }, - url: url, - location: time.UTC, // TODO: make configurable + if opt.TLS == nil && len(opt.Auth.Username) > 0 { + if len(opt.Auth.Password) > 0 { + u.User = url.UserPassword(opt.Auth.Username, opt.Auth.Password) + } else { + u.User = url.User(opt.Auth.Username) + } + } else if opt.TLS != nil && len(opt.Auth.Username) > 0 { + headers["X-ClickHouse-User"] = opt.Auth.Username + if len(opt.Auth.Password) > 0 { + headers["X-ClickHouse-Key"] = opt.Auth.Password + headers["X-ClickHouse-SSL-Certificate-Auth"] = "off" + } else { + headers["X-ClickHouse-SSL-Certificate-Auth"] = "on" + } + } + + headers["User-Agent"] = opt.ClientInfo.String() + + query := u.Query() + if len(opt.Auth.Database) > 0 { + query.Set("database", opt.Auth.Database) + } + + if opt.Compression == nil { + opt.Compression = &Compression{ + Method: CompressionNone, + } + } + + compressionPool, err := createCompressionPool(opt.Compression) + if err != nil { + return nil, err + } + + for k, v := range opt.Settings { + if cv, ok := v.(CustomSetting); ok { + v = cv.Value + } + + query.Set(k, fmt.Sprint(v)) + } + + query.Set("default_format", "Native") + u.RawQuery = query.Encode() + + t := &http.Transport{ + Proxy: 
http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: opt.DialTimeout, + }).DialContext, + MaxIdleConns: 1, + IdleConnTimeout: opt.ConnMaxLifetime, + ResponseHeaderTimeout: opt.ReadTimeout, + TLSClientConfig: opt.TLS, + } + + if opt.DialContext != nil { + t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return opt.DialContext(ctx, addr) + } } - if err := connect.ping(ctx); err != nil { + conn := &httpConnect{ + client: &http.Client{ + Transport: t, + }, + url: u, + buffer: new(chproto.Buffer), + compression: opt.Compression.Method, + blockCompressor: compress.NewWriter(), + compressionPool: compressionPool, + blockBufferSize: opt.BlockBufferSize, + headers: headers, + } + location, err := conn.readTimeZone(ctx) + if err != nil { return nil, err } + if num == 1 { + version, err := conn.readVersion(ctx) + if err != nil { + return nil, err + } + if !resources.ClientMeta.IsSupportedClickHouseVersion(version) { + debugf("WARNING: version %v of ClickHouse is not supported by this client\n", version) + } + } - return connect, nil + return &httpConnect{ + client: &http.Client{ + Transport: t, + }, + url: u, + buffer: new(chproto.Buffer), + compression: opt.Compression.Method, + blockCompressor: compress.NewWriter(), + compressionPool: compressionPool, + location: location, + blockBufferSize: opt.BlockBufferSize, + headers: headers, + }, nil } type httpConnect struct { - opt *Options - url *url.URL - transport *http.Transport - location *time.Location + url *url.URL + client *http.Client + location *time.Location + buffer *chproto.Buffer + compression CompressionMethod + blockCompressor *compress.Writer + compressionPool Pool[HTTPReaderWriter] + blockBufferSize uint8 + headers map[string]string +} + +func (h *httpConnect) isBad() bool { + return h.client == nil } -func (h *httpConnect) prepareRequest(ctx context.Context, query string, args ...interface{}) (*http.Request, error) { - query, err := bind(h.location, query, args) +func (h *httpConnect) readTimeZone(ctx context.Context) (*time.Location, error) { + rows, err := h.query(Context(ctx, ignoreExternalTables()), func(*connect, error) {}, "SELECT timezone()") if err != nil { return nil, err } - reader := strings.NewReader(query) + if !rows.Next() { + return nil, errors.New("unable to determine server timezone") + } - req, err := http.NewRequest(http.MethodPost, h.url.String(), reader) + var serverLocation string + if err := rows.Scan(&serverLocation); err != nil { + return nil, err + } - return req, err + location, err := time.LoadLocation(serverLocation) + if err != nil { + return nil, err + } + return location, nil } -func (h *httpConnect) executeRequest(ctx context.Context, req *http.Request) (io.ReadCloser, error) { +func (h *httpConnect) readVersion(ctx context.Context) (proto.Version, error) { + rows, err := h.query(Context(ctx, ignoreExternalTables()), func(*connect, error) {}, "SELECT version()") + if err != nil { + return proto.Version{}, err + } - if h.transport == nil { - return nil, driver.ErrBadConn + if !rows.Next() { + return proto.Version{}, errors.New("unable to determine version") } - resp, err := h.transport.RoundTrip(req) + var v string + if err := rows.Scan(&v); err != nil { + return proto.Version{}, err + } + version := proto.ParseVersion(v) + return version, nil +} + +func createCompressionPool(compression *Compression) (Pool[HTTPReaderWriter], error) { + pool := NewPool(func() HTTPReaderWriter { + switch compression.Method { + case CompressionGZIP: + // trick so we can 
init the reader to something to Reset when we reuse + writer, err := gzip.NewWriterLevel(io.Discard, compression.Level) + if err != nil { + return HTTPReaderWriter{err: err} + } + b := new(bytes.Buffer) + writer.Reset(b) + writer.Flush() + writer.Close() + reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) + return HTTPReaderWriter{writer: writer, reader: reader, err: err, method: compression.Method} + case CompressionDeflate: + writer, err := zlib.NewWriterLevel(io.Discard, compression.Level) + if err != nil { + return HTTPReaderWriter{err: err} + } + b := new(bytes.Buffer) + writer.Reset(b) + writer.Flush() + writer.Close() + reader, err := zlib.NewReader(bytes.NewReader(b.Bytes())) + if err != nil { + return HTTPReaderWriter{err: err} + } + return HTTPReaderWriter{writer: writer, reader: reader, method: compression.Method} + case CompressionBrotli: + writer := brotli.NewWriterLevel(io.Discard, compression.Level) + b := new(bytes.Buffer) + writer.Reset(b) + writer.Flush() + writer.Close() + reader := brotli.NewReader(bytes.NewReader(b.Bytes())) + return HTTPReaderWriter{writer: writer, reader: reader, method: compression.Method} + default: + return HTTPReaderWriter{method: CompressionNone} + } + }) + err := pool.Get().err if err != nil { + return pool, err + } + return pool, nil +} + +func (h *httpConnect) writeData(block *proto.Block) error { + // Saving offset of compressible data + start := len(h.buffer.Buf) + if err := block.Encode(h.buffer, 0); err != nil { + return err + } + if h.compression == CompressionLZ4 || h.compression == CompressionZSTD { + // Performing compression. Supported and requires + data := h.buffer.Buf[start:] + if err := h.blockCompressor.Compress(compress.Method(h.compression), data); err != nil { + return errors.Wrap(err, "compress") + } + h.buffer.Buf = append(h.buffer.Buf[:start], h.blockCompressor.Data...) 
+ } + return nil +} + +func (h *httpConnect) readData(reader *chproto.Reader, timezone *time.Location) (*proto.Block, error) { + location := h.location + if timezone != nil { + location = timezone + } + + block := proto.Block{Timezone: location} + if h.compression == CompressionLZ4 || h.compression == CompressionZSTD { + reader.EnableCompression() + defer reader.DisableCompression() + } + if err := block.Decode(reader, 0); err != nil { return nil, err } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("clickhouse [execute]:: got no 200 code('%d')", resp.StatusCode) + return &block, nil +} + +func (h *httpConnect) sendStreamQuery(ctx context.Context, r io.Reader, options *QueryOptions, headers map[string]string) (*http.Response, error) { + req, err := h.createRequest(ctx, h.url.String(), r, options, headers) + if err != nil { + return nil, err + } + + res, err := h.executeRequest(req) + if err != nil { + return nil, err } - return resp.Body, nil + return res, nil } -func (h *httpConnect) ping(ctx context.Context) error { - req, err := h.prepareRequest(ctx, "SELECT 1") +func (h *httpConnect) sendQuery(ctx context.Context, query string, options *QueryOptions, headers map[string]string) (*http.Response, error) { + req, err := h.prepareRequest(ctx, query, options, headers) if err != nil { - return err + return nil, err } - res, err := h.executeRequest(ctx, req) + res, err := h.executeRequest(req) if err != nil { - return err + return nil, err } - s, err := io.ReadAll(res) + return res, nil +} + +func (h *httpConnect) readRawResponse(response *http.Response) (body []byte, err error) { + rw := h.compressionPool.Get() + defer h.compressionPool.Put(rw) + + reader, err := rw.NewReader(response) if err != nil { - return err + return nil, err + } + if h.compression == CompressionLZ4 || h.compression == CompressionZSTD { + chReader := chproto.NewReader(reader) + chReader.EnableCompression() + reader = chReader } - if strings.TrimSpace(string(s)) != "1" { - return fmt.Errorf("clickhouse [ping]:: expected result (1), got '%s' instead", string(s)) + body, err = io.ReadAll(reader) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err } + return body, nil +} - return nil +func (h *httpConnect) createRequest(ctx context.Context, requestUrl string, reader io.Reader, options *QueryOptions, headers map[string]string) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestUrl, reader) + if err != nil { + return nil, err + } + for k, v := range headers { + req.Header.Add(k, v) + } + var query url.Values + if options != nil { + query = req.URL.Query() + if options.queryID != "" { + query.Set(queryIDParamName, options.queryID) + } + if options.quotaKey != "" { + query.Set(quotaKeyParamName, options.quotaKey) + } + for key, value := range options.settings { + // check that query doesn't change format + if key == "default_format" { + continue + } + if cv, ok := value.(CustomSetting); ok { + value = cv.Value + } + query.Set(key, fmt.Sprint(value)) + } + for key, value := range options.parameters { + query.Set(fmt.Sprintf("param_%s", key), value) + } + req.URL.RawQuery = query.Encode() + } + return req, nil +} + +func (h *httpConnect) prepareRequest(ctx context.Context, query string, options *QueryOptions, headers map[string]string) (*http.Request, error) { + if options == nil || len(options.external) == 0 { + return h.createRequest(ctx, h.url.String(), strings.NewReader(query), options, headers) + } + return h.createRequestWithExternalTables(ctx, query, 
options, headers) +} + +func (h *httpConnect) createRequestWithExternalTables(ctx context.Context, query string, options *QueryOptions, headers map[string]string) (*http.Request, error) { + payload := &bytes.Buffer{} + w := multipart.NewWriter(payload) + currentUrl := new(url.URL) + *currentUrl = *h.url + queryValues := currentUrl.Query() + buf := &chproto.Buffer{} + for _, table := range options.external { + tableName := table.Name() + queryValues.Set(fmt.Sprintf("%v_format", tableName), "Native") + queryValues.Set(fmt.Sprintf("%v_structure", tableName), table.Structure()) + partWriter, err := w.CreateFormFile(tableName, "") + if err != nil { + return nil, err + } + buf.Reset() + err = table.Block().Encode(buf, 0) + if err != nil { + return nil, err + } + _, err = partWriter.Write(buf.Buf) + if err != nil { + return nil, err + } + } + currentUrl.RawQuery = queryValues.Encode() + err := w.WriteField("query", query) + if err != nil { + return nil, err + } + err = w.Close() + if err != nil { + return nil, err + } + headers["Content-Type"] = w.FormDataContentType() + return h.createRequest(ctx, currentUrl.String(), bytes.NewReader(payload.Bytes()), options, headers) +} + +func (h *httpConnect) executeRequest(req *http.Request) (*http.Response, error) { + if h.client == nil { + return nil, driver.ErrBadConn + } + resp, err := h.client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + msg, err := h.readRawResponse(resp) + if err != nil { + return nil, fmt.Errorf("clickhouse [execute]:: %d code: failed to read the response: %w", resp.StatusCode, err) + } + return nil, fmt.Errorf("clickhouse [execute]:: %d code: %s", resp.StatusCode, string(msg)) + } + return resp, nil +} + +func (h *httpConnect) ping(ctx context.Context) error { + rows, err := h.query(Context(ctx, ignoreExternalTables()), nil, "SELECT 1") + if err != nil { + return err + } + column := rows.Columns() + // check that we got column 1 + if len(column) == 1 && column[0] == "1" { + return nil + } + return errors.New("clickhouse [ping]:: cannot ping clickhouse") } func (h *httpConnect) close() error { - if h.transport == nil { + if h.client == nil { return nil } - h.transport.CloseIdleConnections() - h.transport = nil + h.client.CloseIdleConnections() + h.client = nil return nil } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go similarity index 53% rename from vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go rename to vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go index 2e6691e..3e197f0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go @@ -15,33 +15,35 @@ // specific language governing permissions and limitations // under the License. 
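// Editor's note (illustrative sketch, not part of the patch): the dialHttp changes above
// turn client Options into URL query parameters, auth headers and a pooled compression
// reader/writer, and are reached through the std database/sql driver. A hedged example of
// exercising that path via clickhouse.OpenDB; the address, database and setting name are
// assumptions, and the exact Options fields shown are the ones referenced in this diff.
package main

import (
	"context"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	db := clickhouse.OpenDB(&clickhouse.Options{
		Protocol: clickhouse.HTTP,            // selects dialHttp instead of the native protocol
		Addr:     []string{"127.0.0.1:8123"}, // assumed local HTTP endpoint
		Auth:     clickhouse.Auth{Database: "default", Username: "default"},
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionGZIP, // sent as Content-Encoding / Accept-Encoding
		},
		Settings: clickhouse.Settings{
			"max_execution_time": 60, // promoted to a URL query parameter by dialHttp
		},
	})
	defer db.Close()

	if err := db.PingContext(context.Background()); err != nil {
		log.Fatal(err)
	}
}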
-package column +package clickhouse import ( - "fmt" - "time" + "context" + "io" ) -const secInDay = 24 * 60 * 60 +func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool, args ...any) error { -func dateOverflow(min, max, v time.Time, format string) error { - if v.Before(min) || v.After(max) { - return &DateOverflowError{ - Min: min, - Max: max, - Value: v, - Format: format, + options := queryOptions(ctx) + options.settings["async_insert"] = 1 + options.settings["wait_for_async_insert"] = 0 + if wait { + options.settings["wait_for_async_insert"] = 1 + } + if len(args) > 0 { + var err error + query, err = bindQueryOrAppendParameters(true, &options, query, h.location, args...) + if err != nil { + return err } } - return nil -} -type DateOverflowError struct { - Min, Max time.Time - Value time.Time - Format string -} + res, err := h.sendQuery(ctx, query, &options, h.headers) + if res != nil { + defer res.Body.Close() + // we don't care about result, so just discard it to reuse connection + _, _ = io.Copy(io.Discard, res.Body) + } -func (e *DateOverflowError) Error() string { - return fmt.Sprintf("clickhouse: dateTime overflow. must be between %s and %s", e.Min.Format(e.Format), e.Max.Format(e.Format)) + return err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go new file mode 100644 index 0000000..b4b2792 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go @@ -0,0 +1,232 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "context" + "fmt" + "io" + "slices" + + "github.com/ClickHouse/clickhouse-go/v2/lib/column" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" +) + +// release is ignored, because http used by std with empty release function. +// Also opts ignored because all options unused in http batch. 
+func (h *httpConnect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { + query, tableName, queryColumns, err := extractNormalizedInsertQueryAndColumns(query) + if err != nil { + return nil, err + } + + describeTableQuery := fmt.Sprintf("DESCRIBE TABLE %s", tableName) + r, err := h.query(ctx, release, describeTableQuery) + if err != nil { + return nil, err + } + + block := &proto.Block{} + + columns := make(map[string]string) + var colNames []string + for r.Next() { + var ( + colName string + colType string + default_type string + ignore string + ) + + if err = r.Scan(&colName, &colType, &default_type, &ignore, &ignore, &ignore, &ignore); err != nil { + return nil, err + } + // these column types cannot be specified in INSERT queries + if default_type == "MATERIALIZED" || default_type == "ALIAS" { + continue + } + colNames = append(colNames, colName) + columns[colName] = colType + } + + switch len(queryColumns) { + case 0: + for _, colName := range colNames { + if err = block.AddColumn(colName, column.Type(columns[colName])); err != nil { + return nil, err + } + } + default: + // user has requested specific columns so only include these + for _, colName := range queryColumns { + if colType, ok := columns[colName]; ok { + if err = block.AddColumn(colName, column.Type(colType)); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("column %s is not present in the table %s", colName, tableName) + } + } + } + + return &httpBatch{ + ctx: ctx, + conn: h, + structMap: &structMap{}, + block: block, + query: query, + }, nil +} + +type httpBatch struct { + query string + err error + ctx context.Context + conn *httpConnect + structMap *structMap + sent bool + block *proto.Block +} + +// Flush TODO: noop on http currently - requires streaming to be implemented +func (b *httpBatch) Flush() error { + return nil +} + +func (b *httpBatch) Abort() error { + defer func() { + b.sent = true + }() + if b.sent { + return ErrBatchAlreadySent + } + return nil +} + +func (b *httpBatch) Append(v ...any) error { + if b.sent { + return ErrBatchAlreadySent + } + if err := b.block.Append(v...); err != nil { + return err + } + return nil +} + +func (b *httpBatch) AppendStruct(v any) error { + values, err := b.structMap.Map("AppendStruct", b.block.ColumnsNames(), v, false) + if err != nil { + return err + } + return b.Append(values...) 
+} + +func (b *httpBatch) Column(idx int) driver.BatchColumn { + if len(b.block.Columns) <= idx { + return &batchColumn{ + err: &OpError{ + Op: "batch.Column", + Err: fmt.Errorf("invalid column index %d", idx), + }, + } + } + return &batchColumn{ + batch: b, + column: b.block.Columns[idx], + release: func(err error) { + b.err = err + }, + } +} + +func (b *httpBatch) IsSent() bool { + return b.sent +} + +func (b *httpBatch) Send() (err error) { + defer func() { + b.sent = true + }() + if b.sent { + return ErrBatchAlreadySent + } + if b.err != nil { + return b.err + } + options := queryOptions(b.ctx) + + headers := make(map[string]string) + + r, pw := io.Pipe() + crw := b.conn.compressionPool.Get() + w := crw.reset(pw) + + defer b.conn.compressionPool.Put(crw) + + switch b.conn.compression { + case CompressionGZIP, CompressionDeflate, CompressionBrotli: + headers["Content-Encoding"] = b.conn.compression.String() + case CompressionZSTD, CompressionLZ4: + options.settings["decompress"] = "1" + options.settings["compress"] = "1" + } + + go func() { + var err error = nil + defer pw.CloseWithError(err) + defer w.Close() + b.conn.buffer.Reset() + if b.block.Rows() != 0 { + if err = b.conn.writeData(b.block); err != nil { + return + } + } + if err = b.conn.writeData(&proto.Block{}); err != nil { + return + } + if _, err = w.Write(b.conn.buffer.Buf); err != nil { + return + } + }() + + options.settings["query"] = b.query + headers["Content-Type"] = "application/octet-stream" + for k, v := range b.conn.headers { + headers[k] = v + } + res, err := b.conn.sendStreamQuery(b.ctx, r, &options, headers) + + if res != nil { + defer res.Body.Close() + // we don't care about result, so just discard it to reuse connection + _, _ = io.Copy(io.Discard, res.Body) + } + + return err +} + +func (b *httpBatch) Rows() int { + return b.block.Rows() +} + +func (b *httpBatch) Columns() []column.Interface { + return slices.Clone(b.block.Columns) +} + +var _ driver.Batch = (*httpBatch)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go similarity index 61% rename from vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress.go rename to vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go index cfa1698..75198eb 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go @@ -15,27 +15,26 @@ // specific language governing permissions and limitations // under the License. -package compress +package clickhouse import ( - "encoding/binary" + "context" + "io" ) -var endian = binary.LittleEndian +func (h *httpConnect) exec(ctx context.Context, query string, args ...any) error { + options := queryOptions(ctx) + query, err := bindQueryOrAppendParameters(true, &options, query, h.location, args...) 
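// Editor's note (illustrative sketch, not part of the patch): httpBatch above implements
// driver.Batch for the HTTP interface, which this file notes is only reached through the
// std database/sql driver. A hedged example of the usual std batch pattern that would end
// up in prepareBatch / Append / Send; the table and column names are assumptions.
package main

import (
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	db := clickhouse.OpenDB(&clickhouse.Options{
		Protocol: clickhouse.HTTP,
		Addr:     []string{"127.0.0.1:8123"}, // assumed local HTTP endpoint
	})
	defer db.Close()

	tx, err := db.Begin() // scopes the batch
	if err != nil {
		log.Fatal(err)
	}
	stmt, err := tx.Prepare("INSERT INTO example (id, name)") // maps onto prepareBatch above
	if err != nil {
		log.Fatal(err)
	}
	if _, err := stmt.Exec(uint64(1), "first"); err != nil { // maps onto Batch.Append
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil { // maps onto Batch.Send: block encoded, optionally compressed, streamed
		log.Fatal(err)
	}
}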
+ if err != nil { + return err + } -type Method byte + res, err := h.sendQuery(ctx, query, &options, h.headers) + if res != nil { + defer res.Body.Close() + // we don't care about result, so just discard it to reuse connection + _, _ = io.Copy(io.Discard, res.Body) + } -const ( - NONE Method = 0x02 - LZ4 = 0x82 - ZSTD = 0x90 -) - -const ( - // ChecksumSize is 128bits for cityhash102 checksum - checksumSize = 16 - // CompressHeader magic + compressed_size + uncompressed_size - compressHeaderSize = 1 + 4 + 4 - headerSize = checksumSize + compressHeaderSize - maxBlockSize = 1 << 20 -) + return err +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go new file mode 100644 index 0000000..7ee1200 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go @@ -0,0 +1,125 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "context" + "errors" + "io" + + chproto "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" +) + +// release is ignored, because http used by std with empty release function +func (h *httpConnect) query(ctx context.Context, release func(*connect, error), query string, args ...any) (*rows, error) { + options := queryOptions(ctx) + query, err := bindQueryOrAppendParameters(true, &options, query, h.location, args...) + if err != nil { + return nil, err + } + headers := make(map[string]string) + switch h.compression { + case CompressionZSTD, CompressionLZ4: + options.settings["compress"] = "1" + case CompressionGZIP, CompressionDeflate, CompressionBrotli: + // request encoding + headers["Accept-Encoding"] = h.compression.String() + } + + for k, v := range h.headers { + headers[k] = v + } + + res, err := h.sendQuery(ctx, query, &options, headers) + if err != nil { + return nil, err + } + + if res.ContentLength == 0 { + block := &proto.Block{} + return &rows{ + block: block, + columns: block.ColumnsNames(), + structMap: &structMap{}, + }, nil + } + + rw := h.compressionPool.Get() + // The HTTPReaderWriter.NewReader will create a reader that will decompress it if needed, + // cause adding Accept-Encoding:gzip on your request means response won’t be automatically decompressed + // per https://github.com/golang/go/blob/master/src/net/http/transport.go#L182-L190. + // Note user will need to have set enable_http_compression for CH to respond with compressed data. we don't set this + // automatically as they might not have permissions. 
+ reader, err := rw.NewReader(res) + if err != nil { + res.Body.Close() + h.compressionPool.Put(rw) + return nil, err + } + chReader := chproto.NewReader(reader) + block, err := h.readData(chReader, options.userLocation) + if err != nil && !errors.Is(err, io.EOF) { + res.Body.Close() + h.compressionPool.Put(rw) + return nil, err + } + + bufferSize := h.blockBufferSize + if options.blockBufferSize > 0 { + // allow block buffer sze to be overridden per query + bufferSize = options.blockBufferSize + } + var ( + errCh = make(chan error) + stream = make(chan *proto.Block, bufferSize) + ) + go func() { + for { + block, err := h.readData(chReader, options.userLocation) + if err != nil { + // ch-go wraps EOF errors + if !errors.Is(err, io.EOF) { + errCh <- err + } + break + } + select { + case <-ctx.Done(): + errCh <- ctx.Err() + break + case stream <- block: + } + } + res.Body.Close() + h.compressionPool.Put(rw) + close(stream) + close(errCh) + }() + + if block == nil { + block = &proto.Block{} + } + return &rows{ + block: block, + stream: stream, + errors: errCh, + columns: block.ColumnsNames(), + structMap: &structMap{}, + }, nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_logs.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_logs.go index 4f234cc..d876b97 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_logs.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_logs.go @@ -18,6 +18,7 @@ package clickhouse import ( + "context" "time" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" @@ -34,8 +35,8 @@ type Log struct { Text string } -func (c *connect) logs() ([]Log, error) { - block, err := c.readData(proto.ServerLog, false) +func (c *connect) logs(ctx context.Context) ([]Log, error) { + block, err := c.readData(ctx, proto.ServerLog, false) if err != nil { return nil, err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_ping.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_ping.go index 8426f34..cf81030 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_ping.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_ping.go @@ -28,20 +28,23 @@ import ( // Connection::ping // https://github.com/ClickHouse/ClickHouse/blob/master/src/Client/Connection.cpp func (c *connect) ping(ctx context.Context) (err error) { + // set a read deadline - alternative to context.Read operation will fail if no data is received after deadline. 
+ c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + defer c.conn.SetReadDeadline(time.Time{}) + // context level deadlines override any read deadline if deadline, ok := ctx.Deadline(); ok { c.conn.SetDeadline(deadline) defer c.conn.SetDeadline(time.Time{}) } c.debugf("[ping] -> ping") - if err := c.encoder.Byte(proto.ClientPing); err != nil { - return err - } - if err := c.encoder.Flush(); err != nil { + c.buffer.PutByte(proto.ClientPing) + if err := c.flush(); err != nil { return err } + var packet byte for { - if packet, err = c.decoder.ReadByte(); err != nil { + if packet, err = c.reader.ReadByte(); err != nil { return err } switch packet { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go index 88928c2..967e2ff 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go @@ -20,10 +20,8 @@ package clickhouse import ( "context" "fmt" - "io" - "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/proto" + "io" ) type onProcess struct { @@ -42,18 +40,18 @@ func (c *connect) firstBlock(ctx context.Context, on *onProcess) (*proto.Block, return nil, ctx.Err() default: } - packet, err := c.decoder.ReadByte() + packet, err := c.reader.ReadByte() if err != nil { return nil, err } switch packet { case proto.ServerData: - return c.readData(packet, true) + return c.readData(ctx, packet, true) case proto.ServerEndOfStream: c.debugf("[end of stream]") return nil, io.EOF default: - if err := c.handle(packet, on); err != nil { + if err := c.handle(ctx, packet, on); err != nil { return nil, err } } @@ -68,7 +66,7 @@ func (c *connect) process(ctx context.Context, on *onProcess) error { return ctx.Err() default: } - packet, err := c.decoder.ReadByte() + packet, err := c.reader.ReadByte() if err != nil { return err } @@ -77,16 +75,16 @@ func (c *connect) process(ctx context.Context, on *onProcess) error { c.debugf("[end of stream]") return nil } - if err := c.handle(packet, on); err != nil { + if err := c.handle(ctx, packet, on); err != nil { return err } } } -func (c *connect) handle(packet byte, on *onProcess) error { +func (c *connect) handle(ctx context.Context, packet byte, on *onProcess) error { switch packet { case proto.ServerData, proto.ServerTotals, proto.ServerExtremes: - block, err := c.readData(packet, true) + block, err := c.readData(ctx, packet, true) if err != nil { return err } @@ -97,25 +95,25 @@ func (c *connect) handle(packet byte, on *onProcess) error { return c.exception() case proto.ServerProfileInfo: var info proto.ProfileInfo - if err := info.Decode(c.decoder, c.revision); err != nil { + if err := info.Decode(c.reader, c.revision); err != nil { return err } c.debugf("[profile info] %s", &info) on.profileInfo(&info) case proto.ServerTableColumns: var info proto.TableColumns - if err := info.Decode(c.decoder, c.revision); err != nil { + if err := info.Decode(c.reader, c.revision); err != nil { return err } c.debugf("[table columns]") case proto.ServerProfileEvents: - events, err := c.profileEvents() + events, err := c.profileEvents(ctx) if err != nil { return err } on.profileEvents(events) case proto.ServerLog: - logs, err := c.logs() + logs, err := c.logs(ctx) if err != nil { return err } @@ -137,11 +135,12 @@ func (c *connect) handle(packet byte, on *onProcess) error { } func (c *connect) cancel() error { - c.conn.SetDeadline(time.Now().Add(2 * time.Second)) c.debugf("[cancel]") - c.closed = true - if err := 
c.encoder.Uvarint(proto.ClientCancel); err == nil { - return err + c.buffer.PutUVarInt(proto.ClientCancel) + wErr := c.flush() + // don't reuse a cancelled query as we don't drain the connection + if cErr := c.close(); cErr != nil { + return cErr } - return c.encoder.Flush() + return wErr } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_profile_events.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_profile_events.go index 3fa7db7..6f21aaf 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_profile_events.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_profile_events.go @@ -18,6 +18,7 @@ package clickhouse import ( + "context" "reflect" "time" @@ -33,8 +34,8 @@ type ProfileEvent struct { Value int64 } -func (c *connect) profileEvents() ([]ProfileEvent, error) { - block, err := c.readData(proto.ServerProfileEvents, false) +func (c *connect) profileEvents(ctx context.Context) ([]ProfileEvent, error) { + block, err := c.readData(ctx, proto.ServerProfileEvents, false) if err != nil { return nil, err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go index 821444b..dbd0393 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go @@ -24,18 +24,24 @@ import ( "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -func (c *connect) query(ctx context.Context, release func(*connect, error), query string, args ...interface{}) (*rows, error) { +func (c *connect) query(ctx context.Context, release func(*connect, error), query string, args ...any) (*rows, error) { var ( - options = queryOptions(ctx) - onProcess = options.onProcess() - body, err = bind(c.server.Timezone, query, args...) + options = queryOptions(ctx) + onProcess = options.onProcess() + queryParamsProtocolSupport = c.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS + body, err = bindQueryOrAppendParameters(queryParamsProtocolSupport, &options, query, c.server.Timezone, args...) ) if err != nil { + c.debugf("[bindQuery] error: %v", err) release(c, err) return nil, err } + // set a read deadline - alternative to context.Read operation will fail if no data is received after deadline. 
+ c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + defer c.conn.SetReadDeadline(time.Time{}) + // context level deadlines override any read deadline if deadline, ok := ctx.Deadline(); ok { c.conn.SetDeadline(deadline) defer c.conn.SetDeadline(time.Time{}) @@ -49,13 +55,18 @@ func (c *connect) query(ctx context.Context, release func(*connect, error), quer init, err := c.firstBlock(ctx, onProcess) if err != nil { + c.debugf("[query] first block error: %v", err) release(c, err) return nil, err } - + bufferSize := c.blockBufferSize + if options.blockBufferSize > 0 { + // allow block buffer sze to be overridden per query + bufferSize = options.blockBufferSize + } var ( - errors = make(chan error) - stream = make(chan *proto.Block, 2) + errors = make(chan error, 1) + stream = make(chan *proto.Block, bufferSize) ) go func() { @@ -64,6 +75,7 @@ func (c *connect) query(ctx context.Context, release func(*connect, error), quer } err := c.process(ctx, onProcess) if err != nil { + c.debugf("[query] process error: %v", err) errors <- err } close(stream) @@ -80,7 +92,7 @@ func (c *connect) query(ctx context.Context, release func(*connect, error), quer }, nil } -func (c *connect) queryRow(ctx context.Context, release func(*connect, error), query string, args ...interface{}) *row { +func (c *connect) queryRow(ctx context.Context, release func(*connect, error), query string, args ...any) *row { rows, err := c.query(ctx, release, query, args...) if err != nil { return &row{ diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go index 867dd8d..8897a8c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go @@ -24,20 +24,22 @@ import ( // Connection::sendQuery // https://github.com/ClickHouse/ClickHouse/blob/master/src/Client/Connection.cpp func (c *connect) sendQuery(body string, o *QueryOptions) error { - c.debugf("[send query] compression=%t %s", c.compression, body) - if err := c.encoder.Byte(proto.ClientQuery); err != nil { - return err - } + c.debugf("[send query] compression=%q %s", c.compression, body) + c.buffer.PutByte(proto.ClientQuery) q := proto.Query{ - ID: o.queryID, - Body: body, - Span: o.span, - QuotaKey: o.quotaKey, - Compression: c.compression, - InitialAddress: c.conn.LocalAddr().String(), - Settings: c.settings(o.settings), + ClientTCPProtocolVersion: ClientTCPProtocolVersion, + ClientName: c.opt.ClientInfo.String(), + ClientVersion: proto.Version{ClientVersionMajor, ClientVersionMinor, ClientVersionPatch}, //nolint:govet + ID: o.queryID, + Body: body, + Span: o.span, + QuotaKey: o.quotaKey, + Compression: c.compression != CompressionNone, + InitialAddress: c.conn.LocalAddr().String(), + Settings: c.settings(o.settings), + Parameters: parametersToProtoParameters(o.parameters), } - if err := q.Encode(c.encoder, c.revision); err != nil { + if err := q.Encode(c.buffer, c.revision); err != nil { return err } for _, table := range o.external { @@ -48,5 +50,16 @@ func (c *connect) sendQuery(body string, o *QueryOptions) error { if err := c.sendData(&proto.Block{}, ""); err != nil { return err } - return c.encoder.Flush() + return c.flush() +} + +func parametersToProtoParameters(parameters Parameters) (s proto.Parameters) { + for k, v := range parameters { + s = append(s, proto.Parameter{ + Key: k, + Value: v, + }) + } + + return s } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go 
b/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go index b20f8be..67cd2c8 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go @@ -31,7 +31,16 @@ var _contextOptionKey = &QueryOptions{ }, } -type Settings map[string]interface{} +type Settings map[string]any + +// CustomSetting is a helper struct to distinguish custom settings from important ones. +// For native protocol, is_important flag is set to value 0x02 (see https://github.com/ClickHouse/ClickHouse/blob/c873560fe7185f45eed56520ec7d033a7beb1551/src/Core/BaseSettings.h#L516-L521) +// Only string value is supported until formatting logic that exists in ClickHouse is implemented in clickhouse-go. (https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Field.cpp#L312 and https://github.com/ClickHouse/clickhouse-go/issues/992) +type CustomSetting struct { + Value string +} + +type Parameters map[string]string type ( QueryOption func(*QueryOptions) error QueryOptions struct { @@ -48,8 +57,11 @@ type ( profileInfo func(*ProfileInfo) profileEvents func([]ProfileEvent) } - settings Settings - external []*ext.Table + settings Settings + parameters Parameters + external []*ext.Table + blockBufferSize uint8 + userLocation *time.Location } ) @@ -67,6 +79,13 @@ func WithQueryID(queryID string) QueryOption { } } +func WithBlockBufferSize(size uint8) QueryOption { + return func(o *QueryOptions) error { + o.blockBufferSize = size + return nil + } +} + func WithQuotaKey(quotaKey string) QueryOption { return func(o *QueryOptions) error { o.quotaKey = quotaKey @@ -81,6 +100,13 @@ func WithSettings(settings Settings) QueryOption { } } +func WithParameters(params Parameters) QueryOption { + return func(o *QueryOptions) error { + o.parameters = params + return nil + } +} + func WithLogs(fn func(*Log)) QueryOption { return func(o *QueryOptions) error { o.events.logs = fn @@ -123,10 +149,22 @@ func WithStdAsync(wait bool) QueryOption { } } -func Context(parent context.Context, options ...QueryOption) context.Context { - opt := QueryOptions{ - settings: make(Settings), +func WithUserLocation(location *time.Location) QueryOption { + return func(o *QueryOptions) error { + o.userLocation = location + return nil + } +} + +func ignoreExternalTables() QueryOption { + return func(o *QueryOptions) error { + o.external = nil + return nil } +} + +func Context(parent context.Context, options ...QueryOption) context.Context { + opt := queryOptions(parent) for _, f := range options { f(&opt) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/city64.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go similarity index 52% rename from vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/city64.go rename to vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go index 1dc1f8e..8c2257e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/city64.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go @@ -15,48 +15,33 @@ // specific language governing permissions and limitations // under the License. -package cityhash102 - -import ( - "encoding/binary" - "hash" -) - -type City64 struct { - s []byte -} - -var _ hash.Hash64 = (*City64)(nil) -var _ hash.Hash = (*City64)(nil) - -func New64() hash.Hash64 { - return &City64{} -} - -func (this *City64) Sum(b []byte) []byte { - b2 := make([]byte, 8) - binary.BigEndian.PutUint64(b2, this.Sum64()) - b = append(b, b2...) 
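// Editor's note (illustrative sketch, not part of the patch): the context.go hunk above
// adds Parameters, CustomSetting and the WithParameters / WithBlockBufferSize /
// WithUserLocation options. A hedged example of using them through the native API;
// the address, parameter name and query are examples only.
package main

import (
	"context"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	conn, err := clickhouse.Open(&clickhouse.Options{
		Addr: []string{"127.0.0.1:9000"}, // assumed native endpoint
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Server-side substitution of {name:String} requires a server that speaks the
	// parameters protocol (see the DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS checks
	// added in conn_exec.go and conn_query.go).
	ctx := clickhouse.Context(context.Background(),
		clickhouse.WithParameters(clickhouse.Parameters{
			"name": "Alice", // referenced in the query as {name:String}
		}),
		clickhouse.WithSettings(clickhouse.Settings{
			"custom_x": clickhouse.CustomSetting{Value: "custom"}, // illustrative custom setting, forwarded as-is
		}),
		clickhouse.WithBlockBufferSize(10), // per-query override added in this hunk
	)

	var greeting string
	if err := conn.QueryRow(ctx, "SELECT concat('Hello, ', {name:String})").Scan(&greeting); err != nil {
		log.Fatal(err)
	}
	log.Println(greeting)
}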
- return b -} - -func (this *City64) Sum64() uint64 { - return CityHash64(this.s, uint32(len(this.s))) -} - -func (this *City64) Reset() { - this.s = this.s[0:0] -} - -func (this *City64) BlockSize() int { - return 1 -} - -func (this *City64) Write(s []byte) (n int, err error) { - this.s = append(this.s, s...) - return len(s), nil -} - -func (this *City64) Size() int { - return 8 +package clickhouse + +import "context" + +// contextWatchdog is a helper function to run a callback when the context is done. +// it has a cancellation function to prevent the callback from running. +// Useful for interrupting some logic when the context is done, +// but you want to not bother about context cancellation if your logic is already done. +// Example: +// stopCW := contextWatchdog(ctx, func() { /* do something */ }) +// // do something else +// defer stopCW() +func contextWatchdog(ctx context.Context, callback func()) (cancel func()) { + exit := make(chan struct{}) + + go func() { + for { + select { + case <-exit: + return + case <-ctx.Done(): + callback() + } + } + }() + + return func() { + exit <- struct{}{} + } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/contributors.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/contributors.go index 7fcbae2..35c4047 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/contributors.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/contributors.go @@ -22,6 +22,7 @@ import ( "strings" ) +//go:generate bash -c "git log \"--pretty=%an <%ae>\" | sort -u > list" //go:embed list var source string diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list index 34131a8..42a2a48 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list @@ -1,86 +1 @@ -Abraham Adberstein -Aleksandr Petrukhin -Alex Bocharov -Alex Yang -Alexander Chumakov -Alexander Obukhov -Alexey Milovidov -Alexey Palazhchenko -Alvaro Tuso -Andrey Ustinov -Ashish Gaurav -Benjamin Rupp -Cem Sancak -Chao Wang -Chris Duncan -Dale McDiarmid -Damir Sayfutdinov -Dan Walters -Daniel Bershatsky -Danila Migalin -Danny.Dunn -Darío -Denis Krivak -Derek Perkins -Dmitry Markov -Dmitry Ponomarev -Dmitry Ponomarev -Egor.Gorlin -Eugene Formanenko -Evan Au -Ewan -Florian Lehner -Félix Mattrat -Geoff Genz -Ian McGraw -Ivan Blinkov -Ivan Ivanov -Jake Sylvestre -Jakub Chábek -James Hartig -Jan Was -Jeehoon Kim -John Troy -Jon Aquino -LI Tao -Maksim Sokolnikov -Marek Vavruša -Marek Vavruša -Mark Andrus Roberts -Michael Vigovsky -Nay Linn -Oleg Strokachuk -Richard Artoul -Robert Sköld -Robin Hahling -Ross Rothenstine -Sergei Sobolev -Sergey Melekhin -Taras Matsyk -Thibault Deutsch -Tsimafei Bredau -Vespertinus -Yury Korolev -Yury Yurochko -alex -anton troyanov -chengzhi -dmitry kuzmin -gaetan.rizio -hulb -ilker moral -jiyongwang -kshvakov -neverlee -nevseliev -pavel raskin -sundy-li <543950155@qq.com> -vahid sohrabloo -vasily.popov -vl4deee11 -vl4deee11 -vladislav doster -vvoronin -yuankun -zxc111 -李盼 +Kuba Kaflik diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/ext/ext.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/ext/ext.go index 48836a8..6fa2b7f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/ext/ext.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/ext/ext.go @@ -18,8 +18,10 @@ package ext import ( + "fmt" 
"github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" + "strings" ) func NewTable(name string, columns ...func(t *Table) error) (*Table, error) { @@ -44,11 +46,19 @@ func (tbl *Table) Name() string { return tbl.name } +func (tbl *Table) Structure() string { + columnStructure := make([]string, 0, len(tbl.block.Columns)) + for _, c := range tbl.block.Columns { + columnStructure = append(columnStructure, fmt.Sprintf("%v %v", c.Name(), c.Type())) + } + return strings.Join(columnStructure, ", ") +} + func (tbl *Table) Block() *proto.Block { return tbl.block } -func (tbl *Table) Append(v ...interface{}) error { +func (tbl *Table) Append(v ...any) error { return tbl.block.Append(v...) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/decoder.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/decoder.go deleted file mode 100644 index 6f34f2e..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/decoder.go +++ /dev/null @@ -1,175 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package binary - -import ( - "encoding/binary" - "io" - "math" -) - -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - input: r, - } -} - -type Decoder struct { - input io.Reader - scratch [binary.MaxVarintLen64]byte -} - -func (decoder *Decoder) Raw(b []byte) error { - n, err := decoder.input.Read(b) - if err != nil { - return nil - } - if n != len(b) { - return io.EOF - } - return nil -} - -func (decoder *Decoder) Bool() (bool, error) { - v, err := decoder.ReadByte() - if err != nil { - return false, err - } - return v == 1, nil -} - -func (decoder *Decoder) Uvarint() (uint64, error) { - return binary.ReadUvarint(decoder) -} - -func (decoder *Decoder) Int8() (int8, error) { - v, err := decoder.ReadByte() - if err != nil { - return 0, err - } - return int8(v), nil -} - -func (decoder *Decoder) Int16() (int16, error) { - v, err := decoder.UInt16() - if err != nil { - return 0, err - } - return int16(v), nil -} - -func (decoder *Decoder) Int32() (int32, error) { - v, err := decoder.UInt32() - if err != nil { - return 0, err - } - return int32(v), nil -} - -func (decoder *Decoder) Int64() (int64, error) { - v, err := decoder.UInt64() - if err != nil { - return 0, err - } - return int64(v), nil -} - -func (decoder *Decoder) UInt8() (uint8, error) { - v, err := decoder.ReadByte() - if err != nil { - return 0, err - } - return uint8(v), nil -} - -func (decoder *Decoder) UInt16() (uint16, error) { - if _, err := decoder.input.Read(decoder.scratch[:2]); err != nil { - return 0, err - } - return uint16(decoder.scratch[0]) | uint16(decoder.scratch[1])<<8, nil -} - -func (decoder *Decoder) UInt32() (uint32, error) { - if _, err := decoder.input.Read(decoder.scratch[:4]); err != nil { - return 0, err - } - return uint32(decoder.scratch[0]) | - uint32(decoder.scratch[1])<<8 | - uint32(decoder.scratch[2])<<16 | - uint32(decoder.scratch[3])<<24, nil -} - -func (decoder *Decoder) UInt64() (uint64, error) { - if _, err := decoder.input.Read(decoder.scratch[:8]); err != nil { - return 0, err - } - return uint64(decoder.scratch[0]) | - uint64(decoder.scratch[1])<<8 | - uint64(decoder.scratch[2])<<16 | - uint64(decoder.scratch[3])<<24 | - uint64(decoder.scratch[4])<<32 | - uint64(decoder.scratch[5])<<40 | - uint64(decoder.scratch[6])<<48 | - uint64(decoder.scratch[7])<<56, nil -} - -func (decoder *Decoder) Float32() (float32, error) { - v, err := decoder.UInt32() - if err != nil { - return 0, err - } - return math.Float32frombits(v), nil -} - -func (decoder *Decoder) Float64() (float64, error) { - v, err := decoder.UInt64() - if err != nil { - return 0, err - } - return math.Float64frombits(v), nil -} - -func (decoder *Decoder) Fixed(ln int) ([]byte, error) { - if reader, ok := decoder.input.(interface{ Fixed(ln int) ([]byte, error) }); ok { - return reader.Fixed(ln) - } - buf := make([]byte, ln) - if _, err := decoder.input.Read(buf); err != nil { - return nil, err - } - return buf, nil -} - -func (decoder *Decoder) String() (string, error) { - strlen, err := decoder.Uvarint() - if err != nil { - return "", err - } - str, err := decoder.Fixed(int(strlen)) - if err != nil { - return "", err - } - return string(str), nil -} - -func (decoder *Decoder) ReadByte() (byte, error) { - if _, err := decoder.input.Read(decoder.scratch[:1]); err != nil { - return 0x0, err - } - return decoder.scratch[0], nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/encoder.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/encoder.go deleted file mode 100644 index 
1ebc45c..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/encoder.go +++ /dev/null @@ -1,146 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package binary - -import ( - "encoding/binary" - "io" - "math" -) - -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - output: w, - } -} - -type Encoder struct { - output io.Writer - scratch [binary.MaxVarintLen64]byte -} - -func (enc *Encoder) Raw(b []byte) error { - if _, err := enc.output.Write(b); err != nil { - return err - } - return nil -} - -func (enc *Encoder) Bool(v bool) error { - if v { - return enc.UInt8(1) - } - return enc.UInt8(0) -} - -func (enc *Encoder) Byte(b byte) error { - return enc.UInt8(b) -} - -func (enc *Encoder) Int8(v int8) error { - return enc.UInt8(uint8(v)) -} - -func (enc *Encoder) Int16(v int16) error { - return enc.UInt16(uint16(v)) -} - -func (enc *Encoder) Int32(v int32) error { - return enc.UInt32(uint32(v)) -} - -func (enc *Encoder) Int64(v int64) error { - return enc.UInt64(uint64(v)) -} - -func (enc *Encoder) UInt8(v uint8) error { - enc.scratch[0] = v - if _, err := enc.output.Write(enc.scratch[:1]); err != nil { - return err - } - return nil -} - -func (enc *Encoder) UInt16(v uint16) error { - enc.scratch[0] = byte(v) - enc.scratch[1] = byte(v >> 8) - if _, err := enc.output.Write(enc.scratch[:2]); err != nil { - return err - } - return nil -} - -func (enc *Encoder) UInt32(v uint32) error { - enc.scratch[0] = byte(v) - enc.scratch[1] = byte(v >> 8) - enc.scratch[2] = byte(v >> 16) - enc.scratch[3] = byte(v >> 24) - if _, err := enc.output.Write(enc.scratch[:4]); err != nil { - return err - } - return nil -} - -func (enc *Encoder) UInt64(v uint64) error { - enc.scratch[0] = byte(v) - enc.scratch[1] = byte(v >> 8) - enc.scratch[2] = byte(v >> 16) - enc.scratch[3] = byte(v >> 24) - enc.scratch[4] = byte(v >> 32) - enc.scratch[5] = byte(v >> 40) - enc.scratch[6] = byte(v >> 48) - enc.scratch[7] = byte(v >> 56) - if _, err := enc.output.Write(enc.scratch[:8]); err != nil { - return err - } - return nil -} - -func (enc *Encoder) Float32(v float32) error { - return enc.UInt32(math.Float32bits(v)) -} - -func (enc *Encoder) Float64(v float64) error { - return enc.UInt64(math.Float64bits(v)) -} - -func (enc *Encoder) Uvarint(v uint64) error { - ln := binary.PutUvarint(enc.scratch[:binary.MaxVarintLen64], v) - if _, err := enc.output.Write(enc.scratch[0:ln]); err != nil { - return err - } - return nil -} - -func (enc *Encoder) Flush() error { - if w, ok := enc.output.(interface{ Flush() error }); ok { - return w.Flush() - } - return nil -} - -func (enc *Encoder) String(v string) error { - str := Str2Bytes(v) - if err := enc.Uvarint(uint64(len(str))); err != nil { - return err - } - if _, err := enc.output.Write(str); err != nil { - return 
err - } - return nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_safe.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_safe.go index 0008b25..3280748 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_safe.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_safe.go @@ -20,6 +20,13 @@ package binary -func Str2Bytes(str string) []byte { - return []byte(str) +func Str2Bytes(str string, expectedLen int) []byte { + b := []byte(str) + if len(str) < expectedLen { + extended := make([]byte, expectedLen) + copy(extended, b) + return extended + } + + return b } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_unsafe.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_unsafe.go index 8a101d1..3a4e8cd 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_unsafe.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string_unsafe.go @@ -20,6 +20,14 @@ package binary -func Str2Bytes(str string) []byte { - return unsafeStr2Bytes(str) +func Str2Bytes(str string, expectedLen int) []byte { + b := unsafeStr2Bytes(str) + + if len(str) < expectedLen { + extended := make([]byte, expectedLen) + copy(extended, b) + return extended + } + + return b } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/cityhash.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/cityhash.go deleted file mode 100644 index 410aafd..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/cityhash.go +++ /dev/null @@ -1,400 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -/* - * Go implementation of Google city hash (MIT license) - * https://code.google.com/p/cityhash/ - * - * MIT License http://www.opensource.org/licenses/mit-license.php - * - * I don't even want to pretend to understand the details of city hash. - * I am only reproducing the logic in Go as faithfully as I can. 
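// Editor's note (illustrative sketch, not part of the patch): Str2Bytes above now takes an
// expectedLen and zero-pads shorter strings, the kind of padding fixed-width string columns
// need. The helper lives in an internal package, so this sketch reimplements the same logic
// locally purely to show the behaviour.
package main

import "fmt"

// str2Bytes mirrors the vendored helper: convert the string and, when it is shorter than
// expectedLen, copy it into a zero-filled slice of exactly expectedLen bytes.
func str2Bytes(str string, expectedLen int) []byte {
	b := []byte(str)
	if len(str) < expectedLen {
		extended := make([]byte, expectedLen)
		copy(extended, b)
		return extended
	}
	return b
}

func main() {
	fmt.Printf("%q\n", str2Bytes("ab", 4))     // "ab\x00\x00" — padded to the expected width
	fmt.Printf("%q\n", str2Bytes("abcdef", 4)) // "abcdef" — longer input is returned unchanged
}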
- * - */ - -package cityhash102 - -import ( - "encoding/binary" -) - -const ( - k0 uint64 = 0xc3a5c85c97cb3127 - k1 uint64 = 0xb492b66fbe98f273 - k2 uint64 = 0x9ae16a3b2f90404f - k3 uint64 = 0xc949d7c7509e6557 - - kMul uint64 = 0x9ddfea08eb382d69 -) - -func fetch64(p []byte) uint64 { - return binary.LittleEndian.Uint64(p) - //return uint64InExpectedOrder(unalignedLoad64(p)) -} - -func fetch32(p []byte) uint32 { - return binary.LittleEndian.Uint32(p) - //return uint32InExpectedOrder(unalignedLoad32(p)) -} - -func rotate64(val uint64, shift uint32) uint64 { - if shift != 0 { - return ((val >> shift) | (val << (64 - shift))) - } - - return val -} - -func rotate32(val uint32, shift uint32) uint32 { - if shift != 0 { - return ((val >> shift) | (val << (32 - shift))) - } - - return val -} - -func swap64(a, b *uint64) { - *a, *b = *b, *a -} - -func swap32(a, b *uint32) { - *a, *b = *b, *a -} - -func permute3(a, b, c *uint32) { - swap32(a, b) - swap32(a, c) -} - -func rotate64ByAtLeast1(val uint64, shift uint32) uint64 { - return (val >> shift) | (val << (64 - shift)) -} - -func shiftMix(val uint64) uint64 { - return val ^ (val >> 47) -} - -type Uint128 [2]uint64 - -func (this *Uint128) setLower64(l uint64) { - this[0] = l -} - -func (this *Uint128) setHigher64(h uint64) { - this[1] = h -} - -func (this Uint128) Lower64() uint64 { - return this[0] -} - -func (this Uint128) Higher64() uint64 { - return this[1] -} - -func (this Uint128) Bytes() []byte { - b := make([]byte, 16) - binary.LittleEndian.PutUint64(b, this[0]) - binary.LittleEndian.PutUint64(b[8:], this[1]) - return b -} - -func hash128to64(x Uint128) uint64 { - // Murmur-inspired hashing. - var a = (x.Lower64() ^ x.Higher64()) * kMul - a ^= (a >> 47) - var b = (x.Higher64() ^ a) * kMul - b ^= (b >> 47) - b *= kMul - return b -} - -func hashLen16(u, v uint64) uint64 { - return hash128to64(Uint128{u, v}) -} - -func hashLen16_3(u, v, mul uint64) uint64 { - // Murmur-inspired hashing. 
- var a = (u ^ v) * mul - a ^= (a >> 47) - var b = (v ^ a) * mul - b ^= (b >> 47) - b *= mul - return b -} - -func hashLen0to16(s []byte, length uint32) uint64 { - if length > 8 { - var a = fetch64(s) - var b = fetch64(s[length-8:]) - - return hashLen16(a, rotate64ByAtLeast1(b+uint64(length), length)) ^ b - } - - if length >= 4 { - var a = fetch32(s) - return hashLen16(uint64(length)+(uint64(a)<<3), uint64(fetch32(s[length-4:]))) - } - - if length > 0 { - var a uint8 = uint8(s[0]) - var b uint8 = uint8(s[length>>1]) - var c uint8 = uint8(s[length-1]) - - var y uint32 = uint32(a) + (uint32(b) << 8) - var z uint32 = length + (uint32(c) << 2) - - return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2 - } - - return k2 -} - -// This probably works well for 16-byte strings as well, but it may be overkill -func hashLen17to32(s []byte, length uint32) uint64 { - var a = fetch64(s) * k1 - var b = fetch64(s[8:]) - var c = fetch64(s[length-8:]) * k2 - var d = fetch64(s[length-16:]) * k0 - - return hashLen16(rotate64(a-b, 43)+rotate64(c, 30)+d, - a+rotate64(b^k3, 20)-c+uint64(length)) -} - -func weakHashLen32WithSeeds(w, x, y, z, a, b uint64) Uint128 { - a += w - b = rotate64(b+a+z, 21) - var c uint64 = a - a += x - a += y - b += rotate64(a, 44) - return Uint128{a + z, b + c} -} - -func weakHashLen32WithSeeds_3(s []byte, a, b uint64) Uint128 { - return weakHashLen32WithSeeds(fetch64(s), fetch64(s[8:]), fetch64(s[16:]), fetch64(s[24:]), a, b) -} - -func hashLen33to64(s []byte, length uint32) uint64 { - var z uint64 = fetch64(s[24:]) - var a uint64 = fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0 - var b uint64 = rotate64(a+z, 52) - var c uint64 = rotate64(a, 37) - - a += fetch64(s[8:]) - c += rotate64(a, 7) - a += fetch64(s[16:]) - - var vf uint64 = a + z - var vs = b + rotate64(a, 31) + c - - a = fetch64(s[16:]) + fetch64(s[length-32:]) - z = fetch64(s[length-8:]) - b = rotate64(a+z, 52) - c = rotate64(a, 37) - a += fetch64(s[length-24:]) - c += rotate64(a, 7) - a += fetch64(s[length-16:]) - - wf := a + z - ws := b + rotate64(a, 31) + c - r := shiftMix((vf+ws)*k2 + (wf+vs)*k0) - return shiftMix(r*k0+vs) * k2 -} - -func CityHash64(s []byte, length uint32) uint64 { - if length <= 32 { - if length <= 16 { - return hashLen0to16(s, length) - } else { - return hashLen17to32(s, length) - } - } else if length <= 64 { - return hashLen33to64(s, length) - } - - var x uint64 = fetch64(s) - var y uint64 = fetch64(s[length-16:]) ^ k1 - var z uint64 = fetch64(s[length-56:]) ^ k0 - - var v Uint128 = weakHashLen32WithSeeds_3(s[length-64:], uint64(length), y) - var w Uint128 = weakHashLen32WithSeeds_3(s[length-32:], uint64(length)*k1, k0) - - z += shiftMix(v.Higher64()) * k1 - x = rotate64(z+x, 39) * k1 - y = rotate64(y, 33) * k1 - - length = (length - 1) & ^uint32(63) - for { - x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1 - y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1 - - x ^= w.Higher64() - y ^= v.Lower64() - - z = rotate64(z^w.Lower64(), 33) - v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64()) - w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y) - - swap64(&z, &x) - s = s[64:] - length -= 64 - - if length == 0 { - break - } - } - - return hashLen16(hashLen16(v.Lower64(), w.Lower64())+shiftMix(y)*k1+z, hashLen16(v.Higher64(), w.Higher64())+x) -} - -func CityHash64WithSeed(s []byte, length uint32, seed uint64) uint64 { - return CityHash64WithSeeds(s, length, k2, seed) -} - -func CityHash64WithSeeds(s []byte, length uint32, seed0, seed1 uint64) uint64 { - return 
hashLen16(CityHash64(s, length)-seed0, seed1) -} - -func cityMurmur(s []byte, length uint32, seed Uint128) Uint128 { - var a uint64 = seed.Lower64() - var b uint64 = seed.Higher64() - var c uint64 = 0 - var d uint64 = 0 - var l int32 = int32(length) - 16 - - if l <= 0 { // len <= 16 - a = shiftMix(a*k1) * k1 - c = b*k1 + hashLen0to16(s, length) - - if length >= 8 { - d = shiftMix(a + fetch64(s)) - } else { - d = shiftMix(a + c) - } - - } else { // len > 16 - c = hashLen16(fetch64(s[length-8:])+k1, a) - d = hashLen16(b+uint64(length), c+fetch64(s[length-16:])) - a += d - - for { - a ^= shiftMix(fetch64(s)*k1) * k1 - a *= k1 - b ^= a - c ^= shiftMix(fetch64(s[8:])*k1) * k1 - c *= k1 - d ^= c - s = s[16:] - l -= 16 - - if l <= 0 { - break - } - } - } - a = hashLen16(a, c) - b = hashLen16(d, b) - return Uint128{a ^ b, hashLen16(b, a)} -} - -func CityHash128WithSeed(s []byte, length uint32, seed Uint128) Uint128 { - if length < 128 { - return cityMurmur(s, length, seed) - } - - // We expect length >= 128 to be the common case. Keep 56 bytes of state: - // v, w, x, y, and z. - var v, w Uint128 - var x uint64 = seed.Lower64() - var y uint64 = seed.Higher64() - var z uint64 = uint64(length) * k1 - - var pos uint32 - var t = s - - v.setLower64(rotate64(y^k1, 49)*k1 + fetch64(s)) - v.setHigher64(rotate64(v.Lower64(), 42)*k1 + fetch64(s[8:])) - w.setLower64(rotate64(y+z, 35)*k1 + x) - w.setHigher64(rotate64(x+fetch64(s[88:]), 53) * k1) - - // This is the same inner loop as CityHash64(), manually unrolled. - for { - x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1 - y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1 - - x ^= w.Higher64() - y ^= v.Lower64() - z = rotate64(z^w.Lower64(), 33) - v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64()) - w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y) - swap64(&z, &x) - s = s[64:] - pos += 64 - - x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1 - y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1 - x ^= w.Higher64() - y ^= v.Lower64() - z = rotate64(z^w.Lower64(), 33) - v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64()) - w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y) - swap64(&z, &x) - s = s[64:] - pos += 64 - length -= 128 - - if length < 128 { - break - } - } - - y += rotate64(w.Lower64(), 37)*k0 + z - x += rotate64(v.Lower64()+z, 49) * k0 - - // If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s. - var tailDone uint32 - for tailDone = 0; tailDone < length; { - tailDone += 32 - y = rotate64(y-x, 42)*k0 + v.Higher64() - - //TODO why not use origin_len ? - w.setLower64(w.Lower64() + fetch64(t[pos+length-tailDone+16:])) - x = rotate64(x, 49)*k0 + w.Lower64() - w.setLower64(w.Lower64() + v.Lower64()) - v = weakHashLen32WithSeeds_3(t[pos+length-tailDone:], v.Lower64(), v.Higher64()) - } - // At this point our 48 bytes of state should contain more than - // enough information for a strong 128-bit hash. We use two - // different 48-byte-to-8-byte hashes to get a 16-byte final result. 
- x = hashLen16(x, v.Lower64()) - y = hashLen16(y, w.Lower64()) - - return Uint128{hashLen16(x+v.Higher64(), w.Higher64()) + y, - hashLen16(x+w.Higher64(), y+v.Higher64())} -} - -func CityHash128(s []byte, length uint32) (result Uint128) { - if length >= 16 { - result = CityHash128WithSeed(s[16:length], length-16, Uint128{fetch64(s) ^ k3, fetch64(s[8:])}) - } else if length >= 8 { - result = CityHash128WithSeed(nil, 0, Uint128{fetch64(s) ^ (uint64(length) * k0), fetch64(s[length-8:]) ^ k1}) - } else { - result = CityHash128WithSeed(s, length, Uint128{k0, k1}) - } - return -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/doc.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/doc.go deleted file mode 100644 index e604690..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -/** COPY from https://github.com/zentures/cityhash/ - -NOTE: The code is modified to be compatible with CityHash128 used in ClickHouse -*/ -package cityhash102 diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go index a91c2c6..8f0ca08 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go @@ -19,10 +19,10 @@ package column import ( "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" "strings" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "time" ) type offset struct { @@ -39,11 +39,18 @@ type Array struct { name string } +func (col *Array) Reset() { + col.values.Reset() + for i := range col.offsets { + col.offsets[i].values.Reset() + } +} + func (col *Array) Name() string { return col.name } -func (col *Array) parse(t Type) (_ *Array, err error) { +func (col *Array) parse(t Type, tz *time.Location) (_ *Array, err error) { col.chType = t var typeStr = string(t) @@ -59,7 +66,7 @@ parse: } } if col.depth != 0 { - if col.values, err = Type(typeStr).Column(col.name); err != nil { + if col.values, err = Type(typeStr).Column(col.name, tz); err != nil { return nil, err } offsetScanTypes := make([]reflect.Type, 0, col.depth) @@ -94,12 +101,12 @@ func (col *Array) ScanType() reflect.Type { func (col *Array) Rows() int { if len(col.offsets) != 0 { - return len(col.offsets[0].values.data) + return col.offsets[0].values.Rows() } return 0 } -func (col *Array) Row(i int, ptr bool) interface{} { +func (col *Array) Row(i int, ptr bool) any { value, err := col.scan(col.ScanType(), i) if err != nil { fmt.Println(err) @@ -107,7 +114,7 @@ func (col *Array) Row(i int, ptr bool) interface{} { return value.Interface() } -func (col *Array) Append(v 
interface{}) (nulls []uint8, err error) { +func (col *Array) Append(v any) (nulls []uint8, err error) { value := reflect.Indirect(reflect.ValueOf(v)) if value.Kind() != reflect.Slice { return nil, &ColumnConverterError{ @@ -125,7 +132,15 @@ func (col *Array) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Array) AppendRow(v interface{}) error { +func (col *Array) AppendRow(v any) error { + if col.depth == 1 { + // try to use reflection-free method. + return col.appendRowPlain(v) + } + return col.appendRowDefault(v) +} + +func (col *Array) appendRowDefault(v any) error { var elem reflect.Value switch v := v.(type) { case reflect.Value: @@ -148,19 +163,61 @@ func (col *Array) AppendRow(v interface{}) error { return col.append(elem, 0) } +func appendRowPlain[T any](col *Array, arr []T) error { + col.appendOffset(0, uint64(len(arr))) + for _, item := range arr { + if err := col.values.AppendRow(item); err != nil { + return err + } + } + return nil +} + +func appendNullableRowPlain[T any](col *Array, arr []*T) error { + col.appendOffset(0, uint64(len(arr))) + for _, item := range arr { + var err error + if item == nil { + err = col.values.AppendRow(nil) + } else { + err = col.values.AppendRow(item) + } + if err != nil { + return err + } + } + return nil +} + func (col *Array) append(elem reflect.Value, level int) error { if level < col.depth { - offset := uint64(elem.Len()) - if ln := len(col.offsets[level].values.data); ln != 0 { - offset += col.offsets[level].values.data[ln-1] - } - col.offsets[level].values.data = append(col.offsets[level].values.data, offset) - for i := 0; i < elem.Len(); i++ { - if err := col.append(elem.Index(i), level+1); err != nil { - return err + switch elem.Kind() { + // reflect.Value.Len() & reflect.Value.Index() is called in `append` method which is only valid for + // Slice, Array and String that make sense here. 
+ case reflect.Slice, reflect.Array, reflect.String: + col.appendOffset(level, uint64(elem.Len())) + for i := 0; i < elem.Len(); i++ { + el := elem.Index(i) + + if el.Kind() == reflect.Interface && !el.IsNil() { + el = el.Elem() + } + + if el.Kind() == reflect.Ptr && !el.IsNil() { + el = el.Elem() + } + + if err := col.append(el, level+1); err != nil { + return err + } } + return nil + } + return &ColumnConverterError{ + Op: "AppendRow", + To: "Array", + From: fmt.Sprintf("%T", elem), } - return nil } if elem.Kind() == reflect.Ptr && elem.IsNil() { return col.values.AppendRow(nil) @@ -168,49 +225,54 @@ func (col *Array) append(elem reflect.Value, level int) error { return col.values.AppendRow(elem.Interface()) } -func (col *Array) Decode(decoder *binary.Decoder, rows int) error { +func (col *Array) appendOffset(level int, offset uint64) { + if ln := col.offsets[level].values.Rows(); ln != 0 { + offset += col.offsets[level].values.col.Row(ln - 1) + } + col.offsets[level].values.col.Append(offset) +} + +func (col *Array) Decode(reader *proto.Reader, rows int) error { for _, offset := range col.offsets { - if err := offset.values.Decode(decoder, rows); err != nil { + if err := offset.values.col.DecodeColumn(reader, rows); err != nil { return err } switch { - case len(offset.values.data) > 0: - rows = int(offset.values.data[len(offset.values.data)-1]) + case offset.values.Rows() > 0: + rows = int(offset.values.col.Row(offset.values.col.Rows() - 1)) default: rows = 0 } } - return col.values.Decode(decoder, rows) + return col.values.Decode(reader, rows) } -func (col *Array) Encode(encoder *binary.Encoder) error { +func (col *Array) Encode(buffer *proto.Buffer) { for _, offset := range col.offsets { - if err := offset.values.Encode(encoder); err != nil { - return err - } + offset.values.col.EncodeColumn(buffer) } - return col.values.Encode(encoder) + col.values.Encode(buffer) } -func (col *Array) ReadStatePrefix(decoder *binary.Decoder) error { +func (col *Array) ReadStatePrefix(reader *proto.Reader) error { if serialize, ok := col.values.(CustomSerialization); ok { - if err := serialize.ReadStatePrefix(decoder); err != nil { + if err := serialize.ReadStatePrefix(reader); err != nil { return err } } return nil } -func (col *Array) WriteStatePrefix(encoder *binary.Encoder) error { +func (col *Array) WriteStatePrefix(buffer *proto.Buffer) error { if serialize, ok := col.values.(CustomSerialization); ok { - if err := serialize.WriteStatePrefix(encoder); err != nil { + if err := serialize.WriteStatePrefix(buffer); err != nil { return err } } return nil } -func (col *Array) ScanRow(dest interface{}, row int) error { +func (col *Array) ScanRow(dest any, row int) error { elem := reflect.Indirect(reflect.ValueOf(dest)) value, err := col.scan(elem.Type(), row) if err != nil { @@ -235,21 +297,17 @@ func (col *Array) scan(sliceType reflect.Type, row int) (reflect.Value, error) { } return subSlice, nil } - return reflect.Value{}, &Error{ - ColumnType: fmt.Sprint(sliceType.Kind()), - Err: fmt.Errorf("column %s - needs a slice or interface{}", col.Name()), - } } func (col *Array) scanSlice(sliceType reflect.Type, row int, level int) (reflect.Value, error) { // We could try and set - if it exceeds just return immediately offset := col.offsets[level] var ( - end = offset.values.data[row] + end = offset.values.col.Row(row) start = uint64(0) ) if row > 0 { - start = offset.values.data[row-1] + start = offset.values.col.Row(row - 1) } base := offset.scanType.Elem() isPtr := base.Kind() == reflect.Ptr @@ -264,7 
+322,7 @@ func (col *Array) scanSlice(sliceType reflect.Type, row int, level int) (reflect default: return reflect.Value{}, &Error{ ColumnType: fmt.Sprint(sliceType.Kind()), - Err: fmt.Errorf("column %s - needs a slice or interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a slice or any", col.Name()), } } @@ -324,8 +382,8 @@ func (col *Array) scanSlice(sliceType reflect.Type, row int, level int) (reflect func (col *Array) scanSliceOfObjects(sliceType reflect.Type, row int) (reflect.Value, error) { if sliceType.Kind() == reflect.Interface { - // catches interface{} - Note this swallows custom interfaces to which maps couldn't conform - subMap := make(map[string]interface{}) + // catches any - Note this swallows custom interfaces to which maps couldn't conform + subMap := make(map[string]any) return col.scanSliceOfMaps(reflect.SliceOf(reflect.TypeOf(subMap)), row) } else if sliceType.Kind() == reflect.Slice { // make a slice of the right type - we need this to be a slice of a type capable of taking an object as nested @@ -338,20 +396,19 @@ func (col *Array) scanSliceOfObjects(sliceType reflect.Type, row int) (reflect.V // tuples can be read as arrays return col.scanSlice(sliceType, row, 0) case reflect.Interface: - // catches []interface{} - Note this swallows custom interfaces to which maps could never conform - subMap := make(map[string]interface{}) + // catches []any - Note this swallows custom interfaces to which maps could never conform + subMap := make(map[string]any) return col.scanSliceOfMaps(reflect.SliceOf(reflect.TypeOf(subMap)), row) default: return reflect.Value{}, &Error{ ColumnType: fmt.Sprint(sliceType.Elem().Kind()), - Err: fmt.Errorf("column %s - needs a slice of objects or an interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a slice of objects or an any", col.Name()), } } - return reflect.Value{}, nil } return reflect.Value{}, &Error{ ColumnType: fmt.Sprint(sliceType.Kind()), - Err: fmt.Errorf("column %s - needs a slice or interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a slice or any", col.Name()), } } @@ -360,7 +417,7 @@ func (col *Array) scanSliceOfMaps(sliceType reflect.Type, row int) (reflect.Valu if sliceType.Kind() != reflect.Slice { return reflect.Value{}, &ColumnConverterError{ Op: "ScanRow", - To: fmt.Sprintf("%s", sliceType), + To: sliceType.String(), From: string(col.Type()), } } @@ -374,11 +431,11 @@ func (col *Array) scanSliceOfMaps(sliceType reflect.Type, row int) (reflect.Valu // Array(Tuple so depth 1 for JSON offset := col.offsets[0] var ( - end = offset.values.data[row] + end = offset.values.col.Row(row) start = uint64(0) ) if row > 0 { - start = offset.values.data[row-1] + start = offset.values.col.Row(row - 1) } if end-start > 0 { rSlice := reflect.MakeSlice(sliceType, 0, int(end-start)) @@ -398,7 +455,7 @@ func (col *Array) scanSliceOfStructs(sliceType reflect.Type, row int) (reflect.V if sliceType.Kind() != reflect.Slice { return reflect.Value{}, &ColumnConverterError{ Op: "ScanRow", - To: fmt.Sprintf("%s", sliceType), + To: sliceType.String(), From: string(col.Type()), } } @@ -412,14 +469,14 @@ func (col *Array) scanSliceOfStructs(sliceType reflect.Type, row int) (reflect.V // Array(Tuple so depth 1 for JSON offset := col.offsets[0] var ( - end = offset.values.data[row] + end = offset.values.col.Row(row) start = uint64(0) ) if row > 0 { - start = offset.values.data[row-1] + start = offset.values.col.Row(row - 1) } if end-start > 0 { - // create a slice of the type from the sliceType - if this might be 
interface{} as its driven by the target datastructure + // create a slice of the type from the sliceType - if this might be any as its driven by the target datastructure rSlice := reflect.MakeSlice(sliceType, 0, int(end-start)) for i := start; i < end; i++ { sStruct := reflect.New(sliceType.Elem()).Elem() diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go new file mode 100644 index 0000000..eeba8e0 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go @@ -0,0 +1,175 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by make codegen DO NOT EDIT. +// source: lib/column/codegen/array.tpl + +package column + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "github.com/ClickHouse/ch-go/proto" + "github.com/google/uuid" + "github.com/paulmach/orb" + "github.com/shopspring/decimal" + "math/big" + "net" + "net/netip" + "time" +) + +// appendRowPlain is a reflection-free realisation of append for plain arrays. 
+func (col *Array) appendRowPlain(v any) error { + switch tv := v.(type) { + case []float32: + return appendRowPlain(col, tv) + case []*float32: + return appendNullableRowPlain(col, tv) + case []float64: + return appendRowPlain(col, tv) + case []*float64: + return appendNullableRowPlain(col, tv) + case []int8: + return appendRowPlain(col, tv) + case []*int8: + return appendNullableRowPlain(col, tv) + case []int16: + return appendRowPlain(col, tv) + case []*int16: + return appendNullableRowPlain(col, tv) + case []int32: + return appendRowPlain(col, tv) + case []*int32: + return appendNullableRowPlain(col, tv) + case []int64: + return appendRowPlain(col, tv) + case []*int64: + return appendNullableRowPlain(col, tv) + case []uint8: + return appendRowPlain(col, tv) + case []*uint8: + return appendNullableRowPlain(col, tv) + case []uint16: + return appendRowPlain(col, tv) + case []*uint16: + return appendNullableRowPlain(col, tv) + case []uint32: + return appendRowPlain(col, tv) + case []*uint32: + return appendNullableRowPlain(col, tv) + case []uint64: + return appendRowPlain(col, tv) + case []*uint64: + return appendNullableRowPlain(col, tv) + case []string: + return appendRowPlain(col, tv) + case []*string: + return appendNullableRowPlain(col, tv) + case [][]byte: + return appendRowPlain(col, tv) + case []*[]byte: + return appendNullableRowPlain(col, tv) + case []sql.NullString: + return appendRowPlain(col, tv) + case []*sql.NullString: + return appendNullableRowPlain(col, tv) + case []int: + return appendRowPlain(col, tv) + case []*int: + return appendNullableRowPlain(col, tv) + case []uint: + return appendRowPlain(col, tv) + case []*uint: + return appendNullableRowPlain(col, tv) + case []big.Int: + return appendRowPlain(col, tv) + case []*big.Int: + return appendNullableRowPlain(col, tv) + case []decimal.Decimal: + return appendRowPlain(col, tv) + case []*decimal.Decimal: + return appendNullableRowPlain(col, tv) + case []bool: + return appendRowPlain(col, tv) + case []*bool: + return appendNullableRowPlain(col, tv) + case []sql.NullBool: + return appendRowPlain(col, tv) + case []*sql.NullBool: + return appendNullableRowPlain(col, tv) + case []time.Time: + return appendRowPlain(col, tv) + case []*time.Time: + return appendNullableRowPlain(col, tv) + case []sql.NullTime: + return appendRowPlain(col, tv) + case []*sql.NullTime: + return appendNullableRowPlain(col, tv) + case []uuid.UUID: + return appendRowPlain(col, tv) + case []*uuid.UUID: + return appendNullableRowPlain(col, tv) + case []netip.Addr: + return appendRowPlain(col, tv) + case []*netip.Addr: + return appendNullableRowPlain(col, tv) + case []net.IP: + return appendRowPlain(col, tv) + case []*net.IP: + return appendNullableRowPlain(col, tv) + case []proto.IPv6: + return appendRowPlain(col, tv) + case []*proto.IPv6: + return appendNullableRowPlain(col, tv) + case [][16]byte: + return appendRowPlain(col, tv) + case []*[16]byte: + return appendNullableRowPlain(col, tv) + case []orb.MultiPolygon: + return appendRowPlain(col, tv) + case []*orb.MultiPolygon: + return appendNullableRowPlain(col, tv) + case []orb.Point: + return appendRowPlain(col, tv) + case []*orb.Point: + return appendNullableRowPlain(col, tv) + case []orb.Polygon: + return appendRowPlain(col, tv) + case []*orb.Polygon: + return appendNullableRowPlain(col, tv) + case []orb.Ring: + return appendRowPlain(col, tv) + case []*orb.Ring: + return appendNullableRowPlain(col, tv) + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil 
{ + return &ColumnConverterError{ + Op: "AppendRow", + To: "Array", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.appendRowPlain(val) + } + return col.appendRowDefault(v) + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go index 3205ff6..49e179c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go @@ -18,19 +18,24 @@ package column import ( + "database/sql/driver" + "encoding/binary" "fmt" + "github.com/ClickHouse/ch-go/proto" "math/big" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type BigInt struct { size int - data []byte chType Type name string signed bool + col proto.Column +} + +func (col *BigInt) Reset() { + col.col.Reset() } func (col *BigInt) Name() string { @@ -46,10 +51,10 @@ func (col *BigInt) ScanType() reflect.Type { } func (col *BigInt) Rows() int { - return len(col.data) / col.size + return col.col.Rows() } -func (col *BigInt) Row(i int, ptr bool) interface{} { +func (col *BigInt) Row(i int, ptr bool) any { value := col.row(i) if ptr { return value @@ -57,7 +62,7 @@ func (col *BigInt) Row(i int, ptr bool) interface{} { return *value } -func (col *BigInt) ScanRow(dest interface{}, row int) error { +func (col *BigInt) ScanRow(dest any, row int) error { switch d := dest.(type) { case *big.Int: *d = *col.row(row) @@ -74,24 +79,37 @@ func (col *BigInt) ScanRow(dest interface{}, row int) error { return nil } -func (col *BigInt) Append(v interface{}) (nulls []uint8, err error) { +func (col *BigInt) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []big.Int: nulls = make([]uint8, len(v)) - for _, v := range v { - col.append(&v) + for i := range v { + col.append(&v[i]) } case []*big.Int: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.append(v) + case v[i] != nil: + col.append(v[i]) default: - col.data, nulls[i] = append(col.data, make([]byte, col.size)...), 1 + nulls[i] = 1 + col.append(big.NewInt(0)) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -101,7 +119,7 @@ func (col *BigInt) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *BigInt) AppendRow(v interface{}) error { +func (col *BigInt) AppendRow(v any) error { switch v := v.(type) { case big.Int: col.append(&v) @@ -110,11 +128,23 @@ func (col *BigInt) AppendRow(v interface{}) error { case v != nil: col.append(v) default: - col.data = append(col.data, make([]byte, col.size)...) + col.append(big.NewInt(0)) } case nil: - col.data = append(col.data, make([]byte, col.size)...) 
+ col.append(big.NewInt(0)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), @@ -124,23 +154,82 @@ func (col *BigInt) AppendRow(v interface{}) error { return nil } -func (col *BigInt) Decode(decoder *binary.Decoder, rows int) error { - col.data = make([]byte, rows*col.size) - return decoder.Raw(col.data) +func (col *BigInt) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *BigInt) Encode(encoder *binary.Encoder) error { - return encoder.Raw(col.data) +func (col *BigInt) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *BigInt) row(i int) *big.Int { - return rawToBigInt(col.data[i*col.size:(i+1)*col.size], col.signed) + b := make([]byte, col.size) + switch vCol := col.col.(type) { + case *proto.ColInt128: + v := vCol.Row(i) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.High) + return rawToBigInt(b, true) + case *proto.ColUInt128: + v := vCol.Row(i) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.High) + return rawToBigInt(b, false) + case *proto.ColInt256: + v := vCol.Row(i) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.Low.High) + binary.LittleEndian.PutUint64(b[128/8:192/8], v.High.Low) + binary.LittleEndian.PutUint64(b[192/8:256/8], v.High.High) + return rawToBigInt(b, true) + case *proto.ColUInt256: + v := vCol.Row(i) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.Low.High) + binary.LittleEndian.PutUint64(b[128/8:192/8], v.High.Low) + binary.LittleEndian.PutUint64(b[192/8:256/8], v.High.High) + return rawToBigInt(b, false) + } + return big.NewInt(0) } func (col *BigInt) append(v *big.Int) { dest := make([]byte, col.size) bigIntToRaw(dest, new(big.Int).Set(v)) - col.data = append(col.data, dest...) 
+ switch v := col.col.(type) { + case *proto.ColInt128: + v.Append(proto.Int128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }) + case *proto.ColUInt128: + v.Append(proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }) + case *proto.ColInt256: + v.Append(proto.Int256{ + Low: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }, + High: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[128/8 : 192/8]), + High: binary.LittleEndian.Uint64(dest[192/8 : 256/8]), + }, + }) + case *proto.ColUInt256: + v.Append(proto.UInt256{ + Low: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }, + High: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[128/8 : 192/8]), + High: binary.LittleEndian.Uint64(dest[192/8 : 256/8]), + }, + }) + } } func bigIntToRaw(dest []byte, v *big.Int) { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go index 9b3ef93..3699a3c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go @@ -18,15 +18,20 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type Bool struct { - values UInt8 - name string + col proto.ColBool + name string +} + +func (col *Bool) Reset() { + col.col.Reset() } func (col *Bool) Name() string { @@ -42,10 +47,10 @@ func (col *Bool) ScanType() reflect.Type { } func (col *Bool) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *Bool) Row(i int, ptr bool) interface{} { +func (col *Bool) Row(i int, ptr bool) any { val := col.row(i) if ptr { return &val @@ -53,13 +58,15 @@ func (col *Bool) Row(i int, ptr bool) interface{} { return val } -func (col *Bool) ScanRow(dest interface{}, row int) error { +func (col *Bool) ScanRow(dest any, row int) error { switch d := dest.(type) { case *bool: *d = col.row(row) case **bool: *d = new(bool) **d = col.row(row) + case sql.Scanner: + return d.Scan(col.row(row)) default: return &ColumnConverterError{ Op: "ScanRow", @@ -70,36 +77,52 @@ func (col *Bool) ScanRow(dest interface{}, row int) error { return nil } -func (col *Bool) Append(v interface{}) (nulls []uint8, err error) { +func (col *Bool) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []bool: - in := make([]uint8, 0, len(v)) for _, v := range v { - switch { - case v: - in = append(in, 1) - default: - in = append(in, 0) - } + col.col.Append(v) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) case []*bool: nulls = make([]uint8, len(v)) - in := make([]uint8, 0, len(v)) for i, v := range v { - var value uint8 + var value bool switch { case v != nil: if *v { - value = 1 + value = true } default: nulls[i] = 1 } - in = append(in, value) + col.col.Append(value) + } + case []sql.NullBool: + nulls = make([]uint8, len(v)) + for i := range v { + col.Append(v[i]) + } + case []*sql.NullBool: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.Append(v[i]) } - col.values.data = append(col.values.data, in...) 
default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Bool", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Bool", @@ -109,7 +132,7 @@ func (col *Bool) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Bool) AppendRow(v interface{}) error { +func (col *Bool) AppendRow(v any) error { var value bool switch v := v.(type) { case bool: @@ -118,33 +141,50 @@ func (col *Bool) AppendRow(v interface{}) error { if v != nil { value = *v } + case sql.NullBool: + switch v.Valid { + case true: + value = v.Bool + } + case *sql.NullBool: + switch v.Valid { + case true: + value = v.Bool + } case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Bool", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Bool", From: fmt.Sprintf("%T", v), } } - switch { - case value: - col.values.data = append(col.values.data, 1) - default: - col.values.data = append(col.values.data, 0) - } + col.col.Append(value) return nil } -func (col *Bool) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *Bool) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *Bool) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *Bool) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *Bool) row(i int) bool { - return col.values.data[i] == 1 + return col.col.Row(i) } var _ Interface = (*Bool)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column.go index 6994106..415344c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column.go @@ -19,12 +19,19 @@ package column import ( "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" + "regexp" "strings" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) +// column names which match this must be escaped - see https://clickhouse.com/docs/en/sql-reference/syntax/#identifiers +var escapeColRegex = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$") + +// to escape and unescape special chars +var colEscape = strings.NewReplacer("`", "\\`", "\\", "\\\\") +var colUnEscape = strings.NewReplacer("\\`", "`", "\\\\", "\\") + type Type string func (t Type) params() string { @@ -71,16 +78,17 @@ type Interface interface { Name() string Type() Type Rows() int - Row(i int, ptr bool) interface{} - ScanRow(dest interface{}, row int) error - Append(v interface{}) (nulls []uint8, err error) - AppendRow(v interface{}) error - Decode(decoder *binary.Decoder, rows int) error - Encode(*binary.Encoder) error + Row(i int, ptr bool) any + ScanRow(dest any, row int) error + Append(v any) (nulls []uint8, err error) + AppendRow(v any) error + Decode(reader *proto.Reader, rows int) error + Encode(buffer *proto.Buffer) ScanType() reflect.Type + Reset() } type CustomSerialization interface { - ReadStatePrefix(*binary.Decoder) error - WriteStatePrefix(*binary.Encoder) error + ReadStatePrefix(*proto.Reader) error + 
WriteStatePrefix(*proto.Buffer) error } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go index 73b85ab..5bd1180 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go @@ -21,7 +21,10 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "github.com/google/uuid" "github.com/paulmach/orb" "github.com/shopspring/decimal" @@ -32,7 +35,7 @@ import ( "time" ) -func (t Type) Column(name string) (Interface, error) { +func (t Type) Column(name string, tz *time.Location) (Interface, error) { switch t { case "Float32": return &Float32{name: name}, nil @@ -60,6 +63,7 @@ func (t Type) Column(name string) (Interface, error) { chType: t, name: name, signed: true, + col: &proto.ColInt128{}, }, nil case "UInt128": return &BigInt{ @@ -67,6 +71,7 @@ func (t Type) Column(name string) (Interface, error) { chType: t, name: name, signed: false, + col: &proto.ColUInt128{}, }, nil case "Int256": return &BigInt{ @@ -74,6 +79,7 @@ func (t Type) Column(name string) (Interface, error) { chType: t, name: name, signed: true, + col: &proto.ColInt256{}, }, nil case "UInt256": return &BigInt{ @@ -81,6 +87,7 @@ func (t Type) Column(name string) (Interface, error) { chType: t, name: name, signed: false, + col: &proto.ColUInt256{}, }, nil case "IPv4": return &IPv4{name: name}, nil @@ -89,15 +96,15 @@ func (t Type) Column(name string) (Interface, error) { case "Bool", "Boolean": return &Bool{name: name}, nil case "Date": - return &Date{name: name}, nil + return &Date{name: name, location: tz}, nil case "Date32": - return &Date32{name: name}, nil + return &Date32{name: name, location: tz}, nil case "UUID": return &UUID{name: name}, nil case "Nothing": return &Nothing{name: name}, nil case "Ring": - set, err := (&Array{name: name}).parse("Array(Point)") + set, err := (&Array{name: name}).parse("Array(Point)", tz) if err != nil { return nil, err } @@ -107,7 +114,7 @@ func (t Type) Column(name string) (Interface, error) { name: name, }, nil case "Polygon": - set, err := (&Array{name: name}).parse("Array(Ring)") + set, err := (&Array{name: name}).parse("Array(Ring)", tz) if err != nil { return nil, err } @@ -117,7 +124,7 @@ func (t Type) Column(name string) (Interface, error) { name: name, }, nil case "MultiPolygon": - set, err := (&Array{name: name}).parse("Array(Polygon)") + set, err := (&Array{name: name}).parse("Array(Polygon)", tz) if err != nil { return nil, err } @@ -129,38 +136,38 @@ func (t Type) Column(name string) (Interface, error) { case "Point": return &Point{name: name}, nil case "String": - return &String{name: name}, nil + return &String{name: name, col: colStrProvider()}, nil case "Object('json')": - return &JSONObject{name: name, root: true}, nil + return &JSONObject{name: name, root: true, tz: tz}, nil } switch strType := string(t); { case strings.HasPrefix(string(t), "Map("): - return (&Map{name: name}).parse(t) + return (&Map{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Tuple("): - return (&Tuple{name: name}).parse(t) + return (&Tuple{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Decimal("): return (&Decimal{name: name}).parse(t) case strings.HasPrefix(strType, "Nested("): - return (&Nested{name: name}).parse(t) + return (&Nested{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Array("): - return 
(&Array{name: name}).parse(t) + return (&Array{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Interval"): return (&Interval{name: name}).parse(t) case strings.HasPrefix(string(t), "Nullable"): - return (&Nullable{name: name}).parse(t) + return (&Nullable{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "FixedString"): return (&FixedString{name: name}).parse(t) case strings.HasPrefix(string(t), "LowCardinality"): - return (&LowCardinality{name: name}).parse(t) + return (&LowCardinality{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "SimpleAggregateFunction"): - return (&SimpleAggregateFunction{name: name}).parse(t) + return (&SimpleAggregateFunction{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Enum8") || strings.HasPrefix(string(t), "Enum16"): return Enum(t, name) case strings.HasPrefix(string(t), "DateTime64"): - return (&DateTime64{name: name}).parse(t) + return (&DateTime64{name: name}).parse(t, tz) case strings.HasPrefix(strType, "DateTime") && !strings.HasPrefix(strType, "DateTime64"): - return (&DateTime{name: name}).parse(t) + return (&DateTime{name: name}).parse(t, tz) } return nil, &UnsupportedColumnTypeError{ t: t, @@ -169,44 +176,44 @@ func (t Type) Column(name string) (Interface, error) { type ( Float32 struct { - data []float32 name string + col proto.ColFloat32 } Float64 struct { - data []float64 name string + col proto.ColFloat64 } Int8 struct { - data []int8 name string + col proto.ColInt8 } Int16 struct { - data []int16 name string + col proto.ColInt16 } Int32 struct { - data []int32 name string + col proto.ColInt32 } Int64 struct { - data []int64 name string + col proto.ColInt64 } UInt8 struct { - data []uint8 name string + col proto.ColUInt8 } UInt16 struct { - data []uint16 name string + col proto.ColUInt16 } UInt32 struct { - data []uint32 name string + col proto.ColUInt32 } UInt64 struct { - data []uint64 name string + col proto.ColUInt64 } ) @@ -241,8 +248,8 @@ var ( scanTypeTime = reflect.TypeOf(time.Time{}) scanTypeRing = reflect.TypeOf(orb.Ring{}) scanTypePoint = reflect.TypeOf(orb.Point{}) - scanTypeSlice = reflect.TypeOf([]interface{}{}) - scanTypeMap = reflect.TypeOf(map[string]interface{}{}) + scanTypeSlice = reflect.TypeOf([]any{}) + scanTypeMap = reflect.TypeOf(map[string]any{}) scanTypeBigInt = reflect.TypeOf(&big.Int{}) scanTypeString = reflect.TypeOf("") scanTypePolygon = reflect.TypeOf(orb.Polygon{}) @@ -263,18 +270,25 @@ func (col *Float32) ScanType() reflect.Type { } func (col *Float32) Rows() int { - return len(col.data) + return col.col.Rows() } -func (col *Float32) ScanRow(dest interface{}, row int) error { - value := *col +func (col *Float32) Reset() { + col.col.Reset() +} + +func (col *Float32) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *float32: - *d = value.data[row] + *d = value case **float32: *d = new(float32) - **d = value.data[row] + **d = value default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -285,29 +299,47 @@ func (col *Float32) ScanRow(dest interface{}, row int) error { return nil } -func (col *Float32) Row(i int, ptr bool) interface{} { - value := *col +func (col *Float32) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Float32) Append(v interface{}) (nulls []uint8, err error) { +func (col *Float32) Append(v any) (nulls 
[]uint8, err error) { switch v := v.(type) { case []float32: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*float32: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Float32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Float32", @@ -317,29 +349,55 @@ func (col *Float32) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Float32) AppendRow(v interface{}) error { +func (col *Float32) AppendRow(v any) error { switch v := v.(type) { case float32: - col.data = append(col.data, v) + col.col.Append(v) case *float32: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Float32", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(float32)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float32", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Float32) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Float32) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *Float64) Name() string { return col.name } @@ -353,18 +411,27 @@ func (col *Float64) ScanType() reflect.Type { } func (col *Float64) Rows() int { - return len(col.data) + return col.col.Rows() } -func (col *Float64) ScanRow(dest interface{}, row int) error { - value := *col +func (col *Float64) Reset() { + col.col.Reset() +} + +func (col *Float64) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *float64: - *d = value.data[row] + *d = value case **float64: *d = new(float64) - **d = value.data[row] + **d = value + case *sql.NullFloat64: + return d.Scan(value) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -375,29 +442,60 @@ func (col *Float64) ScanRow(dest interface{}, row int) error { return nil } -func (col *Float64) Row(i int, ptr bool) interface{} { - value := *col +func (col *Float64) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Float64) Append(v interface{}) (nulls []uint8, err error) { +func (col *Float64) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []float64: - col.data, nulls = 
append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*float64: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 + } + } + case []sql.NullFloat64: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullFloat64: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 } + col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Float64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Float64", @@ -407,29 +505,69 @@ func (col *Float64) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Float64) AppendRow(v interface{}) error { +func (col *Float64) AppendRow(v any) error { switch v := v.(type) { case float64: - col.data = append(col.data, v) + col.col.Append(v) case *float64: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) + case sql.NullFloat64: + switch v.Valid { + case true: + col.col.Append(v.Float64) + default: + col.col.Append(0) + } + case *sql.NullFloat64: + switch v.Valid { + case true: + col.col.Append(v.Float64) + default: + col.col.Append(0) + } default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Float64", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(float64)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float64", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Float64) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Float64) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *Int8) Name() string { return col.name } @@ -443,18 +581,32 @@ func (col *Int8) ScanType() reflect.Type { } func (col *Int8) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *Int8) Reset() { + col.col.Reset() } -func (col *Int8) ScanRow(dest interface{}, row int) error { - value := *col +func (col *Int8) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *int8: - *d = value.data[row] + *d = value case **int8: *d = new(int8) - **d = value.data[row] + **d = value + case *bool: + switch value { + case 0: + *d = false + default: + *d = true + } default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -465,29 +617,67 @@ func (col *Int8) ScanRow(dest interface{}, row int) error { return nil } -func (col *Int8) Row(i 
int, ptr bool) interface{} { - value := *col +func (col *Int8) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Int8) Append(v interface{}) (nulls []uint8, err error) { +func (col *Int8) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []int8: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*int8: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 + } + } + case []bool: + nulls = make([]uint8, len(v)) + for i := range v { + val := int8(0) + if v[i] { + val = 1 } + col.col.Append(val) + } + case []*bool: + nulls = make([]uint8, len(v)) + for i := range v { + val := int8(0) + if v[i] == nil { + nulls[i] = 1 + } else if *v[i] { + val = 1 + } + col.col.Append(val) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int8", @@ -497,29 +687,67 @@ func (col *Int8) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Int8) AppendRow(v interface{}) error { +func (col *Int8) AppendRow(v any) error { switch v := v.(type) { case int8: - col.data = append(col.data, v) + col.col.Append(v) case *int8: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) + case bool: + val := int8(0) + if v { + val = 1 + } + col.col.Append(val) + case *bool: + val := int8(0) + if *v { + val = 1 + } + col.col.Append(val) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Int8", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(int8)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int8", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Int8) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Int8) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *Int16) Name() string { return col.name } @@ -533,18 +761,27 @@ func (col *Int16) ScanType() reflect.Type { } func (col *Int16) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *Int16) Reset() { + col.col.Reset() } -func (col *Int16) ScanRow(dest interface{}, row int) error { - value := *col +func (col *Int16) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *int16: - *d = value.data[row] + *d = value case **int16: *d = new(int16) - **d = value.data[row] + **d = value + case 
*sql.NullInt16: + return d.Scan(value) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -555,29 +792,60 @@ func (col *Int16) ScanRow(dest interface{}, row int) error { return nil } -func (col *Int16) Row(i int, ptr bool) interface{} { - value := *col +func (col *Int16) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Int16) Append(v interface{}) (nulls []uint8, err error) { +func (col *Int16) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []int16: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*int16: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 + } + } + case []sql.NullInt16: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullInt16: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 } + col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int16", @@ -587,29 +855,69 @@ func (col *Int16) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Int16) AppendRow(v interface{}) error { +func (col *Int16) AppendRow(v any) error { switch v := v.(type) { case int16: - col.data = append(col.data, v) + col.col.Append(v) case *int16: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) + case sql.NullInt16: + switch v.Valid { + case true: + col.col.Append(v.Int16) + default: + col.col.Append(0) + } + case *sql.NullInt16: + switch v.Valid { + case true: + col.col.Append(v.Int16) + default: + col.col.Append(0) + } default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Int16", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(int16)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int16", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Int16) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Int16) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *Int32) Name() string { return col.name } @@ -623,18 +931,27 @@ func (col *Int32) ScanType() reflect.Type { } func (col *Int32) Rows() int { - return len(col.data) + return col.col.Rows() } -func (col *Int32) 
ScanRow(dest interface{}, row int) error { - value := *col +func (col *Int32) Reset() { + col.col.Reset() +} + +func (col *Int32) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *int32: - *d = value.data[row] + *d = value case **int32: *d = new(int32) - **d = value.data[row] + **d = value + case *sql.NullInt32: + return d.Scan(value) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -645,29 +962,60 @@ func (col *Int32) ScanRow(dest interface{}, row int) error { return nil } -func (col *Int32) Row(i int, ptr bool) interface{} { - value := *col +func (col *Int32) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Int32) Append(v interface{}) (nulls []uint8, err error) { +func (col *Int32) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []int32: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*int32: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 + } + } + case []sql.NullInt32: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullInt32: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 } + col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int32", @@ -677,29 +1025,69 @@ func (col *Int32) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Int32) AppendRow(v interface{}) error { +func (col *Int32) AppendRow(v any) error { switch v := v.(type) { case int32: - col.data = append(col.data, v) + col.col.Append(v) case *int32: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) + case sql.NullInt32: + switch v.Valid { + case true: + col.col.Append(v.Int32) + default: + col.col.Append(0) + } + case *sql.NullInt32: + switch v.Valid { + case true: + col.col.Append(v.Int32) + default: + col.col.Append(0) + } default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Int32", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(int32)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int32", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Int32) Decode(reader *proto.Reader, rows int) error { + return 
col.col.DecodeColumn(reader, rows) +} + +func (col *Int32) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *Int64) Name() string { return col.name } @@ -713,18 +1101,29 @@ func (col *Int64) ScanType() reflect.Type { } func (col *Int64) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *Int64) Reset() { + col.col.Reset() } -func (col *Int64) ScanRow(dest interface{}, row int) error { - value := *col +func (col *Int64) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *int64: - *d = value.data[row] + *d = value case **int64: *d = new(int64) - **d = value.data[row] + **d = value + case *time.Duration: + *d = time.Duration(value) + case *sql.NullInt64: + return d.Scan(value) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -735,29 +1134,60 @@ func (col *Int64) ScanRow(dest interface{}, row int) error { return nil } -func (col *Int64) Row(i int, ptr bool) interface{} { - value := *col +func (col *Int64) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *Int64) Append(v interface{}) (nulls []uint8, err error) { +func (col *Int64) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []int64: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*int64: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 + } + } + case []sql.NullInt64: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullInt64: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 } + col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int64", @@ -767,29 +1197,73 @@ func (col *Int64) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Int64) AppendRow(v interface{}) error { +func (col *Int64) AppendRow(v any) error { switch v := v.(type) { case int64: - col.data = append(col.data, v) + col.col.Append(v) case *int64: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) + case sql.NullInt64: + switch v.Valid { + case true: + col.col.Append(v.Int64) + default: + col.col.Append(0) + } + case *sql.NullInt64: + switch v.Valid { + case true: + col.col.Append(v.Int64) + default: + col.col.Append(0) + } + case time.Duration: + col.col.Append(int64(v)) + case *time.Duration: + col.col.Append(int64(*v)) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Int64", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int64", 
+ From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(int64)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int64", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *Int64) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Int64) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *UInt8) Name() string { return col.name } @@ -803,18 +1277,32 @@ func (col *UInt8) ScanType() reflect.Type { } func (col *UInt8) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *UInt8) Reset() { + col.col.Reset() } -func (col *UInt8) ScanRow(dest interface{}, row int) error { - value := *col +func (col *UInt8) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *uint8: - *d = value.data[row] + *d = value case **uint8: *d = new(uint8) - **d = value.data[row] + **d = value + case *bool: + switch value { + case 0: + *d = false + default: + *d = true + } default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -825,29 +1313,47 @@ func (col *UInt8) ScanRow(dest interface{}, row int) error { return nil } -func (col *UInt8) Row(i int, ptr bool) interface{} { - value := *col +func (col *UInt8) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *UInt8) Append(v interface{}) (nulls []uint8, err error) { +func (col *UInt8) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []uint8: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*uint8: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt8", @@ -857,35 +1363,61 @@ func (col *UInt8) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *UInt8) AppendRow(v interface{}) error { +func (col *UInt8) AppendRow(v any) error { switch v := v.(type) { case uint8: - col.data = append(col.data, v) + col.col.Append(v) case *uint8: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) case bool: var t uint8 if v { t = 1 } - col.data = append(col.data, t) + col.col.Append(t) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "UInt8", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt8", + From: 
fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(uint8)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt8", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *UInt8) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *UInt8) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *UInt16) Name() string { return col.name } @@ -899,18 +1431,25 @@ func (col *UInt16) ScanType() reflect.Type { } func (col *UInt16) Rows() int { - return len(col.data) + return col.col.Rows() } -func (col *UInt16) ScanRow(dest interface{}, row int) error { - value := *col +func (col *UInt16) Reset() { + col.col.Reset() +} + +func (col *UInt16) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *uint16: - *d = value.data[row] + *d = value case **uint16: *d = new(uint16) - **d = value.data[row] + **d = value default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -921,29 +1460,47 @@ func (col *UInt16) ScanRow(dest interface{}, row int) error { return nil } -func (col *UInt16) Row(i int, ptr bool) interface{} { - value := *col +func (col *UInt16) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *UInt16) Append(v interface{}) (nulls []uint8, err error) { +func (col *UInt16) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []uint16: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*uint16: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt16", @@ -953,29 +1510,55 @@ func (col *UInt16) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *UInt16) AppendRow(v interface{}) error { +func (col *UInt16) AppendRow(v any) error { switch v := v.(type) { case uint16: - col.data = append(col.data, v) + col.col.Append(v) case *uint16: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "UInt16", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == 
col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(uint16)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt16", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *UInt16) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *UInt16) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *UInt32) Name() string { return col.name } @@ -989,18 +1572,25 @@ func (col *UInt32) ScanType() reflect.Type { } func (col *UInt32) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *UInt32) Reset() { + col.col.Reset() } -func (col *UInt32) ScanRow(dest interface{}, row int) error { - value := *col +func (col *UInt32) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *uint32: - *d = value.data[row] + *d = value case **uint32: *d = new(uint32) - **d = value.data[row] + **d = value default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -1011,29 +1601,47 @@ func (col *UInt32) ScanRow(dest interface{}, row int) error { return nil } -func (col *UInt32) Row(i int, ptr bool) interface{} { - value := *col +func (col *UInt32) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *UInt32) Append(v interface{}) (nulls []uint8, err error) { +func (col *UInt32) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []uint32: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*uint32: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt32", @@ -1043,29 +1651,55 @@ func (col *UInt32) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *UInt32) AppendRow(v interface{}) error { +func (col *UInt32) AppendRow(v any) error { switch v := v.(type) { case uint32: - col.data = append(col.data, v) + col.col.Append(v) case *uint32: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "UInt32", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(uint32)) + } else { + return 
&ColumnConverterError{ + Op: "AppendRow", + To: "UInt32", + From: fmt.Sprintf("%T", v), + } } } return nil } +func (col *UInt32) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *UInt32) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} + func (col *UInt64) Name() string { return col.name } @@ -1079,18 +1713,25 @@ func (col *UInt64) ScanType() reflect.Type { } func (col *UInt64) Rows() int { - return len(col.data) + return col.col.Rows() +} + +func (col *UInt64) Reset() { + col.col.Reset() } -func (col *UInt64) ScanRow(dest interface{}, row int) error { - value := *col +func (col *UInt64) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *uint64: - *d = value.data[row] + *d = value case **uint64: *d = new(uint64) - **d = value.data[row] + **d = value default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(value) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -1101,29 +1742,47 @@ func (col *UInt64) ScanRow(dest interface{}, row int) error { return nil } -func (col *UInt64) Row(i int, ptr bool) interface{} { - value := *col +func (col *UInt64) Row(i int, ptr bool) any { + value := col.col.Row(i) if ptr { - return &value.data[i] + return &value } - return value.data[i] + return value } -func (col *UInt64) Append(v interface{}) (nulls []uint8, err error) { +func (col *UInt64) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []uint64: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } case []*uint64: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.data = append(col.data, *v) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.data, nulls[i] = append(col.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt64", @@ -1133,25 +1792,51 @@ func (col *UInt64) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *UInt64) AppendRow(v interface{}) error { +func (col *UInt64) AppendRow(v any) error { switch v := v.(type) { case uint64: - col.data = append(col.data, v) + col.col.Append(v) case *uint64: switch { case v != nil: - col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, 0) + col.col.Append(0) } case nil: - col.data = append(col.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "UInt64", - From: fmt.Sprintf("%T", v), + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { + col.col.Append(rv.Convert(col.ScanType()).Interface().(uint64)) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt64", + From: fmt.Sprintf("%T", v), + } } } return nil } + +func (col *UInt64) Decode(reader 
*proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *UInt64) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go new file mode 100644 index 0000000..03f9369 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go @@ -0,0 +1,46 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import "github.com/ClickHouse/ch-go/proto" + +// ColStrProvider defines provider of proto.ColStr +type ColStrProvider func() proto.ColStr + +// colStrProvider provide proto.ColStr for Column() when type is String +var colStrProvider ColStrProvider = defaultColStrProvider + +// defaultColStrProvider defines sample provider for proto.ColStr +func defaultColStrProvider() proto.ColStr { + return proto.ColStr{} +} + +// issue: https://github.com/ClickHouse/clickhouse-go/issues/1164 +// WithAllocBufferColStrProvider allow pre alloc buffer cap for proto.ColStr +// +// It is more suitable for scenarios where a lot of data is written in batches +func WithAllocBufferColStrProvider(cap int) { + colStrProvider = func() proto.ColStr { + return proto.ColStr{Buf: make([]byte, 0, cap)} + } +} + +// WithColStrProvider more flexible than WithAllocBufferColStrProvider, such as use sync.Pool +func WithColStrProvider(provider ColStrProvider) { + colStrProvider = provider +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_safe_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_safe_gen.go deleted file mode 100644 index f5d9b7b..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_safe_gen.go +++ /dev/null @@ -1,228 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build !amd64 && !arm64 -// +build !amd64,!arm64 - -// Code generated by make codegen DO NOT EDIT. 
-// source: lib/column/codegen/column_safe.tpl - -package column - -import ( - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" -) - -func (col *Float32) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Float32() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Float32) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Float32(v); err != nil { - return err - } - } - return nil -} - -func (col *Float64) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Float64() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Float64) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Float64(v); err != nil { - return err - } - } - return nil -} - -func (col *Int8) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Int8() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Int8) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Int8(v); err != nil { - return err - } - } - return nil -} - -func (col *Int16) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Int16() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Int16) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Int16(v); err != nil { - return err - } - } - return nil -} - -func (col *Int32) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Int32() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Int32) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Int32(v); err != nil { - return err - } - } - return nil -} - -func (col *Int64) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.Int64() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *Int64) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.Int64(v); err != nil { - return err - } - } - return nil -} - -func (col *UInt8) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.UInt8() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *UInt8) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.UInt8(v); err != nil { - return err - } - } - return nil -} - -func (col *UInt16) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.UInt16() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *UInt16) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.UInt16(v); err != nil { - return err - } - } - return nil -} - -func (col *UInt32) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.UInt32() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *UInt32) Encode(encoder *binary.Encoder) error { - for _, v 
:= range col.data { - if err := encoder.UInt32(v); err != nil { - return err - } - } - return nil -} - -func (col *UInt64) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.UInt64() - if err != nil { - return err - } - col.data = append(col.data, v) - } - return nil -} - -func (col *UInt64) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.UInt64(v); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_unsafe_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_unsafe_gen.go deleted file mode 100644 index f77aa53..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_unsafe_gen.go +++ /dev/null @@ -1,410 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build amd64 || arm64 -// +build amd64 arm64 - -// Code generated by make codegen DO NOT EDIT. -// source: lib/column/codegen/column_safe.tpl - -package column - -import ( - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" - "reflect" - "unsafe" -) - -func (col *Float32) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 32 / 8 - - col.data = append(col.data, make([]float32, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Float32) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 32 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *Float64) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 64 / 8 - - col.data = append(col.data, make([]float64, rows)...) 
- - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Float64) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 64 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *Int8) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 8 / 8 - - col.data = append(col.data, make([]int8, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Int8) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 8 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *Int16) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 16 / 8 - - col.data = append(col.data, make([]int16, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Int16) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 16 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *Int32) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 32 / 8 - - col.data = append(col.data, make([]int32, rows)...) 
- - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Int32) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 32 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *Int64) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 64 / 8 - - col.data = append(col.data, make([]int64, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *Int64) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 64 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *UInt8) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 8 / 8 - - col.data = append(col.data, make([]uint8, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *UInt8) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 8 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *UInt16) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 16 / 8 - - col.data = append(col.data, make([]uint16, rows)...) 
- - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *UInt16) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 16 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *UInt32) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 32 / 8 - - col.data = append(col.data, make([]uint32, rows)...) - - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *UInt32) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 32 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} - -func (col *UInt64) Decode(decoder *binary.Decoder, rows int) error { - if rows == 0 { - return nil - } - const size = 64 / 8 - - col.data = append(col.data, make([]uint64, rows)...) 
- - var dst []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - if err := decoder.Raw(dst); err != nil { - return err - } - return nil -} - -func (col *UInt64) Encode(encoder *binary.Encoder) error { - if len(col.data) == 0 { - return nil - } - const size = 64 / 8 - scratch := make([]byte, size*len(col.data)) - { - var src []byte - slice := (*reflect.SliceHeader)(unsafe.Pointer(&src)) - slice.Data = (*reflect.SliceHeader)(unsafe.Pointer(col)).Data - slice.Len = len(col.data) * size - slice.Cap = cap(col.data) * size - - copy(scratch, src) - } - return encoder.Raw(scratch) -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go index 0232548..16d46e1 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go @@ -18,11 +18,13 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" "reflect" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" ) var ( @@ -30,9 +32,24 @@ var ( maxDate, _ = time.Parse("2006-01-02 15:04:05", "2106-01-01 00:00:00") ) +const ( + defaultDateFormatNoZone = "2006-01-02" + defaultDateFormatWithZone = "2006-01-02 -07:00" +) + type Date struct { - values Int16 - name string + col proto.ColDate + name string + location *time.Location +} + +func (col *Date) parse(t Type, tz *time.Location) (_ *Date, err error) { + col.location = tz + return col, nil +} + +func (col *Date) Reset() { + col.col.Reset() } func (col *Date) Name() string { @@ -48,10 +65,10 @@ func (col *Date) ScanType() reflect.Type { } func (col *Date) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *Date) Row(i int, ptr bool) interface{} { +func (col *Date) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -59,14 +76,19 @@ func (col *Date) Row(i int, ptr bool) interface{} { return value } -func (col *Date) ScanRow(dest interface{}, row int) error { +func (col *Date) ScanRow(dest any, row int) error { switch d := dest.(type) { case *time.Time: *d = col.row(row) case **time.Time: *d = new(time.Time) **d = col.row(row) + case *sql.NullTime: + return d.Scan(col.row(row)) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row)) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -76,31 +98,72 @@ func (col *Date) ScanRow(dest interface{}, row int) error { return nil } -func (col *Date) Append(v interface{}) (nulls []uint8, err error) { +func (col *Date) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []time.Time: - in := make([]int16, 0, len(v)) for _, t := range v { - if err := dateOverflow(minDate, maxDate, t, "2006-01-02"); err != nil { - return nil, err - } - in = append(in, int16(t.Unix()/secInDay)) + col.col.Append(t) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) case []*time.Time: nulls = make([]uint8, len(v)) for i, v := range v { switch { case v != nil: - if err := dateOverflow(minDate, maxDate, *v, "2006-01-02"); err != nil { + col.col.Append(*v) + default: + nulls[i] = 1 + col.col.Append(time.Time{}) + } + } + case []sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullTime: + nulls = 
make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.AppendRow(v[i]) + } + case []string: + nulls = make([]uint8, len(v)) + for i := range v { + value, err := col.parseDate(v[i]) + if err != nil { + return nil, err + } + col.col.Append(value) + } + case []*string: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil || *v[i] == "" { + nulls[i] = 1 + col.col.Append(time.Time{}) + } else { + value, err := col.parseDate(*v[i]) + if err != nil { return nil, err } - col.values.data = append(col.values.data, int16(v.Unix()/secInDay)) - default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(value) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Date", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Date", @@ -110,43 +173,111 @@ func (col *Date) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Date) AppendRow(v interface{}) error { - var date int16 +func (col *Date) AppendRow(v any) error { switch v := v.(type) { case time.Time: - if err := dateOverflow(minDate, maxDate, v, "2006-01-02"); err != nil { + col.col.Append(v) + case *time.Time: + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append(time.Time{}) + } + case sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case *sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case nil: + col.col.Append(time.Time{}) + case string: + datetime, err := col.parseDate(v) + if err != nil { return err } - date = int16(v.Unix() / secInDay) - case *time.Time: - if v != nil { - if err := dateOverflow(minDate, maxDate, *v, "2006-01-02"); err != nil { + col.col.Append(datetime) + case *string: + if v == nil || *v == "" { + col.col.Append(time.Time{}) + } else { + datetime, err := col.parseDate(*v) + if err != nil { return err } - date = int16(v.Unix() / secInDay) + col.col.Append(datetime) } - case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Date", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + s, ok := v.(fmt.Stringer) + if ok { + return col.AppendRow(s.String()) + } return &ColumnConverterError{ Op: "AppendRow", To: "Date", From: fmt.Sprintf("%T", v), } } - col.values.data = append(col.values.data, date) return nil } -func (col *Date) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func parseDate(value string, minDate time.Time, maxDate time.Time, location *time.Location) (tv time.Time, err error) { + if location == nil { + location = time.Local + } + if tv, err = time.Parse(defaultDateFormatWithZone, value); err == nil { + return tv, nil + } + if tv, err = time.Parse(defaultDateFormatNoZone, value); err == nil { + return time.Date( + tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), location, + ), nil + } + return time.Time{}, err } -func (col *Date) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *Date) parseDate(value string) (tv time.Time, err error) { + return 
parseDate(value, minDate, maxDate, col.location) +} + +func (col *Date) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Date) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *Date) row(i int) time.Time { - return time.Unix(int64(col.values.data[i])*secInDay, 0).UTC() + t := col.col.Row(i) + + if col.location != nil { + // proto.Date is normalized as time.Time with UTC timezone. + // We make sure Date return from ClickHouse matches server timezone or user defined location. + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), col.location) + } + return t } var _ Interface = (*Date)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go index e5b6d7b..174b376 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go @@ -18,21 +18,28 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" "reflect" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" ) var ( - minDate32, _ = time.Parse("2006-01-02 15:04:05", "1925-01-01 00:00:00") - maxDate32, _ = time.Parse("2006-01-02 15:04:05", "2283-11-11 00:00:00") + minDate32, _ = time.Parse("2006-01-02 15:04:05", "1900-01-01 00:00:00") + maxDate32, _ = time.Parse("2006-01-02 15:04:05", "2299-12-31 00:00:00") ) type Date32 struct { - values Int32 - name string + col proto.ColDate32 + name string + location *time.Location +} + +func (col *Date32) Reset() { + col.col.Reset() } func (col *Date32) Name() string { @@ -48,10 +55,10 @@ func (col *Date32) ScanType() reflect.Type { } func (col *Date32) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *Date32) Row(i int, ptr bool) interface{} { +func (col *Date32) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -59,14 +66,19 @@ func (col *Date32) Row(i int, ptr bool) interface{} { return value } -func (col *Date32) ScanRow(dest interface{}, row int) error { +func (col *Date32) ScanRow(dest any, row int) error { switch d := dest.(type) { case *time.Time: *d = col.row(row) case **time.Time: *d = new(time.Time) **d = col.row(row) + case *sql.NullTime: + return d.Scan(col.row(row)) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row)) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -76,31 +88,72 @@ func (col *Date32) ScanRow(dest interface{}, row int) error { return nil } -func (col *Date32) Append(v interface{}) (nulls []uint8, err error) { +func (col *Date32) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []time.Time: - in := make([]int32, 0, len(v)) for _, t := range v { - if err := dateOverflow(minDate32, maxDate32, t, "2006-01-02"); err != nil { - return nil, err - } - in = append(in, timeToInt32(t)) + col.col.Append(t) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) case []*time.Time: nulls = make([]uint8, len(v)) for i, v := range v { switch { case v != nil: - if err := dateOverflow(minDate32, maxDate32, *v, "2006-01-02"); err != nil { + col.col.Append(*v) + default: + nulls[i] = 1 + col.col.Append(time.Time{}) + } + } + case []sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullTime: + nulls = 
make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.AppendRow(v[i]) + } + case []string: + nulls = make([]uint8, len(v)) + for i := range v { + value, err := col.parseDate(v[i]) + if err != nil { + return nil, err + } + col.col.Append(value) + } + case []*string: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil || *v[i] == "" { + nulls[i] = 1 + col.col.Append(time.Time{}) + } else { + value, err := col.parseDate(*v[i]) + if err != nil { return nil, err } - col.values.data = append(col.values.data, timeToInt32(*v)) - default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(value) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Date32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Date32", @@ -110,47 +163,96 @@ func (col *Date32) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Date32) AppendRow(v interface{}) error { - var date int32 +func (col *Date32) AppendRow(v any) error { switch v := v.(type) { case time.Time: - if err := dateOverflow(minDate32, maxDate32, v, "2006-01-02"); err != nil { + col.col.Append(v) + case *time.Time: + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append(time.Time{}) + } + case sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case *sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case nil: + col.col.Append(time.Time{}) + case string: + value, err := col.parseDate(v) + if err != nil { return err } - date = timeToInt32(v) - case *time.Time: - if v != nil { - if err := dateOverflow(minDate32, maxDate32, *v, "2006-01-02"); err != nil { + col.col.Append(value) + case *string: + if v == nil || *v == "" { + col.col.Append(time.Time{}) + } else { + value, err := col.parseDate(*v) + if err != nil { return err } - date = timeToInt32(*v) + col.col.Append(value) } - case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Date32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + s, ok := v.(fmt.Stringer) + if ok { + return col.AppendRow(s.String()) + } return &ColumnConverterError{ Op: "AppendRow", To: "Date32", From: fmt.Sprintf("%T", v), } } - col.values.data = append(col.values.data, date) return nil } -func (col *Date32) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *Date32) parseDate(value string) (datetime time.Time, err error) { + return parseDate(value, minDate32, maxDate32, col.location) } -func (col *Date32) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *Date32) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *Date32) row(i int) time.Time { - return time.Unix((int64(col.values.data[i]) * secInDay), 0).UTC() +func (col *Date32) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } -func timeToInt32(t time.Time) int32 { - return int32(t.Unix() / secInDay) +func (col *Date32) row(i int) time.Time { + t := col.col.Row(i) + + if 
col.location != nil { + // proto.Date is normalized as time.Time with UTC timezone. + // We make sure Date return from ClickHouse matches server timezone or user defined location. + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), col.location) + } + return t } var _ Interface = (*Date32)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go index 2f9f685..027931a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go @@ -18,12 +18,15 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" "reflect" "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/timezone" ) @@ -32,25 +35,36 @@ var ( maxDateTime, _ = time.Parse("2006-01-02 15:04:05", "2105-12-31 23:59:59") ) +const ( + defaultDateTimeFormatNoZone = "2006-01-02 15:04:05" + defaultDateTimeFormatWithZone = "2006-01-02 15:04:05 -07:00" +) + type DateTime struct { - chType Type - values UInt32 - timezone *time.Location - name string + chType Type + name string + col proto.ColDateTime +} + +func (col *DateTime) Reset() { + col.col.Reset() } func (col *DateTime) Name() string { return col.name } -func (col *DateTime) parse(t Type) (_ *DateTime, err error) { +func (col *DateTime) parse(t Type, tz *time.Location) (_ *DateTime, err error) { if col.chType = t; col.chType == "DateTime" { + col.col.Location = tz return col, nil } var name = strings.TrimSuffix(strings.TrimPrefix(string(t), "DateTime('"), "')") - if col.timezone, err = timezone.Load(name); err != nil { + timezone, err := timezone.Load(name) + if err != nil { return nil, err } + col.col.Location = timezone return col, nil } @@ -63,10 +77,10 @@ func (col *DateTime) ScanType() reflect.Type { } func (col *DateTime) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *DateTime) Row(i int, ptr bool) interface{} { +func (col *DateTime) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -74,14 +88,19 @@ func (col *DateTime) Row(i int, ptr bool) interface{} { return value } -func (col *DateTime) ScanRow(dest interface{}, row int) error { +func (col *DateTime) ScanRow(dest any, row int) error { switch d := dest.(type) { case *time.Time: *d = col.row(row) case **time.Time: *d = new(time.Time) **d = col.row(row) + case *sql.NullTime: + return d.Scan(col.row(row)) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row)) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -91,31 +110,92 @@ func (col *DateTime) ScanRow(dest interface{}, row int) error { return nil } -func (col *DateTime) Append(v interface{}) (nulls []uint8, err error) { +func (col *DateTime) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { + // we assume int64 is in seconds and don't currently scale to the precision + case []int64: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(time.Unix(v[i], 0)) + } + case []*int64: + nulls = make([]uint8, len(v)) + for i := range v { + switch { + case v[i] != nil: + col.col.Append(time.Unix(*v[i], 0)) + default: + col.col.Append(time.Time{}) + nulls[i] = 1 + } + } case []time.Time: - in := make([]uint32, 0, len(v)) - for _, t := range v { - if err := dateOverflow(minDateTime, 
maxDateTime, t, "2006-01-02 15:04:05"); err != nil { + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) + } + + case []*time.Time: + nulls = make([]uint8, len(v)) + for i := range v { + switch { + case v[i] != nil: + col.col.Append(*v[i]) + default: + nulls[i] = 1 + col.col.Append(time.Time{}) + } + } + case []sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.AppendRow(v[i]) + } + case []string: + nulls = make([]uint8, len(v)) + for i := range v { + value, err := col.parseDateTime(v[i]) + if err != nil { return nil, err } - in = append(in, uint32(t.Unix())) + col.col.Append(value) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) - case []*time.Time: + case []*string: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - if err := dateOverflow(minDateTime, maxDateTime, *v, "2006-01-02 15:04:05"); err != nil { + case v[i] == nil || *v[i] == "": + nulls[i] = 1 + col.col.Append(time.Time{}) + default: + value, err := col.parseDateTime(*v[i]) + if err != nil { return nil, err } - col.values.data = append(col.values.data, uint32(v.Unix())) - default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(value) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "DateTime", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "DateTime", @@ -125,47 +205,108 @@ func (col *DateTime) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *DateTime) AppendRow(v interface{}) error { - var datetime uint32 +func (col *DateTime) AppendRow(v any) error { switch v := v.(type) { + // we assume int64 is in seconds and don't currently scale to the precision + case int64: + col.col.Append(time.Unix(v, 0)) + case *int64: + switch { + case v != nil: + col.col.Append(time.Unix(*v, 0)) + default: + col.col.Append(time.Time{}) + } case time.Time: - if err := dateOverflow(minDateTime, maxDateTime, v, "2006-01-02 15:04:05"); err != nil { + col.col.Append(v) + case *time.Time: + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append(time.Time{}) + } + case sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case *sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case nil: + col.col.Append(time.Time{}) + case string: + dateTime, err := col.parseDateTime(v) + if err != nil { return err } - datetime = uint32(v.Unix()) - case *time.Time: - if v != nil { - if err := dateOverflow(minDateTime, maxDateTime, *v, "2006-01-02 15:04:05"); err != nil { + col.col.Append(dateTime) + case *string: + if v == nil || *v == "" { + col.col.Append(time.Time{}) + } else { + dateTime, err := col.parseDateTime(*v) + if err != nil { return err } - datetime = uint32(v.Unix()) + col.col.Append(dateTime) } - case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "DateTime", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return 
col.AppendRow(val) + } + s, ok := v.(fmt.Stringer) + if ok { + return col.AppendRow(s.String()) + } return &ColumnConverterError{ Op: "AppendRow", To: "DateTime", From: fmt.Sprintf("%T", v), } } - col.values.data = append(col.values.data, datetime) return nil } -func (col *DateTime) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *DateTime) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *DateTime) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *DateTime) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *DateTime) row(i int) time.Time { - v := time.Unix(int64(col.values.data[i]), 0) - if col.timezone != nil { - v = v.In(col.timezone) - } + v := col.col.Row(i) return v } +func (col *DateTime) parseDateTime(value string) (tv time.Time, err error) { + if tv, err = time.Parse(defaultDateTimeFormatWithZone, value); err == nil { + return tv, nil + } + if tv, err = time.Parse(defaultDateTimeFormatNoZone, value); err == nil { + return time.Date( + tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), time.Local, + ), nil + } + return time.Time{}, err +} + var _ Interface = (*DateTime)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go index 519713e..518176e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go @@ -18,6 +18,8 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" "math" "reflect" @@ -25,41 +27,59 @@ import ( "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/timezone" ) var ( - minDateTime64, _ = time.Parse("2006-01-02 15:04:05", "1925-01-01 00:00:00") - maxDateTime64, _ = time.Parse("2006-01-02 15:04:05", "2283-11-11 00:00:00") + minDateTime64, _ = time.Parse("2006-01-02 15:04:05", "1900-01-01 00:00:00") + maxDateTime64, _ = time.Parse("2006-01-02 15:04:05", "2262-04-11 23:47:16") +) + +const ( + defaultDateTime64FormatNoZone = "2006-01-02 15:04:05.999999999" + defaultDateTime64FormatWithZone = "2006-01-02 15:04:05.999999999 -07:00" ) type DateTime64 struct { - chType Type - values Int64 - timezone *time.Location - precision int - name string + chType Type + timezone *time.Location + name string + col proto.ColDateTime64 +} + +func (col *DateTime64) Reset() { + col.col.Reset() } func (col *DateTime64) Name() string { return col.name } -func (col *DateTime64) parse(t Type) (_ Interface, err error) { +func (col *DateTime64) parse(t Type, tz *time.Location) (_ Interface, err error) { col.chType = t switch params := strings.Split(t.params(), ","); len(params) { case 2: - if col.precision, err = strconv.Atoi(params[0]); err != nil { + precision, err := strconv.ParseInt(params[0], 10, 8) + if err != nil { return nil, err } - if col.timezone, err = timezone.Load(params[1][2 : len(params[1])-1]); err != nil { + p := byte(precision) + col.col.WithPrecision(proto.Precision(p)) + timezone, err := timezone.Load(params[1][2 : len(params[1])-1]) + if err != nil { return nil, err } + col.col.WithLocation(timezone) case 1: - if col.precision, err = strconv.Atoi(params[0]); err != nil { + precision, err := strconv.ParseInt(params[0], 10, 8) + if err != nil { 
return nil, err } + p := byte(precision) + col.col.WithPrecision(proto.Precision(p)) + col.col.WithLocation(tz) default: return nil, &UnsupportedColumnTypeError{ t: t, @@ -77,10 +97,10 @@ func (col *DateTime64) ScanType() reflect.Type { } func (col *DateTime64) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *DateTime64) Row(i int, ptr bool) interface{} { +func (col *DateTime64) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -88,14 +108,19 @@ func (col *DateTime64) Row(i int, ptr bool) interface{} { return value } -func (col *DateTime64) ScanRow(dest interface{}, row int) error { +func (col *DateTime64) ScanRow(dest any, row int) error { switch d := dest.(type) { case *time.Time: *d = col.row(row) case **time.Time: *d = new(time.Time) **d = col.row(row) + case *sql.NullTime: + return d.Scan(col.row(row)) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row)) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -105,53 +130,77 @@ func (col *DateTime64) ScanRow(dest interface{}, row int) error { return nil } -func (col *DateTime64) Append(v interface{}) (nulls []uint8, err error) { +func (col *DateTime64) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { + // we assume int64 is in milliseconds and don't currently scale to the precision - no tests to indicate intended + // historical behaviour case []int64: - col.values.data, nulls = append(col.values.data, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(time.UnixMilli(v[i])) + } case []*int64: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.values.data = append(col.values.data, *v) + case v[i] != nil: + col.col.Append(time.UnixMilli(*v[i])) default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(time.UnixMilli(0)) + nulls[i] = 1 } } case []time.Time: - in := make([]int64, 0, len(v)) - for _, t := range v { - if err := dateOverflow(minDateTime64, maxDateTime64, t, "2006-01-02 15:04:05"); err != nil { - return nil, err - } - in = append(in, col.timeToInt64(t)) + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(v[i]) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) case []*time.Time: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - if err := dateOverflow(minDateTime64, maxDateTime64, *v, "2006-01-02 15:04:05"); err != nil { - return nil, err - } - col.values.data = append(col.values.data, col.timeToInt64(*v)) + case v[i] != nil: + col.col.Append(*v[i]) default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(time.Time{}) + nulls[i] = 1 } } case []string: - in := make([]int64, 0, len(v)) - for _, t := range v { - value, err := col.parseString(t) + nulls = make([]uint8, len(v)) + for i := range v { + value, err := col.parseDateTime(v[i]) if err != nil { return nil, err } - in = append(in, value) + col.col.Append(value) + } + case []sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullTime: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.AppendRow(v[i]) } - col.values.data, nulls = append(col.values.data, in...), make([]uint8, len(v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return 
nil, &ColumnConverterError{ + Op: "Append", + To: "Datetime64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Datetime64", @@ -161,63 +210,84 @@ func (col *DateTime64) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *DateTime64) AppendRow(v interface{}) error { - var datetime int64 +func (col *DateTime64) AppendRow(v any) error { switch v := v.(type) { case int64: - datetime = v + col.col.Append(time.UnixMilli(v)) case *int64: - if v != nil { - datetime = *v + switch { + case v != nil: + col.col.Append(time.UnixMilli(*v)) + default: + col.col.Append(time.Time{}) } case time.Time: - if err := dateOverflow(minDateTime64, maxDateTime64, v, "2006-01-02 15:04:05"); err != nil { - return err - } - datetime = col.timeToInt64(v) + col.col.Append(v) case *time.Time: - if v != nil { - if err := dateOverflow(minDateTime64, maxDateTime64, *v, "2006-01-02 15:04:05"); err != nil { - return err - } - datetime = col.timeToInt64(*v) + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append(time.Time{}) + } + case sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) + } + case *sql.NullTime: + switch v.Valid { + case true: + col.col.Append(v.Time) + default: + col.col.Append(time.Time{}) } case string: - var err error - datetime, err = col.parseString(v) + datetime, err := col.parseDateTime(v) if err != nil { return err } + col.col.Append(datetime) case nil: + col.col.Append(time.Time{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Datetime64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + s, ok := v.(fmt.Stringer) + if ok { + return col.AppendRow(s.String()) + } return &ColumnConverterError{ Op: "AppendRow", To: "Datetime64", From: fmt.Sprintf("%T", v), } } - col.values.data = append(col.values.data, datetime) return nil } -func (col *DateTime64) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *DateTime64) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *DateTime64) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *DateTime64) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *DateTime64) row(i int) time.Time { - var nano int64 - if col.precision < 19 { - nano = col.values.data[i] * int64(math.Pow10(9-col.precision)) - } - var ( - sec = nano / int64(10e8) - nsec = nano - sec*10e8 - time = time.Unix(sec, nsec) - ) + time := col.col.Row(i) if col.timezone != nil { time = time.In(col.timezone) } @@ -229,17 +299,19 @@ func (col *DateTime64) timeToInt64(t time.Time) int64 { if !t.IsZero() { timestamp = t.UnixNano() } - return timestamp / int64(math.Pow10(9-col.precision)) + return timestamp / int64(math.Pow10(9-int(col.col.Precision))) } -func (col *DateTime64) parseString(value string) (int64, error) { - tv, err := time.Parse("2006-01-02 15:04:05.999", value) - if err != nil { - return 0, err +func (col *DateTime64) parseDateTime(value string) (tv time.Time, err error) { + if tv, err = time.Parse(defaultDateTime64FormatWithZone, value); err == nil { + return tv, nil + } + if tv, err = time.Parse(defaultDateTime64FormatNoZone, value); err == nil 
{ + return time.Date( + tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), time.Local, + ), nil } - // scale to the appropriate units based on the precision - val := tv.UnixMilli() * int64(math.Pow10(col.precision-3)) - return val, nil + return time.Time{}, err } var _ Interface = (*DateTime64)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go index 6f163c6..74b7d75 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go @@ -18,6 +18,9 @@ package column import ( + "database/sql" + "database/sql/driver" + "encoding/binary" "errors" "fmt" "math/big" @@ -25,23 +28,27 @@ import ( "strconv" "strings" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" + "github.com/shopspring/decimal" ) type Decimal struct { chType Type scale int - nobits int // its domain is {32, 64, 128, 256} precision int - values []decimal.Decimal name string + col proto.Column } func (col *Decimal) Name() string { return col.name } +func (col *Decimal) Reset() { + col.col.Reset() +} + func (col *Decimal) parse(t Type) (_ *Decimal, err error) { col.chType = t params := strings.Split(t.params(), ",") @@ -64,13 +71,13 @@ func (col *Decimal) parse(t Type) (_ *Decimal, err error) { } switch { case col.precision <= 9: - col.nobits = 32 + col.col = &proto.ColDecimal32{} case col.precision <= 18: - col.nobits = 64 + col.col = &proto.ColDecimal64{} case col.precision <= 38: - col.nobits = 128 + col.col = &proto.ColDecimal128{} default: - col.nobits = 256 + col.col = &proto.ColDecimal256{} } return col, nil } @@ -84,25 +91,57 @@ func (col *Decimal) ScanType() reflect.Type { } func (col *Decimal) Rows() int { - return len(col.values) + return col.col.Rows() } -func (col *Decimal) Row(i int, ptr bool) interface{} { - value := col.values[i] +func (col *Decimal) Row(i int, ptr bool) any { + value := col.row(i) if ptr { - return &value + return value } - return value + return *value } -func (col *Decimal) ScanRow(dest interface{}, row int) error { +func (col *Decimal) row(i int) *decimal.Decimal { + var value decimal.Decimal + switch vCol := col.col.(type) { + case *proto.ColDecimal32: + v := vCol.Row(i) + value = decimal.New(int64(v), int32(-col.scale)) + case *proto.ColDecimal64: + v := vCol.Row(i) + value = decimal.New(int64(v), int32(-col.scale)) + case *proto.ColDecimal128: + v := vCol.Row(i) + b := make([]byte, 16) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.High) + bv := rawToBigInt(b, true) + value = decimal.NewFromBigInt(bv, int32(-col.scale)) + case *proto.ColDecimal256: + v := vCol.Row(i) + b := make([]byte, 32) + binary.LittleEndian.PutUint64(b[0:64/8], v.Low.Low) + binary.LittleEndian.PutUint64(b[64/8:128/8], v.Low.High) + binary.LittleEndian.PutUint64(b[128/8:192/8], v.High.Low) + binary.LittleEndian.PutUint64(b[192/8:256/8], v.High.High) + bv := rawToBigInt(b, true) + value = decimal.NewFromBigInt(bv, int32(-col.scale)) + } + return &value +} + +func (col *Decimal) ScanRow(dest any, row int) error { switch d := dest.(type) { case *decimal.Decimal: - *d = col.values[row] + *d = *col.row(row) case **decimal.Decimal: *d = new(decimal.Decimal) - **d = col.values[row] + **d = *col.row(row) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(*col.row(row)) + } return 
&ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -112,21 +151,38 @@ func (col *Decimal) ScanRow(dest interface{}, row int) error { return nil } -func (col *Decimal) Append(v interface{}) (nulls []uint8, err error) { +func (col *Decimal) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []decimal.Decimal: - col.values, nulls = append(col.values, v...), make([]uint8, len(v)) + nulls = make([]uint8, len(v)) + for i := range v { + col.append(&v[i]) + } case []*decimal.Decimal: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - col.values = append(col.values, *v) + case v[i] != nil: + col.append(v[i]) default: - col.values, nulls[i] = append(col.values, decimal.New(0, 0)), 1 + nulls[i] = 1 + value := decimal.New(0, 0) + col.append(&value) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -136,7 +192,7 @@ func (col *Decimal) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Decimal) AppendRow(v interface{}) error { +func (col *Decimal) AppendRow(v any) error { value := decimal.New(0, 0) switch v := v.(type) { case decimal.Decimal: @@ -147,100 +203,71 @@ func (col *Decimal) AppendRow(v interface{}) error { } case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), From: fmt.Sprintf("%T", v), } } - col.values = append(col.values, value) + col.append(&value) return nil } -func (col *Decimal) Decode(decoder *binary.Decoder, rows int) error { - switch col.nobits { - case 32: - var base Int32 - if err := base.Decode(decoder, rows); err != nil { - return err - } - for _, v := range base.data { - col.values = append(col.values, decimal.New(int64(v), int32(-col.scale))) - } - case 64: - var base Int64 - if err := base.Decode(decoder, rows); err != nil { - return err - } - for _, v := range base.data { - col.values = append(col.values, decimal.New(int64(v), int32(-col.scale))) - } - case 128, 256: - var ( - size = col.nobits / 8 - scratch = make([]byte, rows*size) - ) - if err := decoder.Raw(scratch); err != nil { - return err - } - for i := 0; i < rows; i++ { - col.values = append(col.values, decimal.NewFromBigInt( - rawToBigInt(scratch[i*size:(i+1)*size], true), - int32(-col.scale), - )) - } - default: - return fmt.Errorf("unsupported %s", col.chType) +func (col *Decimal) append(v *decimal.Decimal) { + switch vCol := col.col.(type) { + case *proto.ColDecimal32: + var part uint32 + part = uint32(decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).IntPart()) + vCol.Append(proto.Decimal32(part)) + case *proto.ColDecimal64: + var part uint64 + part = uint64(decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).IntPart()) + vCol.Append(proto.Decimal64(part)) + case *proto.ColDecimal128: + var bi *big.Int + bi = decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).BigInt() + dest := make([]byte, 16) + bigIntToRaw(dest, bi) 
+ vCol.Append(proto.Decimal128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }) + case *proto.ColDecimal256: + var bi *big.Int + bi = decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).BigInt() + dest := make([]byte, 32) + bigIntToRaw(dest, bi) + vCol.Append(proto.Decimal256{ + Low: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[0 : 64/8]), + High: binary.LittleEndian.Uint64(dest[64/8 : 128/8]), + }, + High: proto.UInt128{ + Low: binary.LittleEndian.Uint64(dest[128/8 : 192/8]), + High: binary.LittleEndian.Uint64(dest[192/8 : 256/8]), + }, + }) } - return nil } -func (col *Decimal) Encode(encoder *binary.Encoder) error { - switch col.nobits { - case 32: - var base UInt32 - for _, v := range col.values { - var part uint32 - switch { - case v.Exponent() != int32(col.scale): - part = uint32(decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).IntPart()) - default: - part = uint32(v.IntPart()) - } - base.data = append(base.data, part) - } - return base.Encode(encoder) - case 64: - var base UInt64 - for _, v := range col.values { - var part uint64 - switch { - case v.Exponent() != int32(col.scale): - part = uint64(decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).IntPart()) - default: - part = uint64(v.IntPart()) - } - base.data = append(base.data, part) - } - return base.Encode(encoder) - case 128, 256: - var ( - size = col.nobits / 8 - scratch = make([]byte, col.Rows()*size) - ) - for i, v := range col.values { - var bi *big.Int - switch { - case v.Exponent() != int32(col.scale): - bi = decimal.NewFromBigInt(v.Coefficient(), v.Exponent()+int32(col.scale)).BigInt() - default: - bi = v.BigInt() - } - bigIntToRaw(scratch[i*size:(i+1)*size], bi) - } - return encoder.Raw(scratch) - } - return fmt.Errorf("unsupported %s", col.chType) +func (col *Decimal) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *Decimal) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *Decimal) Scale() int64 { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go index 33517d0..935dd2d 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go @@ -19,6 +19,7 @@ package column import ( "errors" + "github.com/ClickHouse/ch-go/proto" "math" "strconv" "strings" @@ -73,8 +74,8 @@ func Enum(chType Type, name string) (Interface, error) { } if strings.HasPrefix(columnType, "Enum8") { enum := Enum8{ - iv: make(map[string]uint8, len(idents)), - vi: make(map[uint8]string, len(idents)), + iv: make(map[string]proto.Enum8, len(idents)), + vi: make(map[proto.Enum8]string, len(idents)), chType: chType, name: name, } @@ -85,20 +86,21 @@ func Enum(chType Type, name string) (Interface, error) { Err: errors.New("invalid Enum"), } } - enum.iv[idents[i]] = uint8(indexes[i]) - enum.vi[uint8(indexes[i])] = idents[i] + v := int8(indexes[i]) + enum.iv[idents[i]] = proto.Enum8(v) + enum.vi[proto.Enum8(v)] = idents[i] } return &enum, nil } enum := Enum16{ - iv: make(map[string]uint16, len(idents)), - vi: make(map[uint16]string, len(idents)), + iv: make(map[string]proto.Enum16, len(idents)), + vi: make(map[proto.Enum16]string, len(idents)), chType: chType, name: name, } for i := range idents { - enum.iv[idents[i]] = uint16(indexes[i]) - enum.vi[uint16(indexes[i])] = 
idents[i] + enum.iv[idents[i]] = proto.Enum16(indexes[i]) + enum.vi[proto.Enum16(indexes[i])] = idents[i] } return &enum, nil } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go index ef7a735..c394e7f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go @@ -18,20 +18,25 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type Enum16 struct { - iv map[string]uint16 - vi map[uint16]string + iv map[string]proto.Enum16 + vi map[proto.Enum16]string chType Type - values UInt16 + col proto.ColEnum16 name string } +func (col *Enum16) Reset() { + col.col.Reset() +} + func (col *Enum16) Name() string { return col.name } @@ -45,25 +50,29 @@ func (col *Enum16) ScanType() reflect.Type { } func (col *Enum16) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col *Enum16) Row(i int, ptr bool) interface{} { - value := col.vi[col.values.data[i]] +func (col *Enum16) Row(i int, ptr bool) any { + value := col.vi[col.col.Row(i)] if ptr { return &value } return value } -func (col *Enum16) ScanRow(dest interface{}, row int) error { +func (col *Enum16) ScanRow(dest any, row int) error { + value := col.col.Row(row) switch d := dest.(type) { case *string: - *d = col.vi[col.values.data[row]] + *d = col.vi[value] case **string: *d = new(string) - **d = col.vi[col.values.data[row]] + **d = col.vi[value] default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.vi[value]) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -73,8 +82,48 @@ func (col *Enum16) ScanRow(dest interface{}, row int) error { return nil } -func (col *Enum16) Append(v interface{}) (nulls []uint8, err error) { +func (col *Enum16) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { + case []int16: + nulls = make([]uint8, len(v)) + for _, elem := range v { + if err = col.AppendRow(elem); err != nil { + return nil, err + } + } + case []*int16: + nulls = make([]uint8, len(v)) + for i, elem := range v { + switch { + case elem != nil: + if err = col.AppendRow(elem); err != nil { + return nil, err + } + default: + col.col.Append(0) + nulls[i] = 1 + } + } + case []int: + nulls = make([]uint8, len(v)) + for _, elem := range v { + if err = col.AppendRow(elem); err != nil { + return nil, err + } + } + case []*int: + nulls = make([]uint8, len(v)) + for i, elem := range v { + switch { + case elem != nil: + if err = col.AppendRow(elem); err != nil { + return nil, err + } + default: + col.col.Append(0) + nulls[i] = 1 + } + } case []string: nulls = make([]uint8, len(v)) for _, elem := range v { @@ -85,7 +134,7 @@ func (col *Enum16) Append(v interface{}) (nulls []uint8, err error) { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) } case []*string: nulls = make([]uint8, len(v)) @@ -99,17 +148,65 @@ func (col *Enum16) Append(v interface{}) (nulls []uint8, err error) { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, 
&ColumnConverterError{ + Op: "Append", + To: "Enum16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ + Op: "Append", + To: "Enum16", + From: fmt.Sprintf("%T", v), + } } return } -func (col *Enum16) AppendRow(elem interface{}) error { +func (col *Enum16) AppendRow(elem any) error { switch elem := elem.(type) { + case int16: + return col.AppendRow(int(elem)) + case *int16: + return col.AppendRow(int(*elem)) + case int: + v := proto.Enum16(elem) + _, ok := col.vi[v] + if !ok { + return &Error{ + Err: fmt.Errorf("unknown element %v", elem), + ColumnType: string(col.chType), + } + } + col.col.Append(v) + case *int: + switch { + case elem != nil: + v := proto.Enum16(*elem) + _, ok := col.vi[v] + if !ok { + return &Error{ + Err: fmt.Errorf("unknown element %v", *elem), + ColumnType: string(col.chType), + } + } + col.col.Append(v) + default: + col.col.Append(0) + } case string: v, ok := col.iv[elem] if !ok { @@ -118,7 +215,7 @@ func (col *Enum16) AppendRow(elem interface{}) error { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) case *string: switch { case elem != nil: @@ -129,28 +226,44 @@ func (col *Enum16) AppendRow(elem interface{}) error { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) default: - col.values.data = append(col.values.data, 0) + col.col.Append(0) } case nil: - col.values.data = append(col.values.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Enum16", - From: fmt.Sprintf("%T", elem), + if valuer, ok := elem.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum16", + From: fmt.Sprintf("%T", elem), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + if s, ok := elem.(fmt.Stringer); ok { + return col.AppendRow(s.String()) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum16", + From: fmt.Sprintf("%T", elem), + } } } return nil } -func (col *Enum16) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *Enum16) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *Enum16) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *Enum16) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } var _ Interface = (*Enum16)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go index 89762d9..4aee561 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go @@ -18,18 +18,23 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type Enum8 struct { - iv map[string]uint8 - vi map[uint8]string + iv map[string]proto.Enum8 + vi map[proto.Enum8]string chType Type - values UInt8 name string + col proto.ColEnum8 +} + +func (col *Enum8) Reset() { + col.col.Reset() } func (col *Enum8) Name() string { @@ -45,25 +50,29 @@ func (col *Enum8) ScanType() reflect.Type { } func (col *Enum8) Rows() int { - return len(col.values.data) + return col.col.Rows() } -func (col 
*Enum8) Row(i int, ptr bool) interface{} { - value := col.vi[col.values.data[i]] +func (col *Enum8) Row(i int, ptr bool) any { + value := col.vi[col.col.Row(i)] if ptr { return &value } return value } -func (col *Enum8) ScanRow(dest interface{}, row int) error { +func (col *Enum8) ScanRow(dest any, row int) error { + v := col.col.Row(row) switch d := dest.(type) { case *string: - *d = col.vi[col.values.data[row]] + *d = col.vi[v] case **string: *d = new(string) - **d = col.vi[col.values.data[row]] + **d = col.vi[v] default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.vi[v]) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -73,38 +82,91 @@ func (col *Enum8) ScanRow(dest interface{}, row int) error { return nil } -func (col *Enum8) Append(v interface{}) (nulls []uint8, err error) { +func (col *Enum8) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { + case []int8: + nulls = make([]uint8, len(v)) + for _, elem := range v { + if err = col.AppendRow(elem); err != nil { + return nil, err + } + } + case []*int8: + nulls = make([]uint8, len(v)) + for i, elem := range v { + switch { + case elem != nil: + if err = col.AppendRow(elem); err != nil { + return nil, err + } + default: + col.col.Append(0) + nulls[i] = 1 + } + } + case []int: + nulls = make([]uint8, len(v)) + for _, elem := range v { + if err = col.AppendRow(elem); err != nil { + return nil, err + } + } + case []*int: + nulls = make([]uint8, len(v)) + for i, elem := range v { + switch { + case elem != nil: + if err = col.AppendRow(elem); err != nil { + return nil, err + } + default: + col.col.Append(0) + nulls[i] = 1 + } + } case []string: nulls = make([]uint8, len(v)) for _, elem := range v { - v, ok := col.iv[elem] + val, ok := col.iv[elem] if !ok { return nil, &Error{ Err: fmt.Errorf("unknown element %q", elem), ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(val) } case []*string: nulls = make([]uint8, len(v)) for i, elem := range v { switch { case elem != nil: - v, ok := col.iv[*elem] + val, ok := col.iv[*elem] if !ok { return nil, &Error{ Err: fmt.Errorf("unknown element %q", *elem), ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(val) default: - col.values.data, nulls[i] = append(col.values.data, 0), 1 + col.col.Append(0) + nulls[i] = 1 } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Enum8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Enum8", @@ -114,8 +176,37 @@ func (col *Enum8) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Enum8) AppendRow(elem interface{}) error { +func (col *Enum8) AppendRow(elem any) error { switch elem := elem.(type) { + case int8: + return col.AppendRow(int(elem)) + case *int8: + return col.AppendRow(int(*elem)) + case int: + v := proto.Enum8(elem) + _, ok := col.vi[v] + if !ok { + return &Error{ + Err: fmt.Errorf("unknown element %v", elem), + ColumnType: string(col.chType), + } + } + col.col.Append(v) + case *int: + switch { + case elem != nil: + v := proto.Enum8(*elem) + _, ok := col.vi[v] + if !ok { + return &Error{ + Err: fmt.Errorf("unknown element %v", *elem), + ColumnType: string(col.chType), + } + } + col.col.Append(v) + default: + col.col.Append(0) + } case string: v, ok 
:= col.iv[elem] if !ok { @@ -124,7 +215,7 @@ func (col *Enum8) AppendRow(elem interface{}) error { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) case *string: switch { case elem != nil: @@ -135,28 +226,45 @@ func (col *Enum8) AppendRow(elem interface{}) error { ColumnType: string(col.chType), } } - col.values.data = append(col.values.data, v) + col.col.Append(v) default: - col.values.data = append(col.values.data, 0) + col.col.Append(0) } case nil: - col.values.data = append(col.values.data, 0) + col.col.Append(0) default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "Enum8", - From: fmt.Sprintf("%T", elem), + if valuer, ok := elem.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum8", + From: fmt.Sprintf("%T", elem), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if s, ok := elem.(fmt.Stringer); ok { + return col.AppendRow(s.String()) + } else { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum8", + From: fmt.Sprintf("%T", elem), + } } } return nil } -func (col *Enum8) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *Enum8) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *Enum8) Encode(encoder *binary.Encoder) error { - return col.values.Encode(encoder) +func (col *Enum8) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } var _ Interface = (*Enum8)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go index ca96152..7b81467 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go @@ -18,17 +18,24 @@ package column import ( + "database/sql" + "database/sql/driver" "encoding" "fmt" "reflect" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type FixedString struct { - data []byte - size int name string + col proto.ColFixedStr +} + +func (col *FixedString) Reset() { + col.col.Reset() } func (col *FixedString) Name() string { @@ -36,14 +43,14 @@ func (col *FixedString) Name() string { } func (col *FixedString) parse(t Type) (*FixedString, error) { - if _, err := fmt.Sscanf(string(t), "FixedString(%d)", &col.size); err != nil { + if _, err := fmt.Sscanf(string(t), "FixedString(%d)", &col.col.Size); err != nil { return nil, err } return col, nil } func (col *FixedString) Type() Type { - return Type(fmt.Sprintf("FixedString(%d)", col.size)) + return Type(fmt.Sprintf("FixedString(%d)", col.col.Size)) } func (col *FixedString) ScanType() reflect.Type { @@ -51,13 +58,10 @@ func (col *FixedString) ScanType() reflect.Type { } func (col *FixedString) Rows() int { - if col.size == 0 { - return 0 - } - return len(col.data) / col.size + return col.col.Rows() } -func (col *FixedString) Row(i int, ptr bool) interface{} { +func (col *FixedString) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -65,7 +69,7 @@ func (col *FixedString) Row(i int, ptr bool) interface{} { return value } -func (col *FixedString) ScanRow(dest interface{}, row int) error { +func (col *FixedString) ScanRow(dest any, row int) error { switch d := dest.(type) { case *string: *d = col.row(row) @@ -74,7 +78,30 @@ func (col 
*FixedString) ScanRow(dest interface{}, row int) error { **d = col.row(row) case encoding.BinaryUnmarshaler: return d.UnmarshalBinary(col.rowBytes(row)) + case *[]byte: + *d = col.rowBytes(row) default: + // handle for *[n]byte + if t := reflect.TypeOf(dest); t.Kind() == reflect.Pointer && + t.Elem().Kind() == reflect.Array && + t.Elem().Elem() == reflect.TypeOf(byte(0)) { + size := t.Elem().Len() + if size != col.col.Size { + return &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%T", dest), + From: "FixedString", + Hint: fmt.Sprintf("invalid size %d, expect %d", size, col.col.Size), + } + } + rv := reflect.ValueOf(dest).Elem() + reflect.Copy(rv, reflect.ValueOf(col.row(row))) + return nil + } + + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row)) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -84,17 +111,17 @@ func (col *FixedString) ScanRow(dest interface{}, row int) error { return nil } -func (col *FixedString) Append(v interface{}) (nulls []uint8, err error) { +func (col *FixedString) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []string: + nulls = make([]uint8, len(v)) for _, v := range v { if v == "" { - col.data = append(col.data, make([]byte, col.size)...) + col.col.Append(make([]byte, col.col.Size)) } else { - col.data = append(col.data, binary.Str2Bytes(v)...) + col.col.Append(binary.Str2Bytes(v, col.col.Size)) } } - nulls = make([]uint8, len(v)) case []*string: nulls = make([]uint8, len(v)) for i, v := range v { @@ -103,12 +130,12 @@ func (col *FixedString) Append(v interface{}) (nulls []uint8, err error) { } switch { case v == nil: - col.data = append(col.data, make([]byte, col.size)...) + col.col.Append(make([]byte, col.col.Size)) default: if *v == "" { - col.data = append(col.data, make([]byte, col.size)...) + col.col.Append(make([]byte, col.col.Size)) } else { - col.data = append(col.data, binary.Str2Bytes(*v)...) 
+ col.col.Append(binary.Str2Bytes(*v, col.col.Size)) } } } @@ -117,8 +144,53 @@ func (col *FixedString) Append(v interface{}) (nulls []uint8, err error) { if err != nil { return nil, err } - col.data, nulls = append(col.data, data...), make([]uint8, len(data)/col.size) + col.col.Append(data) + nulls = make([]uint8, len(data)/col.col.Size) + case [][]byte: + nulls = make([]uint8, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + } + n := len(v) + if n == 0 { + col.col.Append(make([]byte, col.col.Size)) + } else if n >= col.col.Size { + col.col.Append(v[0:col.col.Size]) + } else { + data := make([]byte, col.col.Size) + copy(data, v) + col.col.Append(data) + } + } default: + // handle for [][n]byte + if t := reflect.TypeOf(v); t.Kind() == reflect.Slice && + t.Elem().Kind() == reflect.Array && + t.Elem().Elem() == reflect.TypeOf(byte(0)) { + rv := reflect.ValueOf(v) + nulls = make([]uint8, rv.Len()) + for i := 0; i < rv.Len(); i++ { + e := rv.Index(i) + data := make([]byte, e.Len()) + reflect.Copy(reflect.ValueOf(data), e) + col.col.Append(data) + } + return + } + + if s, ok := v.(driver.Valuer); ok { + val, err := s.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "FixedString", + From: fmt.Sprintf("%T", s), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "FixedString", @@ -128,17 +200,19 @@ func (col *FixedString) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *FixedString) AppendRow(v interface{}) (err error) { - data := make([]byte, col.size) +func (col *FixedString) AppendRow(v any) (err error) { + data := make([]byte, col.col.Size) switch v := v.(type) { + case []byte: + copy(data, v) case string: if v != "" { - data = binary.Str2Bytes(v) + data = binary.Str2Bytes(v, col.col.Size) } case *string: if v != nil { if *v != "" { - data = binary.Str2Bytes(*v) + data = binary.Str2Bytes(*v, col.col.Size) } } case nil: @@ -147,37 +221,62 @@ func (col *FixedString) AppendRow(v interface{}) (err error) { return err } default: + if t := reflect.TypeOf(v); t.Kind() == reflect.Array && t.Elem() == reflect.TypeOf(byte(0)) { + if t.Len() != col.col.Size { + return &ColumnConverterError{ + Op: "AppendRow", + To: "FixedString", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("invalid size %d, expect %d", t.Len(), col.col.Size), + } + } + reflect.Copy(reflect.ValueOf(data), reflect.ValueOf(v)) + col.col.Append(data) + return nil + } + + if s, ok := v.(driver.Valuer); ok { + val, err := s.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "FixedString", + From: fmt.Sprintf("%T", s), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if s, ok := v.(fmt.Stringer); ok { + return col.AppendRow(s.String()) + } + return &ColumnConverterError{ Op: "AppendRow", To: "FixedString", From: fmt.Sprintf("%T", v), } } - col.data = append(col.data, data...) + col.col.Append(data) return nil } -func (col *FixedString) Decode(decoder *binary.Decoder, rows int) error { - col.data = make([]byte, col.size*rows) - return decoder.Raw(col.data) +func (col *FixedString) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *FixedString) Encode(encoder *binary.Encoder) error { - if len(col.data)%col.size != 0 { - return &Error{ - ColumnType: string(col.Type()), - Err: fmt.Errorf("invalid column size. 
must be a multiple of %d bytes got %d bytes", col.size, len(col.data)), - } - } - return encoder.Raw(col.data) +func (col *FixedString) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *FixedString) row(i int) string { - return string(col.data[i*col.size : (i+1)*col.size]) + v := col.col.Row(i) + return string(v) } func (col *FixedString) rowBytes(i int) []byte { - return col.data[i*col.size : (i+1)*col.size] + return col.col.Row(i) } var _ Interface = (*FixedString)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go index c232704..1263b89 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go @@ -18,10 +18,11 @@ package column import ( + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/paulmach/orb" ) @@ -30,6 +31,10 @@ type MultiPolygon struct { name string } +func (col *MultiPolygon) Reset() { + col.set.Reset() +} + func (col *MultiPolygon) Name() string { return col.name } @@ -46,7 +51,7 @@ func (col *MultiPolygon) Rows() int { return col.set.Rows() } -func (col *MultiPolygon) Row(i int, ptr bool) interface{} { +func (col *MultiPolygon) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -54,7 +59,7 @@ func (col *MultiPolygon) Row(i int, ptr bool) interface{} { return value } -func (col *MultiPolygon) ScanRow(dest interface{}, row int) error { +func (col *MultiPolygon) ScanRow(dest any, row int) error { switch d := dest.(type) { case *orb.MultiPolygon: *d = col.row(row) @@ -72,7 +77,7 @@ func (col *MultiPolygon) ScanRow(dest interface{}, row int) error { return nil } -func (col *MultiPolygon) Append(v interface{}) (nulls []uint8, err error) { +func (col *MultiPolygon) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []orb.MultiPolygon: values := make([][]orb.Polygon, 0, len(v)) @@ -80,8 +85,31 @@ func (col *MultiPolygon) Append(v interface{}) (nulls []uint8, err error) { values = append(values, v) } return col.set.Append(values) - + case []*orb.MultiPolygon: + nulls = make([]uint8, len(v)) + values := make([][]orb.Polygon, 0, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + values = append(values, orb.MultiPolygon{}) + } else { + values = append(values, *v) + } + } + return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "MultiPolygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "MultiPolygon", @@ -90,11 +118,25 @@ func (col *MultiPolygon) Append(v interface{}) (nulls []uint8, err error) { } } -func (col *MultiPolygon) AppendRow(v interface{}) error { +func (col *MultiPolygon) AppendRow(v any) error { switch v := v.(type) { case orb.MultiPolygon: return col.set.AppendRow([]orb.Polygon(v)) + case *orb.MultiPolygon: + return col.set.AppendRow([]orb.Polygon(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "MultiPolygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could 
not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "MultiPolygon", @@ -103,12 +145,12 @@ func (col *MultiPolygon) AppendRow(v interface{}) error { } } -func (col *MultiPolygon) Decode(decoder *binary.Decoder, rows int) error { - return col.set.Decode(decoder, rows) +func (col *MultiPolygon) Decode(reader *proto.Reader, rows int) error { + return col.set.Decode(reader, rows) } -func (col *MultiPolygon) Encode(encoder *binary.Encoder) error { - return col.set.Encode(encoder) +func (col *MultiPolygon) Encode(buffer *proto.Buffer) { + col.set.Encode(buffer) } func (col *MultiPolygon) row(i int) orb.MultiPolygon { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go index 7fdc443..4a4fe16 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go @@ -18,17 +18,21 @@ package column import ( + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/paulmach/orb" ) type Point struct { - lon Float64 - lat Float64 name string + col proto.ColPoint +} + +func (col *Point) Reset() { + col.col.Reset() } func (col *Point) Name() string { @@ -44,10 +48,10 @@ func (col *Point) ScanType() reflect.Type { } func (col *Point) Rows() int { - return col.lon.Rows() + return col.col.Rows() } -func (col *Point) Row(i int, ptr bool) interface{} { +func (col *Point) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -55,7 +59,7 @@ func (col *Point) Row(i int, ptr bool) interface{} { return value } -func (col *Point) ScanRow(dest interface{}, row int) error { +func (col *Point) ScanRow(dest any, row int) error { switch d := dest.(type) { case *orb.Point: *d = col.row(row) @@ -73,15 +77,42 @@ func (col *Point) ScanRow(dest interface{}, row int) error { return nil } -func (col *Point) Append(v interface{}) (nulls []uint8, err error) { +func (col *Point) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []orb.Point: nulls = make([]uint8, len(v)) for _, v := range v { - col.lon.data = append(col.lon.data, v.Lon()) - col.lat.data = append(col.lat.data, v.Lat()) + col.col.Append(proto.Point{ + X: v.Lon(), + Y: v.Lat(), + }) + } + case []*orb.Point: + nulls = make([]uint8, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + col.col.Append(proto.Point{}) + } else { + col.col.Append(proto.Point{ + X: v.Lon(), + Y: v.Lat(), + }) + } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Point", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Point", @@ -90,12 +121,31 @@ func (col *Point) Append(v interface{}) (nulls []uint8, err error) { } return } -func (col *Point) AppendRow(v interface{}) error { +func (col *Point) AppendRow(v any) error { switch v := v.(type) { case orb.Point: - col.lon.data = append(col.lon.data, v.Lon()) - col.lat.data = append(col.lat.data, v.Lat()) + col.col.Append(proto.Point{ + X: v.Lon(), + Y: v.Lat(), + }) + case *orb.Point: + col.col.Append(proto.Point{ + X: v.Lon(), + Y: v.Lat(), + }) default: + if valuer, 
ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Point", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Point", @@ -105,27 +155,19 @@ func (col *Point) AppendRow(v interface{}) error { return nil } -func (col *Point) Decode(decoder *binary.Decoder, rows int) error { - if err := col.lon.Decode(decoder, rows); err != nil { - return err - } - if err := col.lat.Decode(decoder, rows); err != nil { - return err - } - return nil +func (col *Point) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *Point) Encode(encoder *binary.Encoder) error { - if err := col.lon.Encode(encoder); err != nil { - return err - } - return col.lat.Encode(encoder) +func (col *Point) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *Point) row(i int) orb.Point { + p := col.col.Row(i) return orb.Point{ - col.lon.data[i], - col.lat.data[i], + p.X, + p.Y, } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go index 96fd42a..6e78b1a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go @@ -18,10 +18,11 @@ package column import ( + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/paulmach/orb" ) @@ -30,6 +31,10 @@ type Polygon struct { name string } +func (col *Polygon) Reset() { + col.set.Reset() +} + func (col *Polygon) Name() string { return col.name } @@ -46,7 +51,7 @@ func (col *Polygon) Rows() int { return col.set.Rows() } -func (col *Polygon) Row(i int, ptr bool) interface{} { +func (col *Polygon) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -54,7 +59,7 @@ func (col *Polygon) Row(i int, ptr bool) interface{} { return value } -func (col *Polygon) ScanRow(dest interface{}, row int) error { +func (col *Polygon) ScanRow(dest any, row int) error { switch d := dest.(type) { case *orb.Polygon: *d = col.row(row) @@ -72,7 +77,7 @@ func (col *Polygon) ScanRow(dest interface{}, row int) error { return nil } -func (col *Polygon) Append(v interface{}) (nulls []uint8, err error) { +func (col *Polygon) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []orb.Polygon: values := make([][]orb.Ring, 0, len(v)) @@ -80,8 +85,31 @@ func (col *Polygon) Append(v interface{}) (nulls []uint8, err error) { values = append(values, v) } return col.set.Append(values) - + case []*orb.Polygon: + nulls = make([]uint8, len(v)) + values := make([][]orb.Ring, 0, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + values = append(values, orb.Polygon{}) + } else { + values = append(values, *v) + } + } + return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Polygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Polygon", @@ -90,11 +118,25 @@ func (col *Polygon) Append(v interface{}) 
(nulls []uint8, err error) { } } -func (col *Polygon) AppendRow(v interface{}) error { +func (col *Polygon) AppendRow(v any) error { switch v := v.(type) { case orb.Polygon: return col.set.AppendRow([]orb.Ring(v)) + case *orb.Polygon: + return col.set.AppendRow([]orb.Ring(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Polygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Polygon", @@ -103,12 +145,12 @@ func (col *Polygon) AppendRow(v interface{}) error { } } -func (col *Polygon) Decode(decoder *binary.Decoder, rows int) error { - return col.set.Decode(decoder, rows) +func (col *Polygon) Decode(reader *proto.Reader, rows int) error { + return col.set.Decode(reader, rows) } -func (col *Polygon) Encode(encoder *binary.Encoder) error { - return col.set.Encode(encoder) +func (col *Polygon) Encode(buffer *proto.Buffer) { + col.set.Encode(buffer) } func (col *Polygon) row(i int) orb.Polygon { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go index 99e0804..a64de47 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go @@ -18,10 +18,11 @@ package column import ( + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/paulmach/orb" ) @@ -30,6 +31,10 @@ type Ring struct { name string } +func (col *Ring) Reset() { + col.set.Reset() +} + func (col *Ring) Name() string { return col.name } @@ -46,7 +51,7 @@ func (col *Ring) Rows() int { return col.set.Rows() } -func (col *Ring) Row(i int, ptr bool) interface{} { +func (col *Ring) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -54,7 +59,7 @@ func (col *Ring) Row(i int, ptr bool) interface{} { return value } -func (col *Ring) ScanRow(dest interface{}, row int) error { +func (col *Ring) ScanRow(dest any, row int) error { switch d := dest.(type) { case *orb.Ring: *d = col.row(row) @@ -72,7 +77,7 @@ func (col *Ring) ScanRow(dest interface{}, row int) error { return nil } -func (col *Ring) Append(v interface{}) (nulls []uint8, err error) { +func (col *Ring) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []orb.Ring: values := make([][]orb.Point, 0, len(v)) @@ -80,8 +85,31 @@ func (col *Ring) Append(v interface{}) (nulls []uint8, err error) { values = append(values, v) } return col.set.Append(values) - + case []*orb.Ring: + nulls = make([]uint8, len(v)) + values := make([][]orb.Point, 0, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + values = append(values, orb.Ring{}) + } else { + values = append(values, *v) + } + } + return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Ring", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Ring", @@ -90,11 +118,25 @@ func (col *Ring) Append(v interface{}) (nulls []uint8, err error) { } } -func (col *Ring) 
AppendRow(v interface{}) error { +func (col *Ring) AppendRow(v any) error { switch v := v.(type) { case orb.Ring: return col.set.AppendRow([]orb.Point(v)) + case *orb.Ring: + return col.set.AppendRow([]orb.Point(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Ring", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Ring", @@ -103,12 +145,12 @@ func (col *Ring) AppendRow(v interface{}) error { } } -func (col *Ring) Decode(decoder *binary.Decoder, rows int) error { - return col.set.Decode(decoder, rows) +func (col *Ring) Decode(reader *proto.Reader, rows int) error { + return col.set.Decode(reader, rows) } -func (col *Ring) Encode(encoder *binary.Encoder) error { - return col.set.Encode(encoder) +func (col *Ring) Encode(buffer *proto.Buffer) { + col.set.Encode(buffer) } func (col *Ring) row(i int) orb.Ring { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/interval.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/interval.go index cca95e6..307fc64 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/interval.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/interval.go @@ -20,16 +20,19 @@ package column import ( "errors" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" "strings" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type Interval struct { chType Type - values Int64 name string + col proto.ColInt64 +} + +func (col *Interval) Reset() { + col.col.Reset() } func (col *Interval) Name() string { @@ -48,11 +51,15 @@ func (col *Interval) parse(t Type) (Interface, error) { func (col *Interval) Type() Type { return col.chType } func (col *Interval) ScanType() reflect.Type { return scanTypeString } -func (col *Interval) Rows() int { return len(col.values.data) } -func (col *Interval) Row(i int, ptr bool) interface{} { - return col.row(i) +func (col *Interval) Rows() int { return col.col.Rows() } +func (col *Interval) Row(i int, ptr bool) any { + val := col.row(i) + if ptr { + return &val + } + return val } -func (col *Interval) ScanRow(dest interface{}, row int) error { +func (col *Interval) ScanRow(dest any, row int) error { switch d := dest.(type) { case *string: *d = col.row(row) @@ -69,34 +76,31 @@ func (col *Interval) ScanRow(dest interface{}, row int) error { return nil } -func (Interval) Append(interface{}) ([]uint8, error) { +func (Interval) Append(any) ([]uint8, error) { return nil, &Error{ ColumnType: "Interval", Err: errors.New("data type values can't be stored in tables"), } } -func (Interval) AppendRow(interface{}) error { +func (Interval) AppendRow(any) error { return &Error{ ColumnType: "Interval", Err: errors.New("data type values can't be stored in tables"), } } -func (col *Interval) Decode(decoder *binary.Decoder, rows int) error { - return col.values.Decode(decoder, rows) +func (col *Interval) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (Interval) Encode(*binary.Encoder) error { - return &Error{ - ColumnType: "Interval", - Err: errors.New("data type values can't be stored in tables"), - } +func (Interval) Encode(buffer *proto.Buffer) { } func (col *Interval) row(i int) string { - v := fmt.Sprintf("%d %s", col.values.data[i], strings.TrimPrefix(string(col.chType), 
"Interval")) - if col.values.data[i] > 1 { + val := col.col.Row(i) + v := fmt.Sprintf("%d %s", val, strings.TrimPrefix(string(col.chType), "Interval")) + if val > 1 { v += "s" } return v diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go index 8ae0ac9..3d4c252 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go @@ -18,16 +18,22 @@ package column import ( + "database/sql/driver" + "encoding/binary" "fmt" + "github.com/ClickHouse/ch-go/proto" "net" + "net/netip" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type IPv4 struct { - data []byte name string + col proto.ColIPv4 +} + +func (col *IPv4) Reset() { + col.col.Reset() } func (col *IPv4) Name() string { @@ -43,10 +49,10 @@ func (col *IPv4) ScanType() reflect.Type { } func (col *IPv4) Rows() int { - return len(col.data) / net.IPv4len + return col.col.Rows() } -func (col *IPv4) Row(i int, ptr bool) interface{} { +func (col *IPv4) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -54,7 +60,7 @@ func (col *IPv4) Row(i int, ptr bool) interface{} { return value } -func (col *IPv4) ScanRow(dest interface{}, row int) error { +func (col *IPv4) ScanRow(dest any, row int) error { switch d := dest.(type) { case *string: *d = col.row(row).String() @@ -66,6 +72,32 @@ func (col *IPv4) ScanRow(dest interface{}, row int) error { case **net.IP: *d = new(net.IP) **d = col.row(row) + case *netip.Addr: + *d = col.rowAddr(row) + case **netip.Addr: + *d = new(netip.Addr) + **d = col.rowAddr(row) + case *uint32: + ipV4 := col.row(row).To4() + if ipV4 == nil { + return &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%T", dest), + From: "IPv4", + } + } + *d = binary.BigEndian.Uint32(ipV4[:]) + case **uint32: + ipV4 := col.row(row).To4() + if ipV4 == nil { + return &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%T", dest), + From: "IPv4", + } + } + *d = new(uint32) + **d = binary.BigEndian.Uint32(ipV4[:]) default: return &ColumnConverterError{ Op: "ScanRow", @@ -76,82 +108,114 @@ func (col *IPv4) ScanRow(dest interface{}, row int) error { return nil } -// appendIPv4Str appends bytes of the IPv4-formatted string to result byte array. -// If IP is not valid V4 error will be returned. -func appendIPv4Str(data []byte, strIp string) ([]byte, error) { - ip := net.ParseIP(strIp) - if ip == nil { - return nil, &ColumnConverterError{ +func strToIPV4(strIp string) (netip.Addr, error) { + ip, err := netip.ParseAddr(strIp) + if err != nil { + return netip.Addr{}, &ColumnConverterError{ Op: "Append", To: "IPv4", Hint: "invalid IP format", } } - return appendIPv4(data, ip) + return ip, nil } -// appendIPv4 appends bytes of IPv4 to result byte array. -// If IP is not valid V4 error will be returned. 
-func appendIPv4(data []byte, ip net.IP) ([]byte, error) { - ip = ip.To4() - if ip == nil { - return nil, &ColumnConverterError{ - Op: "Append", - To: "IPv4", - From: "IPv6", - Hint: "invalid IP version", - } +func (col *IPv4) AppendV4IPs(ips []netip.Addr) { + for i := range ips { + col.col.Append(proto.ToIPv4(ips[i])) } - return append(data, IPv4ToBytes(ip)...), nil } -func (col *IPv4) Append(v interface{}) (nulls []uint8, err error) { - var data []byte +func (col *IPv4) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []string: nulls = make([]uint8, len(v)) - for _, v := range v { - data, err = appendIPv4Str(data, v) + ips := make([]netip.Addr, len(v), len(v)) + for i := range v { + ip, err := strToIPV4(v[i]) if err != nil { - return + return nulls, err } + ips[i] = ip } + col.AppendV4IPs(ips) case []*string: nulls = make([]uint8, len(v)) - for i, v := range v { + ips := make([]netip.Addr, len(v), len(v)) + for i := range v { switch { - case v != nil: - data, err = appendIPv4Str(data, *v) + case v[i] != nil: + ip, err := strToIPV4(*v[i]) if err != nil { - return + return nulls, err } + ips[i] = ip default: - data, nulls[i] = append(data, make([]byte, net.IPv4len)...), 1 + ips[i] = netip.Addr{} + nulls[i] = 1 } } - case []net.IP: + col.AppendV4IPs(ips) + case []netip.Addr: nulls = make([]uint8, len(v)) - for _, v := range v { - data, err = appendIPv4(data, v) - if err != nil { - return + col.AppendV4IPs(v) + case []*netip.Addr: + nulls = make([]uint8, len(v)) + for i := range v { + switch { + case v[i] != nil: + col.col.Append(proto.ToIPv4(*v[i])) + default: + nulls[i] = 1 + col.col.Append(0) } } + case []net.IP: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(proto.ToIPv4(netIPToNetIPAddr(v[i]))) + } case []*net.IP: nulls = make([]uint8, len(v)) - for i, v := range v { + for i := range v { switch { - case v != nil: - data, err = appendIPv4(data, *v) - if err != nil { - return - } + case v[i] != nil: + col.col.Append(proto.ToIPv4(netIPToNetIPAddr(*v[i]))) + default: + nulls[i] = 1 + col.col.Append(0) + } + } + case []uint32: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(proto.IPv4(v[i])) + } + case []*uint32: + nulls = make([]uint8, len(v)) + for i := range v { + switch { + case v[i] != nil: + col.col.Append(proto.IPv4(*v[i])) default: - data, nulls[i] = append(data, make([]byte, net.IPv4len)...), 1 + nulls[i] = 1 + col.col.Append(0) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "IPv4", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "IPv4", @@ -159,33 +223,75 @@ func (col *IPv4) Append(v interface{}) (nulls []uint8, err error) { } } - col.data = append(col.data, data...) 
return } -func (col *IPv4) AppendRow(v interface{}) (err error) { +func (col *IPv4) AppendRow(v any) (err error) { switch v := v.(type) { case string: - col.data, err = appendIPv4Str(col.data, v) + ip, err := strToIPV4(v) + if err != nil { + return err + } + col.col.Append(proto.ToIPv4(ip)) case *string: switch { case v != nil: - col.data, err = appendIPv4Str(col.data, *v) + ip, err := strToIPV4(*v) + if err != nil { + return err + } + col.col.Append(proto.ToIPv4(ip)) default: - col.data, err = appendIPv4(col.data, make(net.IP, net.IPv4len)) + col.col.Append(0) + } + case netip.Addr: + col.col.Append(proto.ToIPv4(v)) + case *netip.Addr: + switch { + case v != nil: + col.col.Append(proto.ToIPv4(*v)) + default: + col.col.Append(0) } case net.IP: - col.data, err = appendIPv4(col.data, v) + switch { + case len(v) == 0: + col.col.Append(0) + default: + col.col.Append(proto.ToIPv4(netIPToNetIPAddr(v))) + } case *net.IP: switch { case v != nil: - col.data, err = appendIPv4(col.data, *v) + col.col.Append(proto.ToIPv4(netIPToNetIPAddr(*v))) default: - col.data, err = appendIPv4(col.data, make(net.IP, net.IPv4len)) + col.col.Append(0) } case nil: - col.data, err = appendIPv4(col.data, make(net.IP, net.IPv4len)) + col.col.Append(0) + case uint32: + col.col.Append(proto.IPv4(v)) + case *uint32: + switch { + case v != nil: + col.col.Append(proto.IPv4(*v)) + default: + col.col.Append(0) + } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "IPv4", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "IPv4", @@ -196,22 +302,33 @@ func (col *IPv4) AppendRow(v interface{}) (err error) { return } -func (col *IPv4) Decode(decoder *binary.Decoder, rows int) error { - col.data = make([]byte, net.IPv4len*rows) - return decoder.Raw(col.data) +func (col *IPv4) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *IPv4) Encode(encoder *binary.Encoder) error { - return encoder.Raw(col.data) +func (col *IPv4) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } +// TODO: This should probably return an netip.Addr func (col *IPv4) row(i int) net.IP { - src := col.data[i*net.IPv4len : (i+1)*net.IPv4len] - return net.IPv4(src[3], src[2], src[1], src[0]).To4() + src := col.col.Row(i).ToIP() + ip := src.As4() + return net.IPv4(ip[0], ip[1], ip[2], ip[3]).To4() } -func IPv4ToBytes(ip net.IP) []byte { - return []byte{ip[3], ip[2], ip[1], ip[0]} +func (col *IPv4) rowAddr(i int) netip.Addr { + return col.col.Row(i).ToIP() +} + +func netIPToNetIPAddr(ip net.IP) netip.Addr { + switch len(ip) { + case 4: + return netip.AddrFrom4([4]byte{ip[0], ip[1], ip[2], ip[3]}) + case 16: + return netip.AddrFrom4([4]byte{ip[12], ip[13], ip[14], ip[15]}) + } + return netip.Addr{} } var _ Interface = (*IPv4)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go index 17426aa..a67d17a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go @@ -18,18 +18,23 @@ package column import ( + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "net" + "net/netip" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type IPv6 
struct { - data []byte + col proto.ColIPv6 name string } +func (col *IPv6) Reset() { + col.col.Reset() +} + func (col *IPv6) Name() string { return col.name } @@ -43,10 +48,10 @@ func (col *IPv6) ScanType() reflect.Type { } func (col *IPv6) Rows() int { - return len(col.data) / net.IPv6len + return col.col.Rows() } -func (col *IPv6) Row(i int, ptr bool) interface{} { +func (col *IPv6) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -54,7 +59,7 @@ func (col *IPv6) Row(i int, ptr bool) interface{} { return value } -func (col *IPv6) ScanRow(dest interface{}, row int) error { +func (col *IPv6) ScanRow(dest any, row int) error { switch d := dest.(type) { case *string: *d = col.row(row).String() @@ -66,6 +71,26 @@ func (col *IPv6) ScanRow(dest interface{}, row int) error { case **net.IP: *d = new(net.IP) **d = col.row(row) + case *netip.Addr: + *d = col.rowAddr(row) + case **netip.Addr: + *d = new(netip.Addr) + **d = col.rowAddr(row) + case *[]byte: + *d = col.row(row) + case **[]byte: + *d = new([]byte) + **d = col.row(row) + case *proto.IPv6: + *d = col.col.Row(row) + case **proto.IPv6: + *d = new(proto.IPv6) + **d = col.col.Row(row) + case *[16]byte: + *d = col.col.Row(row) + case **[16]byte: + *d = new([16]byte) + **d = col.col.Row(row) default: return &ColumnConverterError{ Op: "ScanRow", @@ -76,136 +101,281 @@ func (col *IPv6) ScanRow(dest interface{}, row int) error { return nil } -// appendIPv6Str appends bytes of the IPv6-formatted string to result byte array. -// If IP is not valid V4 error will be returned. -func appendIPv6Str(data []byte, strIp string) ([]byte, error) { - ip := net.ParseIP(strIp) - if ip == nil { - return nil, &ColumnConverterError{ - Op: "Append", - To: "IPv6", - Hint: "invalid IP format", - } +func strToIPV6(strIp string) (netip.Addr, error) { + ip, err := netip.ParseAddr(strIp) + if err != nil { + return netip.Addr{}, err } - return appendIPv6(data, ip) + return ip, nil } -// appendIPv6 appends bytes of IPv6 to result byte array. -// If IP is not valid V4 error will be returned. 
-func appendIPv6(data []byte, ip net.IP) ([]byte, error) { - ip = ip.To16() - if ip == nil { - return nil, &ColumnConverterError{ - Op: "Append", - To: "IPv6", - Hint: "invalid IP version", - } +func (col *IPv6) AppendV6IPs(ips []netip.Addr) { + for i := range ips { + col.col.Append(proto.ToIPv6(ips[i])) } - return append(data, ip[:]...), nil } -func (col *IPv6) Append(v interface{}) (nulls []uint8, err error) { - var data []byte - +func (col *IPv6) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []string: nulls = make([]uint8, len(v)) - for _, v := range v { - data, err = appendIPv6Str(data, v) + ips := make([]netip.Addr, len(v), len(v)) + for i := range v { + ip, err := strToIPV6(v[i]) if err != nil { - return + return nulls, &ColumnConverterError{ + Op: "Append", + To: "IPv6", + Hint: "invalid IP format", + } } + ips[i] = ip } + col.AppendV6IPs(ips) case []*string: nulls = make([]uint8, len(v)) - for i, v := range v { + ips := make([]netip.Addr, len(v), len(v)) + for i := range v { switch { - case v != nil: - data, err = appendIPv6Str(data, *v) + case v[i] != nil: + ip, err := strToIPV6(*v[i]) if err != nil { - return + return nulls, &ColumnConverterError{ + Op: "Append", + To: "IPv6", + Hint: "invalid IP format", + } } + ips[i] = ip default: - data, nulls[i] = append(data, make([]byte, net.IPv6len)...), 1 + ips[i] = netip.Addr{} + nulls[i] = 1 } } - case []net.IP: + col.AppendV6IPs(ips) + case []netip.Addr: nulls = make([]uint8, len(v)) for _, v := range v { - data, err = appendIPv6(data, v) - if err != nil { - return + col.col.Append(proto.ToIPv6(v)) + } + case []*netip.Addr: + nulls = make([]uint8, len(v)) + for i, v := range v { + switch { + case v != nil: + col.col.Append(proto.ToIPv6(*v)) + default: + nulls[i] = 1 + col.col.Append([16]byte{}) } } + case []net.IP: + nulls = make([]uint8, len(v)) + for _, v := range v { + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(v)))) + } case []*net.IP: nulls = make([]uint8, len(v)) for i, v := range v { switch { case v != nil: - data, err = appendIPv6(data, *v) - if err != nil { - return - } + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(*v)))) default: - data, nulls[i] = append(data, make([]byte, net.IPv6len)...), 1 + nulls[i] = 1 + col.col.Append([16]byte{}) + } + } + case [][]byte: + nulls = make([]uint8, len(v)) + for _, v := range v { + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(v)))) + } + case []*[]byte: + nulls = make([]uint8, len(v)) + for i, v := range v { + switch { + case v != nil: + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(*v)))) + default: + nulls[i] = 1 + col.col.Append([16]byte{}) + } + } + case [][16]byte: + for _, v := range v { + col.col.Append(v) + } + case []*[16]byte: + nulls = make([]uint8, len(v)) + for i, v := range v { + switch { + case v != nil: + col.col.Append(*v) + default: + nulls[i] = 1 + col.col.Append([16]byte{}) + } + } + case []proto.IPv6: + for _, v := range v { + col.col.Append(v) + } + case []*proto.IPv6: + nulls = make([]uint8, len(v)) + for i, v := range v { + switch { + case v != nil: + col.col.Append(*v) + default: + nulls[i] = 1 + col.col.Append([16]byte{}) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "IPv6", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: 
"IPv6", From: fmt.Sprintf("%T", v), } } - - col.data = append(col.data, data...) return } -func (col *IPv6) AppendRow(v interface{}) (err error) { +func (col *IPv6) AppendRow(v any) (err error) { switch v := v.(type) { case string: - col.data, err = appendIPv6Str(col.data, v) + ip, err := strToIPV6(v) + if err != nil { + return &ColumnConverterError{ + Op: "Append", + To: "IPv6", + Hint: "invalid IP format", + } + } + col.col.Append(ip.As16()) case *string: switch { case v != nil: - col.data, err = appendIPv6Str(col.data, *v) + ip, err := strToIPV6(*v) + if err != nil { + return &ColumnConverterError{ + Op: "Append", + To: "IPv6", + Hint: "invalid IP format", + } + } + col.col.Append(ip.As16()) + default: + col.col.Append([16]byte{}) + } + case netip.Addr: + col.col.Append(proto.ToIPv6(v)) + case *netip.Addr: + switch { + case v != nil: + col.col.Append(proto.ToIPv6(*v)) default: - col.data, err = appendIPv6(col.data, make(net.IP, net.IPv6len)) + col.col.Append([16]byte{}) } case net.IP: - col.data, err = appendIPv6(col.data, v) + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(v)))) case *net.IP: switch { case v != nil: - col.data, err = appendIPv6(col.data, *v) + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(*v)))) default: - col.data, err = appendIPv6(col.data, make(net.IP, net.IPv6len)) + col.col.Append([16]byte{}) + } + case []byte: + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(v)))) + case *[]byte: + switch { + case v != nil: + col.col.Append(proto.ToIPv6(netip.AddrFrom16(IPv6ToBytes(*v)))) + default: + col.col.Append([16]byte{}) + } + case [16]byte: + col.col.Append(v) + case *[16]byte: + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append([16]byte{}) + } + case proto.IPv6: + col.col.Append(v) + case *proto.IPv6: + switch { + case v != nil: + col.col.Append(*v) + default: + col.col.Append([16]byte{}) } case nil: - col.data, err = appendIPv6(col.data, make(net.IP, net.IPv6len)) + col.col.Append([16]byte{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "IPv6", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "IPv6", From: fmt.Sprintf("%T", v), } } - return } -func (col *IPv6) Decode(decoder *binary.Decoder, rows int) error { - col.data = make([]byte, net.IPv6len*rows) - return decoder.Raw(col.data) +func (col *IPv6) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *IPv6) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } -func (col *IPv6) Encode(encoder *binary.Encoder) error { - return encoder.Raw(col.data) +func IPv6ToBytes(ip net.IP) [16]byte { + if ip == nil { + return [16]byte{} + } + + if len(ip) == 4 { + ip = ip.To16() + } + return [16]byte{ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7], ip[8], ip[9], ip[10], ip[11], ip[12], ip[13], ip[14], ip[15]} } +// TODO: This should probably return an netip.Addr func (col *IPv6) row(i int) net.IP { - return col.data[i*net.IPv6len : (i+1)*net.IPv6len] + src := col.col.Row(i) + return src[:] +} + +func (col *IPv6) rowAddr(i int) netip.Addr { + return col.col.Row(i).ToIP() } var _ Interface = (*IPv6)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go 
index b9ea5b7..0978a0b 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go @@ -19,9 +19,10 @@ package column import ( "fmt" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" "reflect" "strings" + "time" ) // inverse mapping - go types to clickhouse types @@ -39,16 +40,16 @@ var kindMappings = map[reflect.Kind]string{ reflect.Uint64: "UInt64", reflect.Float32: "Float32", reflect.Float64: "Float64", - reflect.Bool: "Boolean", + reflect.Bool: "Bool", } // complex types for which a mapping exists - currently we map to String but could enhance in the future for other types var typeMappings = map[string]struct{}{ // currently JSON doesn't support DateTime, Decimal or IP so mapped to String - "time.Time": struct{}{}, - "decimal.Decimal": struct{}{}, - "net.IP": struct{}{}, - "uuid.UUID": struct{}{}, + "time.Time": {}, + "decimal.Decimal": {}, + "net.IP": {}, + "uuid.UUID": {}, } type JSON interface { @@ -65,7 +66,7 @@ type JSONParent interface { rows() int } -func parseType(name string, vType reflect.Type, values interface{}, isArray bool, jCol JSONParent, numEmpty int) error { +func parseType(name string, vType reflect.Type, values any, isArray bool, jCol JSONParent, numEmpty int) error { _, ok := typeMappings[vType.String()] if !ok { return &UnsupportedColumnTypeError{ @@ -106,7 +107,7 @@ func parseType(name string, vType reflect.Type, values interface{}, isArray bool return col.AppendRow(fmt.Sprint(values)) } -func parsePrimitive(name string, kind reflect.Kind, values interface{}, isArray bool, jCol JSONParent, numEmpty int) error { +func parsePrimitive(name string, kind reflect.Kind, values any, isArray bool, jCol JSONParent, numEmpty int) error { ct, ok := kindMappings[kind] if !ok { return &UnsupportedColumnTypeError{ @@ -116,7 +117,7 @@ func parsePrimitive(name string, kind reflect.Kind, values interface{}, isArray var err error if isArray { ct = fmt.Sprintf("Array(%s)", ct) - // if we have a []interface{} we will need to cast to the target column type - this will be based on the first + // if we have a []any we will need to cast to the target column type - this will be based on the first // values types. Inconsistent slices will fail. values, err = convertSlice(values) if err != nil { @@ -144,11 +145,11 @@ func parsePrimitive(name string, kind reflect.Kind, values interface{}, isArray return col.AppendRow(values) } -// converts a []interface{} of primitives to a typed slice +// converts a []any of primitives to a typed slice // maybe this can be done with reflection but likely slower. investigate. // this uses the first value to determine the type - subsequent values must currently be of the same type - we might cast later // but wider driver doesn't support e.g. 
int to int64 -func convertSlice(values interface{}) (interface{}, error) { +func convertSlice(values any) (any, error) { rValues := reflect.ValueOf(values) if rValues.Len() == 0 || rValues.Index(0).Kind() != reflect.Interface { return values, nil @@ -162,7 +163,7 @@ func convertSlice(values interface{}) (interface{}, error) { } } if fType == nil { - return []interface{}{}, nil + return []any{}, nil } typedSlice := reflect.MakeSlice(reflect.SliceOf(fType), 0, rValues.Len()) for i := 0; i < rValues.Len(); i++ { @@ -185,30 +186,46 @@ func convertSlice(values interface{}) (interface{}, error) { func (jCol *JSONList) createNewOffsets(num int) { for i := 0; i < num; i++ { //single depth so can take 1st - if len(jCol.offsets[0].values.data) == 0 { + if jCol.offsets[0].values.col.Rows() == 0 { // first entry in the column - jCol.offsets[0].values.data = []uint64{0} + jCol.offsets[0].values.col.Append(0) } else { // entry for this object to see offset from last - offsets are cumulative - jCol.offsets[0].values.data = append(jCol.offsets[0].values.data, jCol.offsets[0].values.data[len(jCol.offsets[0].values.data)-1]) + jCol.offsets[0].values.col.Append(jCol.offsets[0].values.col.Row(jCol.offsets[0].values.col.Rows() - 1)) } } } -func getFieldName(field reflect.StructField) (string, bool) { +func getStructFieldName(field reflect.StructField) (string, bool) { name := field.Name - jsonTag := field.Tag.Get("json") - if jsonTag == "" { - return name, false - } + tag := field.Tag.Get("json") // not a standard but we allow - to omit fields - if jsonTag == "-" { + if tag == "-" { return name, true } - return jsonTag, false + if tag != "" { + return tag, false + } + // support ch tag as well as this is used elsewhere + tag = field.Tag.Get("ch") + if tag == "-" { + return name, true + } + if tag != "" { + return tag, false + } + return name, false } -func parseSlice(name string, values interface{}, jCol JSONParent, preFill int) error { +// ensures numeric keys and ` are escaped properly +func getMapFieldName(name string) string { + if !escapeColRegex.MatchString(name) { + return fmt.Sprintf("`%s`", colEscape.Replace(name)) + } + return colEscape.Replace(name) +} + +func parseSlice(name string, values any, jCol JSONParent, preFill int) error { fType := reflect.TypeOf(values).Elem() sKind := fType.Kind() rValues := reflect.ValueOf(values) @@ -245,7 +262,7 @@ func parseSlice(name string, values interface{}, jCol JSONParent, preFill int) e col.createNewOffsets(preFill + 1) for i := 0; i < rValues.Len(); i++ { // increment offset - col.offsets[0].values.data[len(col.offsets[0].values.data)-1] += 1 + col.offsets[0].values.col[col.offsets[0].values.col.Rows()-1] += 1 value := rValues.Index(i) sKind = value.Kind() if sKind == reflect.Interface { @@ -267,7 +284,7 @@ func parseSlice(name string, values interface{}, jCol JSONParent, preFill int) e return err } default: - // only happens if slice has a primitive mixed with complex types in a []interface{} + // only happens if slice has a primitive mixed with complex types in a []any return &Error{ ColumnType: fmt.Sprint(sKind), Err: fmt.Errorf("slices must be same dimension in column %s", col.Name()), @@ -288,10 +305,10 @@ func parseStruct(name string, structVal reflect.Value, jCol JSONParent, preFill } func iterateStruct(structVal reflect.Value, col JSONParent, preFill int) error { - // structs generally have consistent field counts but we ignore nil values that are interface{} as we can't infer from + // structs generally have consistent field counts but we ignore nil 
values that are any as we can't infer from // these until they occur - so we might need to either backfill when to do occur or insert empty based on previous if structVal.Kind() == reflect.Interface { - // can happen if passed from []interface{} + // can happen if passed from []any structVal = structVal.Elem() } @@ -305,7 +322,7 @@ func iterateStruct(structVal reflect.Value, col JSONParent, preFill int) error { newColumn := false for i := 0; i < structVal.NumField(); i++ { - fName, omit := getFieldName(structVal.Type().Field(i)) + fName, omit := getStructFieldName(structVal.Type().Field(i)) if omit { continue } @@ -395,7 +412,7 @@ func iterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { // two inconsistent options - 1. new - map has new columns 2. massing - map has missing columns // for (1) we need to update previous, for (2) we need to ensure we add a null entry if mapVal.Kind() == reflect.Interface { - // can happen if passed from []interface{} + // can happen if passed from []any mapVal = mapVal.Elem() } @@ -410,7 +427,13 @@ func iterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { addedColumns := make([]string, len(mapVal.MapKeys()), len(mapVal.MapKeys())) newColumn := false for i, key := range mapVal.MapKeys() { - name := key.Interface().(string) + if newColumn { + // reset as otherwise prefill overflow to other fields. But don't reset if this prefill has come from + // a higher level + preFill = 0 + } + + name := getMapFieldName(key.Interface().(string)) if _, ok := columnLookup[name]; !ok && len(currentColumns) > 0 { // new column - need to handle preFill = numRows @@ -454,11 +477,6 @@ func iterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { } } addedColumns[i] = name - if newColumn { - // reset as otherwise prefill overflow to other fields. But don't reset if this prefill has come from - // a higher level - preFill = 0 - } } // handle missing missingColumns := difference(currentColumns, addedColumns) @@ -470,7 +488,7 @@ func iterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { return nil } -func appendStructOrMap(jCol *JSONObject, data interface{}) error { +func appendStructOrMap(jCol *JSONObject, data any) error { vData := reflect.ValueOf(data) kind := vData.Kind() if kind == reflect.Struct { @@ -483,6 +501,13 @@ func appendStructOrMap(jCol *JSONObject, data interface{}) error { Err: fmt.Errorf("map keys must be string for column %s", jCol.Name()), } } + if jCol.columns == nil && vData.Len() == 0 { + // if map is empty, we need to create an empty Tuple to make sure subcolumns protocol is happy + // _dummy is a ClickHouse internal name for empty Tuple subcolumn + // it has the same effect as `INSERT INTO single_json_type_table VALUES ('{}');` + jCol.upsertValue("_dummy", "Int8") + return jCol.insertEmptyColumn("_dummy") + } return iterateMap(vData, jCol, 0) } return &UnsupportedColumnTypeError{ @@ -496,6 +521,10 @@ type JSONValue struct { origType reflect.Type } +func (jCol *JSONValue) Reset() { + jCol.Interface.Reset() +} + func (jCol *JSONValue) appendEmptyValue() error { switch jCol.Interface.(type) { case *Array: @@ -537,13 +566,13 @@ func (jCol *JSONList) rows() int { return jCol.values.(*JSONObject).Rows() } -func createJSONList(name string) (jCol *JSONList) { +func createJSONList(name string, tz *time.Location) (jCol *JSONList) { // lists are represented as Nested which are in turn encoded as Array(Tuple()). 
We thus pass a Array(JSONObject()) // as this encodes like a tuple lCol := &JSONList{ name: name, } - lCol.values = &JSONObject{} + lCol.values = &JSONObject{tz: tz} // depth should always be one as nested arrays aren't possible lCol.depth = 1 lCol.scanType = scanTypeSlice @@ -566,7 +595,8 @@ func (jCol *JSONList) insertEmptyColumn(name string) error { func (jCol *JSONList) upsertValue(name string, ct string) (*JSONValue, error) { // check if column exists and reuse if same type, error if same name and different type - cols := jCol.values.(*JSONObject).columns + jObj := jCol.values.(*JSONObject) + cols := jObj.columns for i := range cols { sCol := cols[i] if sCol.Name() == name { @@ -588,19 +618,20 @@ func (jCol *JSONList) upsertValue(name string, ct string) (*JSONValue, error) { return vCol, nil } } - col, err := Type(ct).Column(name) + col, err := Type(ct).Column(name, jObj.tz) if err != nil { return nil, err } vCol := &JSONValue{ Interface: col, } - jCol.values.(*JSONObject).columns = append(cols, vCol) + jCol.values.(*JSONObject).columns = append(cols, vCol) // nolint:gocritic return vCol, nil } func (jCol *JSONList) upsertList(name string) (*JSONList, error) { // check if column exists and reuse if same type, error if same name and different type + jObj := jCol.values.(*JSONObject) cols := jCol.values.(*JSONObject).columns for i := range cols { sCol := cols[i] @@ -615,15 +646,16 @@ func (jCol *JSONList) upsertList(name string) (*JSONList, error) { return sCol, nil } } - lCol := createJSONList(name) - jCol.values.(*JSONObject).columns = append(cols, lCol) + lCol := createJSONList(name, jObj.tz) + jCol.values.(*JSONObject).columns = append(cols, lCol) // nolint:gocritic return lCol, nil } func (jCol *JSONList) upsertObject(name string) (*JSONObject, error) { // check if column exists and reuse if same type, error if same name and different type - cols := jCol.values.(*JSONObject).columns + jObj := jCol.values.(*JSONObject) + cols := jObj.columns for i := range cols { sCol := cols[i] if sCol.Name() == name { @@ -642,8 +674,9 @@ func (jCol *JSONList) upsertObject(name string) (*JSONObject, error) { // as this encodes like a tuple oCol := &JSONObject{ name: name, + tz: jObj.tz, } - jCol.values.(*JSONObject).columns = append(cols, oCol) + jCol.values.(*JSONObject).columns = append(cols, oCol) // nolint:gocritic return oCol, nil } @@ -665,6 +698,13 @@ type JSONObject struct { name string root bool encoding uint8 + tz *time.Location +} + +func (jCol *JSONObject) Reset() { + for i := range jCol.columns { + jCol.columns[i].Reset() + } } func (jCol *JSONObject) Name() string { @@ -728,7 +768,7 @@ func (jCol *JSONObject) upsertValue(name string, ct string) (*JSONValue, error) return vCol, nil } } - col, err := Type(ct).Column(name) + col, err := Type(ct).Column(name, jCol.tz) if err != nil { return nil, err } @@ -754,7 +794,7 @@ func (jCol *JSONObject) upsertList(name string) (*JSONList, error) { return sCol, nil } } - lCol := createJSONList(name) + lCol := createJSONList(name, jCol.tz) jCol.columns = append(jCol.columns, lCol) return lCol, nil } @@ -778,6 +818,7 @@ func (jCol *JSONObject) upsertObject(name string) (*JSONObject, error) { // not present so create oCol := &JSONObject{ name: name, + tz: jCol.tz, } jCol.columns = append(jCol.columns, oCol) return oCol, nil @@ -812,15 +853,17 @@ func (jCol *JSONObject) Rows() int { return 0 } -func (jCol *JSONObject) Row(i int, ptr bool) interface{} { +// ClickHouse returns JSON as a tuple i.e. 
these will never be invoked + +func (jCol *JSONObject) Row(i int, ptr bool) any { panic("Not implemented") } -func (jCol *JSONObject) ScanRow(dest interface{}, row int) error { +func (jCol *JSONObject) ScanRow(dest any, row int) error { panic("Not implemented") } -func (jCol *JSONObject) Append(v interface{}) (nulls []uint8, err error) { +func (jCol *JSONObject) Append(v any) (nulls []uint8, err error) { jSlice := reflect.ValueOf(v) if jSlice.Kind() != reflect.Slice { return nil, &ColumnConverterError{ @@ -837,7 +880,7 @@ func (jCol *JSONObject) Append(v interface{}) (nulls []uint8, err error) { return nil, nil } -func (jCol *JSONObject) AppendRow(v interface{}) error { +func (jCol *JSONObject) AppendRow(v any) error { if reflect.ValueOf(v).Kind() == reflect.Struct || reflect.ValueOf(v).Kind() == reflect.Map { if jCol.columns != nil && jCol.encoding == 1 { return &Error{ @@ -871,31 +914,27 @@ func (jCol *JSONObject) AppendRow(v interface{}) error { return nil } -func (jCol *JSONObject) Decode(decoder *binary.Decoder, rows int) error { +func (jCol *JSONObject) Decode(reader *proto.Reader, rows int) error { panic("Not implemented") } -func (jCol *JSONObject) Encode(encoder *binary.Encoder) error { +func (jCol *JSONObject) Encode(buffer *proto.Buffer) { if jCol.root && jCol.encoding == 0 { - if err := encoder.String(string(jCol.FullType())); err != nil { - return err - } + buffer.PutString(string(jCol.FullType())) } for _, c := range jCol.columns { - if err := c.Encode(encoder); err != nil { - return err - } + c.Encode(buffer) } - return nil } -func (jCol *JSONObject) ReadStatePrefix(decoder *binary.Decoder) error { - _, err := decoder.UInt8() +func (jCol *JSONObject) ReadStatePrefix(reader *proto.Reader) error { + _, err := reader.UInt8() return err } -func (jCol *JSONObject) WriteStatePrefix(encoder *binary.Encoder) error { - return encoder.UInt8(jCol.encoding) +func (jCol *JSONObject) WriteStatePrefix(buffer *proto.Buffer) error { + buffer.PutUInt8(jCol.encoding) + return nil } var ( diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go index 8c0fabd..393d4da 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go @@ -20,11 +20,10 @@ package column import ( "errors" "fmt" + "github.com/ClickHouse/ch-go/proto" "math" "reflect" "time" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) const indexTypeMask = 0b11111111 @@ -65,19 +64,30 @@ type LowCardinality struct { append struct { keys []int - index map[interface{}]int + index map[any]int } name string } +func (col *LowCardinality) Reset() { + col.rows = 0 + col.index.Reset() + col.keys8.Reset() + col.keys16.Reset() + col.keys32.Reset() + col.keys64.Reset() + col.append.index = make(map[any]int) + col.append.keys = col.append.keys[:0] +} + func (col *LowCardinality) Name() string { return col.name } -func (col *LowCardinality) parse(t Type) (_ *LowCardinality, err error) { +func (col *LowCardinality) parse(t Type, tz *time.Location) (_ *LowCardinality, err error) { col.chType = t - col.append.index = make(map[interface{}]int) - if col.index, err = Type(t.params()).Column(col.name); err != nil { + col.append.index = make(map[any]int) + if col.index, err = Type(t.params()).Column(col.name, tz); err != nil { return nil, err } if nullable, ok := col.index.(*Nullable); ok { @@ -98,7 +108,7 @@ func (col *LowCardinality) 
Rows() int { return col.rows } -func (col *LowCardinality) Row(i int, ptr bool) interface{} { +func (col *LowCardinality) Row(i int, ptr bool) any { idx := col.indexRowNum(i) if idx == 0 && col.nullable { return nil @@ -106,7 +116,7 @@ func (col *LowCardinality) Row(i int, ptr bool) interface{} { return col.index.Row(idx, ptr) } -func (col *LowCardinality) ScanRow(dest interface{}, row int) error { +func (col *LowCardinality) ScanRow(dest any, row int) error { idx := col.indexRowNum(row) if idx == 0 && col.nullable { return nil @@ -114,7 +124,7 @@ func (col *LowCardinality) ScanRow(dest interface{}, row int) error { return col.index.ScanRow(dest, idx) } -func (col *LowCardinality) Append(v interface{}) (nulls []uint8, err error) { +func (col *LowCardinality) Append(v any) (nulls []uint8, err error) { value := reflect.Indirect(reflect.ValueOf(v)) if value.Kind() != reflect.Slice { return nil, &ColumnConverterError{ @@ -131,14 +141,15 @@ func (col *LowCardinality) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *LowCardinality) AppendRow(v interface{}) error { +func (col *LowCardinality) AppendRow(v any) error { col.rows++ if col.index.Rows() == 0 { // init if col.index.AppendRow(nil); col.nullable { col.index.AppendRow(nil) } } - if v == nil { + // second check is unfortunate - but we could be passed a *type(nil) e.g. via LowCardinality(Nullable(String)) + if v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) { col.append.keys = append(col.append.keys, 0) return nil } @@ -156,11 +167,11 @@ func (col *LowCardinality) AppendRow(v interface{}) error { return nil } -func (col *LowCardinality) Decode(decoder *binary.Decoder, rows int) error { +func (col *LowCardinality) Decode(reader *proto.Reader, rows int) error { if rows == 0 { return nil } - indexSerializationType, err := decoder.UInt64() + indexSerializationType, err := reader.UInt64() if err != nil { return err } @@ -185,77 +196,64 @@ func (col *LowCardinality) Decode(decoder *binary.Decoder, rows int) error { Err: errors.New("additional keys bit is missing"), } } - indexRows, err := decoder.Int64() + indexRows, err := reader.Int64() if err != nil { return err } - if err := col.index.Decode(decoder, int(indexRows)); err != nil { + if err := col.index.Decode(reader, int(indexRows)); err != nil { return err } - keysRows, err := decoder.Int64() + keysRows, err := reader.Int64() if err != nil { return err } col.rows = int(keysRows) - return col.keys().Decode(decoder, col.rows) + return col.keys().Decode(reader, col.rows) } -func (col *LowCardinality) Encode(encoder *binary.Encoder) error { +func (col *LowCardinality) Encode(buffer *proto.Buffer) { if col.rows == 0 { - return nil + return } defer func() { col.append.keys, col.append.index = nil, nil }() ixLen := uint64(len(col.append.index)) switch { + case col.keys().Rows() > 0: + // We already have keys, so this column is probably in a block directly decoded from the server, and we should + // not reset them case ixLen < math.MaxUint8: col.key = keyUInt8 for _, v := range col.append.keys { - if err := col.keys8.AppendRow(uint8(v)); err != nil { - return err - } + col.keys8.AppendRow(uint8(v)) } case ixLen < math.MaxUint16: col.key = keyUInt16 for _, v := range col.append.keys { - if err := col.keys16.AppendRow(uint16(v)); err != nil { - return err - } + col.keys16.AppendRow(uint16(v)) } case ixLen < math.MaxUint32: col.key = keyUInt32 for _, v := range col.append.keys { - if err := col.keys32.AppendRow(uint32(v)); err != nil { - return 
err - } + col.keys32.AppendRow(uint32(v)) } default: col.key = keyUInt64 for _, v := range col.append.keys { - if err := col.keys64.AppendRow(uint64(v)); err != nil { - return err - } + col.keys64.AppendRow(uint64(v)) } } - if err := encoder.UInt64(updateAll | uint64(col.key)); err != nil { - return err - } - if err := encoder.Int64(int64(col.index.Rows())); err != nil { - return err - } - if err := col.index.Encode(encoder); err != nil { - return err - } + buffer.PutUInt64(updateAll | uint64(col.key)) + buffer.PutInt64(int64(col.index.Rows())) + col.index.Encode(buffer) keys := col.keys() - if err := encoder.Int64(int64(keys.Rows())); err != nil { - return err - } - return keys.Encode(encoder) + buffer.PutInt64(int64(keys.Rows())) + keys.Encode(buffer) } -func (col *LowCardinality) ReadStatePrefix(decoder *binary.Decoder) error { - keyVersion, err := decoder.UInt64() +func (col *LowCardinality) ReadStatePrefix(reader *proto.Reader) error { + keyVersion, err := reader.UInt64() if err != nil { return err } @@ -268,8 +266,9 @@ func (col *LowCardinality) ReadStatePrefix(decoder *binary.Decoder) error { return nil } -func (col *LowCardinality) WriteStatePrefix(encoder *binary.Encoder) error { - return encoder.UInt64(sharedDictionariesWithAdditionalKeys) +func (col *LowCardinality) WriteStatePrefix(buffer *proto.Buffer) error { + buffer.PutUInt64(sharedDictionariesWithAdditionalKeys) + return nil } func (col *LowCardinality) keys() Interface { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go index e019364..79dc75a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go @@ -18,11 +18,13 @@ package column import ( + "database/sql/driver" "fmt" "reflect" "strings" + "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" ) // https://github.com/ClickHouse/ClickHouse/blob/master/src/Columns/ColumnMap.cpp @@ -35,17 +37,50 @@ type Map struct { name string } +type OrderedMap interface { + Get(key any) (any, bool) + Put(key any, value any) + Keys() <-chan any +} + +type MapIterator interface { + Next() bool + Key() any + Value() any +} + +type IterableOrderedMap interface { + Put(key any, value any) + Iterator() MapIterator +} + +func (col *Map) Reset() { + col.keys.Reset() + col.values.Reset() + col.offsets.Reset() +} + func (col *Map) Name() string { return col.name } -func (col *Map) parse(t Type) (_ Interface, err error) { +func (col *Map) parse(t Type, tz *time.Location) (_ Interface, err error) { col.chType = t - if types := strings.SplitN(t.params(), ",", 2); len(types) == 2 { - if col.keys, err = Type(strings.TrimSpace(types[0])).Column(col.name); err != nil { + types := make([]string, 2, 2) + typeParams := t.params() + idx := strings.Index(typeParams, ",") + if strings.HasPrefix(typeParams, "Enum") { + idx = strings.Index(typeParams, "),") + 1 + } + if idx > 0 { + types[0] = typeParams[:idx] + types[1] = typeParams[idx+1:] + } + if types[0] != "" && types[1] != "" { + if col.keys, err = Type(strings.TrimSpace(types[0])).Column(col.name, tz); err != nil { return nil, err } - if col.values, err = Type(strings.TrimSpace(types[1])).Column(col.name); err != nil { + if col.values, err = Type(strings.TrimSpace(types[1])).Column(col.name, tz); err != nil { return nil, err } col.scanType = reflect.MapOf( @@ -68,32 +103,56 @@ func (col *Map) ScanType() reflect.Type { } func (col 
*Map) Rows() int { - return len(col.offsets.data) + return col.offsets.col.Rows() } -func (col *Map) Row(i int, ptr bool) interface{} { +func (col *Map) Row(i int, ptr bool) any { return col.row(i).Interface() } -func (col *Map) ScanRow(dest interface{}, i int) error { +func (col *Map) ScanRow(dest any, i int) error { value := reflect.Indirect(reflect.ValueOf(dest)) - if value.Type() != col.scanType { - return &ColumnConverterError{ - Op: "ScanRow", - To: fmt.Sprintf("%T", dest), - From: string(col.chType), - Hint: fmt.Sprintf("try using %s", col.scanType), + if value.Type() == col.scanType { + value.Set(col.row(i)) + return nil + } + if om, ok := dest.(IterableOrderedMap); ok { + keys, values := col.orderedRow(i) + for i := range keys { + om.Put(keys[i], values[i]) } + return nil } - { - value.Set(col.row(i)) + if om, ok := dest.(OrderedMap); ok { + keys, values := col.orderedRow(i) + for i := range keys { + om.Put(keys[i], values[i]) + } + return nil + } + return &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%T", dest), + From: string(col.chType), + Hint: fmt.Sprintf("try using %s", col.scanType), } - return nil } -func (col *Map) Append(v interface{}) (nulls []uint8, err error) { +func (col *Map) Append(v any) (nulls []uint8, err error) { value := reflect.Indirect(reflect.ValueOf(v)) if value.Kind() != reflect.Slice { + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.scanType), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -109,73 +168,131 @@ func (col *Map) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *Map) AppendRow(v interface{}) error { +func (col *Map) AppendRow(v any) error { value := reflect.Indirect(reflect.ValueOf(v)) - if value.Type() != col.scanType { - return &ColumnConverterError{ - Op: "AppendRow", - To: string(col.chType), - From: fmt.Sprintf("%T", v), - Hint: fmt.Sprintf("try using %s", col.scanType), + if value.Type() == col.scanType { + var ( + size int64 + iter = value.MapRange() + ) + for iter.Next() { + size++ + if err := col.keys.AppendRow(iter.Key().Interface()); err != nil { + return err + } + if err := col.values.AppendRow(iter.Value().Interface()); err != nil { + return err + } + } + var prev int64 + if n := col.offsets.Rows(); n != 0 { + prev = col.offsets.col.Row(n - 1) } + col.offsets.col.Append(prev + size) + return nil } - var ( - size int64 - iter = value.MapRange() - ) - for iter.Next() { - size++ - if err := col.keys.AppendRow(iter.Key().Interface()); err != nil { - return err + + if orderedMap, ok := v.(IterableOrderedMap); ok { + var size int64 + iter := orderedMap.Iterator() + for iter.Next() { + key, value := iter.Key(), iter.Value() + size++ + if err := col.keys.AppendRow(key); err != nil { + return err + } + if err := col.values.AppendRow(value); err != nil { + return err + } } - if err := col.values.AppendRow(iter.Value().Interface()); err != nil { - return err + var prev int64 + if n := col.offsets.Rows(); n != 0 { + prev = col.offsets.col.Row(n - 1) } + col.offsets.col.Append(prev + size) + return nil } - var prev int64 - if n := len(col.offsets.data); n != 0 { - prev = col.offsets.data[n-1] + + if orderedMap, ok := v.(OrderedMap); ok { + var size int64 + for key := range orderedMap.Keys() { + value, ok := 
orderedMap.Get(key) + if !ok { + return fmt.Errorf("ordered map has key %v but no corresponding value", key) + } + size++ + if err := col.keys.AppendRow(key); err != nil { + return err + } + if err := col.values.AppendRow(value); err != nil { + return err + } + } + var prev int64 + if n := col.offsets.Rows(); n != 0 { + prev = col.offsets.col.Row(n - 1) + } + col.offsets.col.Append(prev + size) + return nil } - col.offsets.data = append(col.offsets.data, prev+size) - return nil -} -func (col *Map) Decode(decoder *binary.Decoder, rows int) error { - if err := col.offsets.Decode(decoder, rows); err != nil { - return err + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.scanType), + } + } + return col.AppendRow(val) } - size := int(col.offsets.data[len(col.offsets.data)-1]) - if err := col.keys.Decode(decoder, size); err != nil { - return err + + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("try using %s", col.scanType), } - return col.values.Decode(decoder, size) + } -func (col *Map) Encode(encoder *binary.Encoder) error { - if err := col.offsets.Encode(encoder); err != nil { +func (col *Map) Decode(reader *proto.Reader, rows int) error { + if err := col.offsets.col.DecodeColumn(reader, rows); err != nil { return err } - if err := col.keys.Encode(encoder); err != nil { - return err + if i := col.offsets.Rows(); i != 0 { + size := int(col.offsets.col.Row(i - 1)) + if err := col.keys.Decode(reader, size); err != nil { + return err + } + return col.values.Decode(reader, size) } - return col.values.Encode(encoder) + return nil +} + +func (col *Map) Encode(buffer *proto.Buffer) { + col.offsets.col.EncodeColumn(buffer) + col.keys.Encode(buffer) + col.values.Encode(buffer) } -func (col *Map) ReadStatePrefix(decoder *binary.Decoder) error { +func (col *Map) ReadStatePrefix(reader *proto.Reader) error { if serialize, ok := col.keys.(CustomSerialization); ok { - if err := serialize.ReadStatePrefix(decoder); err != nil { + if err := serialize.ReadStatePrefix(reader); err != nil { return err } } if serialize, ok := col.values.(CustomSerialization); ok { - if err := serialize.ReadStatePrefix(decoder); err != nil { + if err := serialize.ReadStatePrefix(reader); err != nil { return err } } return nil } -func (col *Map) WriteStatePrefix(encoder *binary.Encoder) error { +func (col *Map) WriteStatePrefix(encoder *proto.Buffer) error { if serialize, ok := col.keys.(CustomSerialization); ok { if err := serialize.WriteStatePrefix(encoder); err != nil { return err @@ -195,10 +312,10 @@ func (col *Map) row(n int) reflect.Value { value = reflect.MakeMap(col.scanType) ) if n != 0 { - prev = col.offsets.data[n-1] + prev = col.offsets.col.Row(n - 1) } var ( - size = int(col.offsets.data[n] - prev) + size = int(col.offsets.col.Row(n) - prev) from = int(prev) ) for next := 0; next < size; next++ { @@ -210,6 +327,24 @@ func (col *Map) row(n int) reflect.Value { return value } +func (col *Map) orderedRow(n int) ([]any, []any) { + var prev int64 + if n != 0 { + prev = col.offsets.col.Row(n - 1) + } + var ( + size = int(col.offsets.col.Row(n) - prev) + from = int(prev) + ) + keys := make([]any, size) + values := make([]any, size) + for next := 0; next < size; next++ { + keys[next] = col.keys.Row(from+next, false) + values[next] 
= col.values.Row(from+next, false) + } + return keys, values +} + var ( _ Interface = (*Map)(nil) _ CustomSerialization = (*Map)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go index ada43af..d961bd0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go @@ -20,6 +20,7 @@ package column import ( "fmt" "strings" + "time" ) type Nested struct { @@ -27,6 +28,10 @@ type Nested struct { name string } +func (col *Nested) Reset() { + col.Interface.Reset() +} + func asDDL(cols []namedCol) string { sCols := make([]string, len(cols), len(cols)) for i := range cols { @@ -35,9 +40,9 @@ func asDDL(cols []namedCol) string { return strings.Join(sCols, ", ") } -func (col *Nested) parse(t Type) (_ Interface, err error) { +func (col *Nested) parse(t Type, tz *time.Location) (_ Interface, err error) { columns := fmt.Sprintf("Array(Tuple(%s))", asDDL(nestedColumns(t.params()))) - if col.Interface, err = (&Array{name: col.name}).parse(Type(columns)); err != nil { + if col.Interface, err = (&Array{name: col.name}).parse(Type(columns), tz); err != nil { return nil, err } return col, nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nothing.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nothing.go index 0dfb038..1a9d8fd 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nothing.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nothing.go @@ -19,48 +19,48 @@ package column import ( "errors" + "github.com/ClickHouse/ch-go/proto" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type Nothing struct { name string + col proto.ColNothing +} + +func (col *Nothing) Reset() { + col.col.Reset() } -func (n Nothing) Name() string { - return n.name +func (col Nothing) Name() string { + return col.name } -func (Nothing) Type() Type { return "Nothing" } -func (Nothing) ScanType() reflect.Type { return reflect.TypeOf(nil) } -func (Nothing) Rows() int { return 0 } -func (Nothing) Row(int, bool) interface{} { return nil } -func (Nothing) ScanRow(interface{}, int) error { return nil } -func (Nothing) Append(interface{}) ([]uint8, error) { +func (Nothing) Type() Type { return "Nothing" } +func (Nothing) ScanType() reflect.Type { return reflect.TypeOf((*any)(nil)) } +func (Nothing) Rows() int { return 0 } +func (Nothing) Row(int, bool) any { return nil } +func (Nothing) ScanRow(any, int) error { + return nil +} +func (Nothing) Append(any) ([]uint8, error) { return nil, &Error{ ColumnType: "Nothing", Err: errors.New("data type values can't be stored in tables"), } } -func (Nothing) AppendRow(interface{}) error { +func (col Nothing) AppendRow(any) error { return &Error{ ColumnType: "Nothing", Err: errors.New("data type values can't be stored in tables"), } } -func (Nothing) Decode(decoder *binary.Decoder, rows int) error { - scratch := make([]byte, rows) - if err := decoder.Raw(scratch); err != nil { - return err - } - return nil + +func (col Nothing) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (Nothing) Encode(*binary.Encoder) error { - return &Error{ - ColumnType: "Nothing", - Err: errors.New("data type values can't be stored in tables"), - } + +func (Nothing) Encode(buffer *proto.Buffer) { } var _ Interface = (*Nothing)(nil) diff --git 
a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nullable.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nullable.go index 58aeae1..d168e29 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nullable.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nullable.go @@ -18,26 +18,33 @@ package column import ( + "database/sql" + "database/sql/driver" + "github.com/ClickHouse/ch-go/proto" "reflect" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "time" ) type Nullable struct { base Interface - nulls UInt8 + nulls proto.ColUInt8 enable bool scanType reflect.Type name string } +func (col *Nullable) Reset() { + col.base.Reset() + col.nulls.Reset() +} + func (col *Nullable) Name() string { return col.name } -func (col *Nullable) parse(t Type) (_ *Nullable, err error) { +func (col *Nullable) parse(t Type, tz *time.Location) (_ *Nullable, err error) { col.enable = true - if col.base, err = Type(t.params()).Column(col.name); err != nil { + if col.base, err = Type(t.params()).Column(col.name, tz); err != nil { return nil, err } switch base := col.base.ScanType(); { @@ -67,67 +74,113 @@ func (col *Nullable) Rows() int { if !col.enable { return col.base.Rows() } - return len(col.nulls.data) + return col.nulls.Rows() } -func (col *Nullable) Row(i int, ptr bool) interface{} { +func (col *Nullable) Row(i int, ptr bool) any { if col.enable { - if col.nulls.data[i] == 1 { + if col.nulls.Row(i) == 1 { return nil } } return col.base.Row(i, true) } -func (col *Nullable) ScanRow(dest interface{}, row int) error { +func (col *Nullable) ScanRow(dest any, row int) error { if col.enable { - if col.nulls.data[row] == 1 { + switch col.nulls.Row(row) { + case 1: + switch v := dest.(type) { + case **uint64: + *v = nil + case **int64: + *v = nil + case **uint32: + *v = nil + case **int32: + *v = nil + case **uint16: + *v = nil + case **int16: + *v = nil + case **uint8: + *v = nil + case **int8: + *v = nil + case **string: + *v = nil + case **float32: + *v = nil + case **float64: + *v = nil + case **time.Time: + *v = nil + } + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(nil) + } return nil } } return col.base.ScanRow(dest, row) } -func (col *Nullable) Append(v interface{}) ([]uint8, error) { +func (col *Nullable) Append(v any) ([]uint8, error) { nulls, err := col.base.Append(v) if err != nil { return nil, err } - col.nulls.data = append(col.nulls.data, nulls...) 
+ for i := range nulls { + col.nulls.Append(nulls[i]) + } return nulls, nil } -func (col *Nullable) AppendRow(v interface{}) error { - if v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) { - col.nulls.data = append(col.nulls.data, 1) +func (col *Nullable) AppendRow(v any) error { + // Might receive double pointers like **String, because of how Nullable columns are read + // Unpack because we can't write double pointers + rv := reflect.ValueOf(v) + if v != nil && rv.Kind() == reflect.Pointer && !rv.IsNil() && rv.Elem().Kind() == reflect.Pointer { + v = rv.Elem().Interface() + rv = reflect.ValueOf(v) + } + + if v == nil || (rv.Kind() == reflect.Pointer && rv.IsNil()) { + col.nulls.Append(1) + // used to detect sql.Null* types + } else if val, ok := v.(driver.Valuer); ok { + val, err := val.Value() + if err != nil { + return err + } + if val == nil { + col.nulls.Append(1) + } else { + col.nulls.Append(0) + } } else { - col.nulls.data = append(col.nulls.data, 0) + col.nulls.Append(0) } return col.base.AppendRow(v) } -func (col *Nullable) Decode(decoder *binary.Decoder, rows int) (err error) { +func (col *Nullable) Decode(reader *proto.Reader, rows int) error { if col.enable { - if err := col.nulls.Decode(decoder, rows); err != nil { + if err := col.nulls.DecodeColumn(reader, rows); err != nil { return err } } - if err := col.base.Decode(decoder, rows); err != nil { + if err := col.base.Decode(reader, rows); err != nil { return err } return nil } -func (col *Nullable) Encode(encoder *binary.Encoder) error { +func (col *Nullable) Encode(buffer *proto.Buffer) { if col.enable { - if err := col.nulls.Encode(encoder); err != nil { - return err - } + col.nulls.EncodeColumn(buffer) } - if err := col.base.Encode(encoder); err != nil { - return err - } - return nil + col.base.Encode(buffer) } var _ Interface = (*Nullable)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/simple_aggregate_function.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/simple_aggregate_function.go index 87693c6..50a3055 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/simple_aggregate_function.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/simple_aggregate_function.go @@ -18,10 +18,10 @@ package column import ( + "github.com/ClickHouse/ch-go/proto" "reflect" "strings" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "time" ) type SimpleAggregateFunction struct { @@ -30,14 +30,18 @@ type SimpleAggregateFunction struct { name string } +func (col *SimpleAggregateFunction) Reset() { + col.base.Reset() +} + func (col *SimpleAggregateFunction) Name() string { return col.name } -func (col *SimpleAggregateFunction) parse(t Type) (_ Interface, err error) { +func (col *SimpleAggregateFunction) parse(t Type, tz *time.Location) (_ Interface, err error) { col.chType = t base := strings.TrimSpace(strings.SplitN(t.params(), ",", 2)[1]) - if col.base, err = Type(base).Column(col.name); err == nil { + if col.base, err = Type(base).Column(col.name, tz); err == nil { return col, nil } return nil, &UnsupportedColumnTypeError{ @@ -54,23 +58,23 @@ func (col *SimpleAggregateFunction) ScanType() reflect.Type { func (col *SimpleAggregateFunction) Rows() int { return col.base.Rows() } -func (col *SimpleAggregateFunction) Row(i int, ptr bool) interface{} { +func (col *SimpleAggregateFunction) Row(i int, ptr bool) any { return col.base.Row(i, ptr) } -func (col *SimpleAggregateFunction) ScanRow(dest interface{}, rows int) error { +func 
(col *SimpleAggregateFunction) ScanRow(dest any, rows int) error { return col.base.ScanRow(dest, rows) } -func (col *SimpleAggregateFunction) Append(v interface{}) ([]uint8, error) { +func (col *SimpleAggregateFunction) Append(v any) ([]uint8, error) { return col.base.Append(v) } -func (col *SimpleAggregateFunction) AppendRow(v interface{}) error { +func (col *SimpleAggregateFunction) AppendRow(v any) error { return col.base.AppendRow(v) } -func (col *SimpleAggregateFunction) Decode(decoder *binary.Decoder, rows int) error { - return col.base.Decode(decoder, rows) +func (col *SimpleAggregateFunction) Decode(reader *proto.Reader, rows int) error { + return col.base.Decode(reader, rows) } -func (col *SimpleAggregateFunction) Encode(encoder *binary.Encoder) error { - return col.base.Encode(encoder) +func (col *SimpleAggregateFunction) Encode(buffer *proto.Buffer) { + col.base.Encode(buffer) } var _ Interface = (*SimpleAggregateFunction)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go index 9b4261b..5ce480b 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go @@ -18,15 +18,23 @@ package column import ( + "database/sql" + "database/sql/driver" "encoding" "fmt" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" "reflect" + + "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) type String struct { name string - data []string + col proto.ColStr +} + +func (col *String) Reset() { + col.col.Reset() } func (col String) Name() string { @@ -42,28 +50,33 @@ func (String) ScanType() reflect.Type { } func (col *String) Rows() int { - return len(col.data) + return col.col.Rows() } -func (col *String) Row(i int, ptr bool) interface{} { - value := *col +func (col *String) Row(i int, ptr bool) any { + val := col.col.Row(i) if ptr { - return &value.data[i] + return &val } - return value.data[i] + return val } -func (col *String) ScanRow(dest interface{}, row int) error { - v := *col +func (col *String) ScanRow(dest any, row int) error { + val := col.Row(row, false).(string) switch d := dest.(type) { case *string: - *d = v.data[row] + *d = val case **string: *d = new(string) - **d = v.data[row] + **d = val + case *sql.NullString: + return d.Scan(val) case encoding.BinaryUnmarshaler: - return d.UnmarshalBinary(binary.Str2Bytes(v.data[row])) + return d.UnmarshalBinary(binary.Str2Bytes(val, len(val))) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(val) + } return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", dest), @@ -73,44 +86,53 @@ func (col *String) ScanRow(dest interface{}, row int) error { return nil } -func (col *String) Append(v interface{}) (nulls []uint8, err error) { - switch v := v.(type) { - case []string: - col.data, nulls = append(col.data, v...), make([]uint8, len(v)) - case []*string: - nulls = make([]uint8, len(v)) - for i, v := range v { - switch { - case v != nil: - col.data = append(col.data, *v) - default: - col.data, nulls[i] = append(col.data, ""), 1 - } - } - default: - return nil, &ColumnConverterError{ - Op: "Append", - To: "String", - From: fmt.Sprintf("%T", v), - } - } - return -} - -func (col *String) AppendRow(v interface{}) error { +func (col *String) AppendRow(v any) error { switch v := v.(type) { case string: - col.data = append(col.data, v) + col.col.Append(v) case *string: switch { case v != nil: - 
col.data = append(col.data, *v) + col.col.Append(*v) default: - col.data = append(col.data, "") + col.col.Append("") } + case sql.NullString: + switch v.Valid { + case true: + col.col.Append(v.String) + default: + col.col.Append("") + } + case *sql.NullString: + switch v.Valid { + case true: + col.col.Append(v.String) + default: + col.col.Append("") + } + case []byte: + col.col.AppendBytes(v) case nil: - col.data = append(col.data, "") + col.col.Append("") default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "String", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if s, ok := v.(fmt.Stringer); ok { + return col.AppendRow(s.String()) + } + return &ColumnConverterError{ Op: "AppendRow", To: "String", @@ -120,24 +142,69 @@ func (col *String) AppendRow(v interface{}) error { return nil } -func (col *String) Decode(decoder *binary.Decoder, rows int) error { - for i := 0; i < rows; i++ { - v, err := decoder.String() - if err != nil { - return err +func (col *String) Append(v any) (nulls []uint8, err error) { + switch v := v.(type) { + case []string: + col.col.AppendArr(v) + nulls = make([]uint8, len(v)) + case []*string: + nulls = make([]uint8, len(v)) + for i := range v { + switch { + case v[i] != nil: + col.col.Append(*v[i]) + default: + col.col.Append("") + nulls[i] = 1 + } } - col.data = append(col.data, v) - } - return nil -} + case []sql.NullString: + nulls = make([]uint8, len(v)) + for i := range v { + col.AppendRow(v[i]) + } + case []*sql.NullString: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + } + col.AppendRow(v[i]) + } + case [][]byte: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(string(v[i])) + } + default: -func (col *String) Encode(encoder *binary.Encoder) error { - for _, v := range col.data { - if err := encoder.String(v); err != nil { - return err + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "String", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ + Op: "Append", + To: "String", + From: fmt.Sprintf("%T", v), } } - return nil + return +} + +func (col *String) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) +} + +func (col *String) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } var _ Interface = (*String)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go index fc2120c..95c4e24 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go @@ -18,15 +18,17 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" - "github.com/google/uuid" - "github.com/shopspring/decimal" "net" "reflect" "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" + "github.com/google/uuid" + "github.com/shopspring/decimal" ) type Tuple struct { @@ -34,7 +36,13 @@ type Tuple struct { columns []Interface name string isNamed bool // true if all columns are named - index map[string]int // map from col name to off set in columns + index map[string]int 
// map from col name to offset in columns +} + +func (col *Tuple) Reset() { + for i := range col.columns { + col.columns[i].Reset() + } } func (col *Tuple) Name() string { @@ -46,7 +54,7 @@ type namedCol struct { colType Type } -func (col *Tuple) parse(t Type) (_ Interface, err error) { +func (col *Tuple) parse(t Type, tz *time.Location) (_ Interface, err error) { col.chType = t var ( element []rune @@ -91,7 +99,7 @@ func (col *Tuple) parse(t Type) (_ Interface, err error) { if ct.name == "" { isNamed = false } - column, err := ct.colType.Column(ct.name) + column, err := ct.colType.Column(ct.name, tz) if err != nil { return nil, err } @@ -125,14 +133,17 @@ func (col *Tuple) Rows() int { return 0 } -func (col *Tuple) Row(i int, ptr bool) interface{} { +func (col *Tuple) Row(i int, ptr bool) any { tuple := reflect.New(col.ScanType()) value := tuple.Interface() if err := col.ScanRow(value, i); err != nil { // if this happens we have an unexplained problem return nil } - return value + if ptr { + return value + } + return tuple.Elem().Interface() } func setJSONFieldValue(field reflect.Value, value reflect.Value) error { @@ -191,14 +202,25 @@ func setJSONFieldValue(field reflect.Value, value reflect.Value) error { // check if our target is a string if field.Kind() == reflect.String { - field.Set(reflect.ValueOf(fmt.Sprint(value.Interface()))) - return nil + if v := reflect.ValueOf(fmt.Sprint(value.Interface())); v.Type().AssignableTo(field.Type()) { + field.Set(v) + return nil + } } if value.CanConvert(field.Type()) { field.Set(value.Convert(field.Type())) return nil } + // check if our target implements sql.Scanner + sqlScanner := reflect.TypeOf((*sql.Scanner)(nil)).Elem() + if fieldAddr := field.Addr(); field.Kind() != reflect.Ptr && fieldAddr.Type().Implements(sqlScanner) { + returns := fieldAddr.MethodByName("Scan").Call([]reflect.Value{value}) + if len(returns) > 0 && returns[0].IsNil() { + return nil + } + } + return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", field.Interface()), @@ -210,7 +232,10 @@ func setJSONFieldValue(field reflect.Value, value reflect.Value) error { func getStructFieldValue(field reflect.Value, name string) (reflect.Value, bool) { tField := field.Type() for i := 0; i < tField.NumField(); i++ { - if jsonTag := tField.Field(i).Tag.Get("json"); jsonTag == name { + if tag := tField.Field(i).Tag.Get("json"); tag == name { + return field.Field(i), true + } + if tag := tField.Field(i).Tag.Get("ch"); tag == name { return field.Field(i), true } } @@ -218,6 +243,14 @@ func getStructFieldValue(field reflect.Value, name string) (reflect.Value, bool) return sField, sField.IsValid() } +func unescapeColName(colName string) string { + s := []rune(colName) + if s[0:1][0] == '`' && s[len(s)-1:][0] == '`' { + return colUnEscape.Replace(string(s[1 : len(s)-1])) + } + return colUnEscape.Replace(colName) +} + func (col *Tuple) scanMap(targetMap reflect.Value, row int) error { if targetMap.Type().Key().Kind() != reflect.String { return &Error{ @@ -226,6 +259,7 @@ func (col *Tuple) scanMap(targetMap reflect.Value, row int) error { } } for _, c := range col.columns { + colName := unescapeColName(c.Name()) switch dCol := c.(type) { case *Tuple: switch targetMap.Type().Elem().Kind() { @@ -234,25 +268,25 @@ func (col *Tuple) scanMap(targetMap reflect.Value, row int) error { if err := dCol.scanStruct(rStruct, row); err != nil { return err } - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), rStruct) + targetMap.SetMapIndex(reflect.ValueOf(colName), rStruct) case reflect.Map: // 
get a typed map newMap := reflect.MakeMap(targetMap.Type().Elem()) if err := dCol.scanMap(newMap, row); err != nil { return err } - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), newMap) + targetMap.SetMapIndex(reflect.ValueOf(colName), newMap) case reflect.Interface: - // catches interface{} - Note this swallows custom interfaces to which maps couldn't conform - newMap := reflect.ValueOf(make(map[string]interface{})) + // catches any - Note this swallows custom interfaces to which maps couldn't conform + newMap := reflect.ValueOf(make(map[string]any)) if err := dCol.scanMap(newMap, row); err != nil { return err } - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), newMap) + targetMap.SetMapIndex(reflect.ValueOf(colName), newMap) default: return &Error{ ColumnType: fmt.Sprint(targetMap.Type().Elem().Kind()), - Err: fmt.Errorf("column %s - needs a map/struct or interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a map/struct or any", col.Name()), } } case *Nested: @@ -261,21 +295,31 @@ func (col *Tuple) scanMap(targetMap reflect.Value, row int) error { if err != nil { return err } - // this wont work if targetMap is a map[string][]interface{} and we try to set a typed slice - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), subSlice) + // this wont work if targetMap is a map[string][]any and we try to set a typed slice + targetMap.SetMapIndex(reflect.ValueOf(colName), subSlice) case *Array: subSlice, err := dCol.scan(targetMap.Type().Elem(), row) if err != nil { return err } - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), subSlice) + targetMap.SetMapIndex(reflect.ValueOf(colName), subSlice) default: - field := reflect.New(reflect.TypeOf(c.Row(0, false))).Elem() - value := reflect.ValueOf(c.Row(row, false)) - if err := setJSONFieldValue(field, value); err != nil { - return err + val := c.Row(row, false) + if val != nil { + field := reflect.New(reflect.TypeOf(val)).Elem() + value := reflect.ValueOf(val) + if err := setJSONFieldValue(field, value); err != nil { + return err + } + targetMap.SetMapIndex(reflect.ValueOf(colName), field) + } else { + if _, isNullable := c.(*Nullable); !isNullable { + targetMap.SetMapIndex(reflect.ValueOf(colName), reflect.Zero(c.ScanType().Elem())) + } else { + targetMap.SetMapIndex(reflect.ValueOf(colName), reflect.Zero(c.ScanType())) + } } - targetMap.SetMapIndex(reflect.ValueOf(c.Name()), field) + } } return nil @@ -303,8 +347,8 @@ func (col *Tuple) scanStruct(targetStruct reflect.Value, row int) error { } sField.Set(newMap) case reflect.Interface: - // catches []interface{} -Note this swallows custom interfaces to which maps couldn't conform - newMap := reflect.ValueOf(make(map[string]interface{})) + // catches []any -Note this swallows custom interfaces to which maps couldn't conform + newMap := reflect.ValueOf(make(map[string]any)) if err := dCol.scanMap(newMap, row); err != nil { return err } @@ -312,7 +356,7 @@ func (col *Tuple) scanStruct(targetStruct reflect.Value, row int) error { default: return &Error{ ColumnType: fmt.Sprint(sField.Kind()), - Err: fmt.Errorf("column %s - needs a map/struct/slice or interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a map/struct/slice or any", col.Name()), } } case *Nested: @@ -363,9 +407,12 @@ func (col *Tuple) scanSlice(targetType reflect.Type, row int) (reflect.Value, er rSlice = reflect.Append(rSlice, subSlice) default: field := reflect.New(c.ScanType()).Elem() - value := reflect.ValueOf(c.Row(row, false)) - if err := setJSONFieldValue(field, value); err != nil { - return 
reflect.Value{}, err + val := c.Row(row, false) + if val != nil { + value := reflect.ValueOf(val) + if err := setJSONFieldValue(field, value); err != nil { + return reflect.Value{}, err + } } rSlice = reflect.Append(rSlice, field) } @@ -383,6 +430,14 @@ func (col *Tuple) scan(targetType reflect.Type, row int) (reflect.Value, error) } return rStruct, nil case reflect.Map: + if !col.isNamed { + return reflect.Value{}, &ColumnConverterError{ + Op: "ScanRow", + To: targetType.String(), + From: string(col.chType), + Hint: "cannot use maps for unnamed tuples, use slice", + } + } rMap := reflect.MakeMap(targetType) if err := col.scanMap(rMap, row); err != nil { return reflect.Value{}, nil @@ -392,12 +447,20 @@ func (col *Tuple) scan(targetType reflect.Type, row int) (reflect.Value, error) //tuples can be scanned into slices - specifically default for unnamed tuples rSlice, err := col.scanSlice(targetType, row) if err != nil { - return reflect.Value{}, nil + return reflect.Value{}, err } return rSlice, nil case reflect.Interface: - // catches interface{} -Note this swallows custom interfaces to which maps couldn't conform - rMap := reflect.ValueOf(make(map[string]interface{})) + // catches any -Note this swallows custom interfaces to which maps couldn't conform + if !col.isNamed { + return reflect.Value{}, &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%s", targetType), + From: string(col.chType), + Hint: "cannot use interface for unnamed tuples, use slice", + } + } + rMap := reflect.ValueOf(make(map[string]any)) if err := col.scanMap(rMap, row); err != nil { return reflect.Value{}, err } @@ -405,11 +468,11 @@ func (col *Tuple) scan(targetType reflect.Type, row int) (reflect.Value, error) } return reflect.Value{}, &Error{ ColumnType: fmt.Sprint(targetType.Kind()), - Err: fmt.Errorf("column %s - needs a map/struct/slice or interface{}", col.Name()), + Err: fmt.Errorf("column %s - needs a map/struct/slice or any", col.Name()), } } -func (col *Tuple) ScanRow(dest interface{}, row int) error { +func (col *Tuple) ScanRow(dest any, row int) error { value := reflect.Indirect(reflect.ValueOf(dest)) tuple, err := col.scan(value.Type(), row) if err != nil { @@ -419,22 +482,27 @@ func (col *Tuple) ScanRow(dest interface{}, row int) error { return nil } -func (col *Tuple) Append(v interface{}) (nulls []uint8, err error) { - switch v := v.(type) { - case [][]interface{}: - for _, v := range v { - if err := col.AppendRow(v); err != nil { +func (col *Tuple) Append(v any) (nulls []uint8, err error) { + value := reflect.ValueOf(v) + if value.Kind() == reflect.Slice { + for i := 0; i < value.Len(); i++ { + if err := col.AppendRow(value.Index(i).Interface()); err != nil { return nil, err } } return nil, nil - case []*[]interface{}: - for _, v := range v { - if err := col.AppendRow(v); err != nil { - return nil, err + } + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", } } - return nil, nil + return col.Append(val) } return nil, &ColumnConverterError{ Op: "Append", @@ -443,50 +511,74 @@ func (col *Tuple) Append(v interface{}) (nulls []uint8, err error) { } } -func (col *Tuple) AppendRow(v interface{}) error { - switch v := v.(type) { - case []interface{}: - if len(v) != len(col.columns) { +func (col *Tuple) AppendRow(v any) error { + // allows support of tuples where map or slice is typed and NOT any. 
Will fail if tuple isn't consistent + value := reflect.ValueOf(v) + if value.Kind() == reflect.Pointer { + value = value.Elem() + } + switch value.Kind() { + case reflect.Map: + if !col.isNamed { return &Error{ ColumnType: string(col.chType), - Err: fmt.Errorf("invalid size. expected %d got %d", len(col.columns), len(v)), - } - } - for i, v := range v { - if err := col.columns[i].AppendRow(v); err != nil { - return err + Err: fmt.Errorf("converting from %T is not supported for unnamed tuples - use a slice", v), } } - return nil - case *[]interface{}: - if v == nil { - return &ColumnConverterError{ - Op: "AppendRow", - To: string(col.chType), - From: fmt.Sprintf("%T", v), - Hint: "invalid (nil) pointer value", + if value.Type().Key().Kind() != reflect.String { + return &Error{ + ColumnType: fmt.Sprint(value.Type().Key().Kind()), + Err: fmt.Errorf("map keys must be string for column %s", col.Name()), } } - if len(*v) != len(col.columns) { + if value.Len() != len(col.columns) { return &Error{ ColumnType: string(col.chType), - Err: fmt.Errorf("invalid size. expected %d got %d", len(col.columns), len(*v)), + Err: fmt.Errorf("invalid size. expected %d got %d", len(col.columns), value.Len()), } } - for i, v := range *v { - if err := col.columns[i].AppendRow(v); err != nil { + for _, key := range value.MapKeys() { + name := getMapFieldName(key.Interface().(string)) + if _, ok := col.index[name]; !ok { + return &Error{ + ColumnType: string(col.chType), + Err: fmt.Errorf("sub column '%s' does not exist in %s", name, col.Name()), + } + } + if err := col.columns[col.index[name]].AppendRow(value.MapIndex(key).Interface()); err != nil { return err } } return nil - case map[string]interface{}: - for name, v := range v { - if err := col.columns[col.index[name]].AppendRow(v); err != nil { + case reflect.Slice: + if value.Len() != len(col.columns) { + return &Error{ + ColumnType: string(col.chType), + Err: fmt.Errorf("invalid size. 
expected %d got %d", len(col.columns), value.Len()), + } + } + for i := 0; i < value.Len(); i++ { + elem := value.Index(i) + if err := col.columns[i].AppendRow(elem.Interface()); err != nil { return err } } return nil } + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), @@ -494,28 +586,25 @@ func (col *Tuple) AppendRow(v interface{}) error { } } -func (col *Tuple) Decode(decoder *binary.Decoder, rows int) error { +func (col *Tuple) Decode(reader *proto.Reader, rows int) error { for _, c := range col.columns { - if err := c.Decode(decoder, rows); err != nil { + if err := c.Decode(reader, rows); err != nil { return err } } return nil } -func (col *Tuple) Encode(encoder *binary.Encoder) error { +func (col *Tuple) Encode(buffer *proto.Buffer) { for _, c := range col.columns { - if err := c.Encode(encoder); err != nil { - return err - } + c.Encode(buffer) } - return nil } -func (col *Tuple) ReadStatePrefix(decoder *binary.Decoder) error { +func (col *Tuple) ReadStatePrefix(reader *proto.Reader) error { for _, c := range col.columns { if serialize, ok := c.(CustomSerialization); ok { - if err := serialize.ReadStatePrefix(decoder); err != nil { + if err := serialize.ReadStatePrefix(reader); err != nil { return err } } @@ -523,10 +612,10 @@ func (col *Tuple) ReadStatePrefix(decoder *binary.Decoder) error { return nil } -func (col *Tuple) WriteStatePrefix(encoder *binary.Encoder) error { +func (col *Tuple) WriteStatePrefix(buffer *proto.Buffer) error { for _, c := range col.columns { if serialize, ok := c.(CustomSerialization); ok { - if err := serialize.WriteStatePrefix(encoder); err != nil { + if err := serialize.WriteStatePrefix(buffer); err != nil { return err } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go index 6e582e3..bf2a1c4 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go @@ -18,20 +18,24 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" + "github.com/ClickHouse/ch-go/proto" "reflect" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/google/uuid" ) -const uuidSize = 16 - type UUID struct { - data []byte + col proto.ColUUID name string } +func (col *UUID) Reset() { + col.col.Reset() +} + func (col *UUID) Name() string { return col.name } @@ -45,10 +49,10 @@ func (col *UUID) ScanType() reflect.Type { } func (col *UUID) Rows() int { - return len(col.data) / uuidSize + return col.col.Rows() } -func (col *UUID) Row(i int, ptr bool) interface{} { +func (col *UUID) Row(i int, ptr bool) any { value := col.row(i) if ptr { return &value @@ -56,7 +60,7 @@ func (col *UUID) Row(i int, ptr bool) interface{} { return value } -func (col *UUID) ScanRow(dest interface{}, row int) error { +func (col *UUID) ScanRow(dest any, row int) error { switch d := dest.(type) { case *string: *d = col.row(row).String() @@ -69,6 +73,9 @@ func (col *UUID) ScanRow(dest interface{}, row int) error { *d = new(uuid.UUID) **d = col.row(row) default: + if scan, ok := dest.(sql.Scanner); ok { + return scan.Scan(col.row(row).String()) + } return &ColumnConverterError{ Op: "ScanRow", To: 
fmt.Sprintf("%T", dest), @@ -79,54 +86,64 @@ func (col *UUID) ScanRow(dest interface{}, row int) error { return nil } -func (col *UUID) Append(v interface{}) (nulls []uint8, err error) { +func (col *UUID) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []string: nulls = make([]uint8, len(v)) - var data []byte for _, v := range v { var u uuid.UUID u, err = uuid.Parse(v) if err != nil { return } - col.data = append(col.data, swap(u[:])...) + col.col.Append(u) } - col.data = append(col.data, data...) case []*string: nulls = make([]uint8, len(v)) - var data []byte for i, v := range v { switch { case v != nil: - var tmp uuid.UUID - tmp, err = uuid.Parse(*v) + var value uuid.UUID + value, err = uuid.Parse(*v) if err != nil { return } - data = append(data, swap(tmp[:])...) + col.col.Append(value) default: - data, nulls[i] = append(data, make([]byte, uuidSize)...), 1 + nulls[i] = 1 + col.col.Append(uuid.UUID{}) } } - col.data = append(col.data, data...) case []uuid.UUID: nulls = make([]uint8, len(v)) for _, v := range v { - col.data = append(col.data, swap(v[:])...) + col.col.Append(v) } case []*uuid.UUID: nulls = make([]uint8, len(v)) for i, v := range v { switch { case v != nil: - tmp := *v - col.data = append(col.data, swap(tmp[:])...) + col.col.Append(*v) default: - col.data, nulls[i] = append(col.data, make([]byte, uuidSize)...), 1 + nulls[i] = 1 + col.col.Append(uuid.UUID{}) } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UUID", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UUID", @@ -136,38 +153,52 @@ func (col *UUID) Append(v interface{}) (nulls []uint8, err error) { return } -func (col *UUID) AppendRow(v interface{}) error { +func (col *UUID) AppendRow(v any) error { switch v := v.(type) { case string: u, err := uuid.Parse(v) if err != nil { return err } - col.data = append(col.data, swap(u[:])...) + col.col.Append(u) case *string: switch { case v != nil: - tmp, err := uuid.Parse(*v) + value, err := uuid.Parse(*v) if err != nil { return err } - col.data = append(col.data, swap(tmp[:])...) + col.col.Append(value) default: - col.data = append(col.data, make([]byte, uuidSize)...) + col.col.Append(uuid.UUID{}) } case uuid.UUID: - col.data = append(col.data, swap(v[:])...) + col.col.Append(v) case *uuid.UUID: switch { case v != nil: - tmp := *v - col.data = append(col.data, swap(tmp[:])...) + col.col.Append(*v) default: - col.data = append(col.data, make([]byte, uuidSize)...) + col.col.Append(uuid.UUID{}) } case nil: - col.data = append(col.data, make([]byte, uuidSize)...) 
+ col.col.Append(uuid.UUID{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UUID", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + if s, ok := v.(fmt.Stringer); ok { + return col.AppendRow(s.String()) + } return &ColumnConverterError{ Op: "AppendRow", To: "UUID", @@ -177,32 +208,16 @@ func (col *UUID) AppendRow(v interface{}) error { return nil } -func (col *UUID) Decode(decoder *binary.Decoder, rows int) error { - col.data = make([]byte, uuidSize*rows) - return decoder.Raw(col.data) +func (col *UUID) Decode(reader *proto.Reader, rows int) error { + return col.col.DecodeColumn(reader, rows) } -func (col *UUID) Encode(encoder *binary.Encoder) error { - return encoder.Raw(col.data) +func (col *UUID) Encode(buffer *proto.Buffer) { + col.col.EncodeColumn(buffer) } func (col *UUID) row(i int) (uuid uuid.UUID) { - copy(uuid[:], col.data[i*uuidSize:(i+1)*uuidSize]) - swap(uuid[:]) - return + return col.col.Row(i) } var _ Interface = (*UUID)(nil) - -func swap(src []byte) []byte { - _ = src[15] - src[0], src[7] = src[7], src[0] - src[1], src[6] = src[6], src[1] - src[2], src[5] = src[5], src[2] - src[3], src[4] = src[4], src[3] - src[8], src[15] = src[15], src[8] - src[9], src[14] = src[14], src[9] - src[10], src[13] = src[13], src[10] - src[11], src[12] = src[12], src[11] - return src -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_reader.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_reader.go deleted file mode 100644 index 68be5c5..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_reader.go +++ /dev/null @@ -1,111 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package compress - -import ( - "fmt" - "io" - - "github.com/pierrec/lz4/v4" -) - -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - pos: maxBlockSize, - data: make([]byte, maxBlockSize), - zdata: make([]byte, lz4.CompressBlockBound(maxBlockSize)+headerSize), - header: make([]byte, headerSize), - } -} - -type Reader struct { - r io.Reader - pos int - data []byte - zdata []byte - header []byte -} - -func (r *Reader) Read(p []byte) (int, error) { - bytesRead, n := 0, len(p) - if r.pos < len(r.data) { - copyedSize := copy(p, r.data[r.pos:]) - { - bytesRead += copyedSize - r.pos += copyedSize - } - } - for bytesRead < n { - if err := r.readBlock(); err != nil { - return bytesRead, err - } - copyedSize := copy(p[bytesRead:], r.data) - { - bytesRead += copyedSize - r.pos = copyedSize - } - } - return n, nil -} - -func (r *Reader) readBlock() (err error) { - r.pos = 0 - var n int - if n, err = io.ReadFull(r.r, r.header); err != nil { - return - } - if n != len(r.header) { - return fmt.Errorf("LZ4 decompression header EOF") - } - var ( - compressedSize = int(endian.Uint32(r.header[17:])) - 9 - decompressedSize = int(endian.Uint32(r.header[21:])) - ) - if compressedSize > cap(r.zdata) { - r.zdata = make([]byte, compressedSize) - } - if decompressedSize > cap(r.data) { - r.data = make([]byte, decompressedSize) - } - - r.data, r.zdata = r.data[:decompressedSize], r.zdata[:compressedSize] - - switch r.header[16] { - case LZ4: - default: - return fmt.Errorf("unknown compression method: 0x%02x ", r.header[16]) - } - // @TODO checksum - if n, err = io.ReadFull(r.r, r.zdata); err != nil { - return - } - if n != len(r.zdata) { - return fmt.Errorf("decompress read size not match") - } - if _, err = lz4.UncompressBlock(r.zdata, r.data); err != nil { - return - } - return nil -} - -func (r *Reader) Close() error { - r.data = nil - r.zdata = nil - return nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_writer.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_writer.go deleted file mode 100644 index 1fb9f7c..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/compress/compress_writer.go +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package compress - -import ( - "io" - - "github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102" - "github.com/pierrec/lz4/v4" -) - -func NewWriter(wr io.Writer) *Writer { - return &Writer{ - wr: wr, - data: make([]byte, maxBlockSize), - zdata: make([]byte, lz4.CompressBlockBound(maxBlockSize)+headerSize), - } -} - -type Writer struct { - wr io.Writer - pos int - data []byte - zdata []byte - compressor lz4.Compressor -} - -func (w *Writer) Write(p []byte) (n int, err error) { - for len(p) > 0 { - m := copy(w.data[w.pos:], p) - w.pos += m - p = p[m:] - if w.pos == len(w.data) { - if err = w.Flush(); err != nil { - return n, err - } - } - n += m - } - return n, nil -} - -func (w *Writer) Flush() (err error) { - if w.pos == 0 { - return - } - compressedSize, err := w.compressor.CompressBlock(w.data[:w.pos], w.zdata[headerSize:]) - if err != nil { - return err - } - compressedSize += compressHeaderSize - // fill the header, compressed_size_32 + uncompressed_size_32 - w.zdata[16] = LZ4 - endian.PutUint32(w.zdata[17:], uint32(compressedSize)) - endian.PutUint32(w.zdata[21:], uint32(w.pos)) - // fill the checksum - checkSum := cityhash102.CityHash128(w.zdata[16:], uint32(compressedSize)) - { - endian.PutUint64(w.zdata[0:], checkSum.Lower64()) - endian.PutUint64(w.zdata[8:], checkSum.Higher64()) - } - if _, err := w.wr.Write(w.zdata[:compressedSize+checksumSize]); err != nil { - return err - } - /*if w, ok := cw.writer.(WriteFlusher); ok { - err = w.Flush() - }*/ - w.pos = 0 - return -} - -func (w *Writer) Close() error { - w.data = nil - w.zdata = nil - return nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go index 40f8839..a3a9c35 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go @@ -22,6 +22,7 @@ import ( "reflect" "time" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) @@ -30,7 +31,7 @@ type ServerVersion = proto.ServerHandshake type ( NamedValue struct { Name string - Value interface{} + Value any } NamedDateValue struct { @@ -51,40 +52,45 @@ type ( Conn interface { Contributors() []string ServerVersion() (*ServerVersion, error) - Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error - Query(ctx context.Context, query string, args ...interface{}) (Rows, error) - QueryRow(ctx context.Context, query string, args ...interface{}) Row - PrepareBatch(ctx context.Context, query string) (Batch, error) - Exec(ctx context.Context, query string, args ...interface{}) error - AsyncInsert(ctx context.Context, query string, wait bool) error + Select(ctx context.Context, dest any, query string, args ...any) error + Query(ctx context.Context, query string, args ...any) (Rows, error) + QueryRow(ctx context.Context, query string, args ...any) Row + PrepareBatch(ctx context.Context, query string, opts ...PrepareBatchOption) (Batch, error) + Exec(ctx context.Context, query string, args ...any) error + AsyncInsert(ctx context.Context, query string, wait bool, args ...any) error Ping(context.Context) error Stats() Stats Close() error } Row interface { Err() error - Scan(dest ...interface{}) error - ScanStruct(dest interface{}) error + Scan(dest ...any) error + ScanStruct(dest any) error } Rows interface { Next() bool - Scan(dest ...interface{}) error - ScanStruct(dest interface{}) error + Scan(dest ...any) error + 
ScanStruct(dest any) error ColumnTypes() []ColumnType - Totals(dest ...interface{}) error + Totals(dest ...any) error Columns() []string Close() error Err() error } Batch interface { Abort() error - Append(v ...interface{}) error - AppendStruct(v interface{}) error + Append(v ...any) error + AppendStruct(v any) error Column(int) BatchColumn + Flush() error Send() error + IsSent() bool + Rows() int + Columns() []column.Interface } BatchColumn interface { - Append(interface{}) error + Append(any) error + AppendRow(any) error } ColumnType interface { Name() string diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go new file mode 100644 index 0000000..c214c2c --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go @@ -0,0 +1,21 @@ +package driver + +type PrepareBatchOptions struct { + ReleaseConnection bool + CloseOnFlush bool +} + +type PrepareBatchOption func(options *PrepareBatchOptions) + +func WithReleaseConnection() PrepareBatchOption { + return func(options *PrepareBatchOptions) { + options.ReleaseConnection = true + } +} + +// WithCloseOnFlush closes batch INSERT query when Flush is executed +func WithCloseOnFlush() PrepareBatchOption { + return func(options *PrepareBatchOptions) { + options.CloseOnFlush = true + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/io/stream.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/io/stream.go deleted file mode 100644 index ead3ce2..0000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/io/stream.go +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to ClickHouse, Inc. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. ClickHouse, Inc. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package io - -import ( - "bufio" - "io" - - "github.com/ClickHouse/clickhouse-go/v2/lib/compress" -) - -const ( - maxReaderSize = 1 << 20 - maxWriterSize = 1 << 20 -) - -func NewStream(rw io.ReadWriter) *Stream { - stream := Stream{ - r: bufio.NewReaderSize(rw, maxReaderSize), - w: bufio.NewWriterSize(rw, maxWriterSize), - } - stream.compress.r = compress.NewReader(stream.r) - stream.compress.w = compress.NewWriter(stream.w) - return &stream -} - -type Stream struct { - r *bufio.Reader - w *bufio.Writer - compress struct { - enable bool - r *compress.Reader - w *compress.Writer - } -} - -func (s *Stream) Compress(v bool) { - s.compress.enable = v -} - -func (s *Stream) Read(p []byte) (int, error) { - if s.compress.enable { - return io.ReadFull(s.compress.r, p) - } - return io.ReadFull(s.r, p) -} - -func (s *Stream) Write(p []byte) (int, error) { - if s.compress.enable { - return s.compress.w.Write(p) - } - return s.w.Write(p) -} - -func (s *Stream) Flush() error { - if err := s.compress.w.Flush(); err != nil { - return err - } - return s.w.Flush() -} - -func (s *Stream) Close() error { - s.r, s.w = nil, nil - s.compress.r.Close() - s.compress.w.Close() - return nil -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go index ca25f89..6debe8c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go @@ -20,15 +20,18 @@ package proto import ( "errors" "fmt" + "sort" + "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/lib/column" ) type Block struct { - names []string - Packet byte - Columns []column.Interface + names []string + Packet byte + Columns []column.Interface + Timezone *time.Location } func (b *Block) Rows() int { @@ -39,7 +42,7 @@ func (b *Block) Rows() int { } func (b *Block) AddColumn(name string, ct column.Type) error { - column, err := ct.Column(name) + column, err := ct.Column(name, b.Timezone) if err != nil { return err } @@ -47,7 +50,7 @@ func (b *Block) AddColumn(name string, ct column.Type) error { return nil } -func (b *Block) Append(v ...interface{}) (err error) { +func (b *Block) Append(v ...any) (err error) { columns := b.Columns if len(columns) != len(v) { return &BlockError{ @@ -71,39 +74,83 @@ func (b *Block) ColumnsNames() []string { return b.names } -func (b *Block) Encode(encoder *binary.Encoder, revision uint64) error { - if revision > 0 { - if err := encodeBlockInfo(encoder); err != nil { - return err +// SortColumns sorts our block according to the requested order - a slice of column names. Names must be identical in requested order and block. 
+func (b *Block) SortColumns(columns []string) error { + if len(columns) == 0 { + // no preferred sort order + return nil + } + if len(columns) != len(b.Columns) { + return fmt.Errorf("requested column order is incorrect length to sort block - expected %d, got %d", len(b.Columns), len(columns)) + } + missing := difference(b.names, columns) + if len(missing) > 0 { + return fmt.Errorf("block cannot be sorted - missing columns in requested order: %v", missing) + } + lookup := make(map[string]int) + for i, col := range columns { + lookup[col] = i + } + // we assume both lists have the same + sort.Slice(b.Columns, func(i, j int) bool { + iRank, jRank := lookup[b.Columns[i].Name()], lookup[b.Columns[j].Name()] + return iRank < jRank + }) + sort.Slice(b.names, func(i, j int) bool { + iRank, jRank := lookup[b.names[i]], lookup[b.names[j]] + return iRank < jRank + }) + return nil +} + +func difference(a, b []string) []string { + mb := make(map[string]struct{}, len(b)) + for _, x := range b { + mb[x] = struct{}{} + } + var diff []string + for _, x := range a { + if _, found := mb[x]; !found { + diff = append(diff, x) } } + return diff +} + +func (b *Block) EncodeHeader(buffer *proto.Buffer, revision uint64) (err error) { + if revision > 0 { + encodeBlockInfo(buffer) + } var rows int if len(b.Columns) != 0 { rows = b.Columns[0].Rows() for _, c := range b.Columns[1:] { - if rows != c.Rows() { + cRows := c.Rows() + if rows != cRows { return &BlockError{ Op: "Encode", - Err: errors.New("mismatched len of columns"), + Err: fmt.Errorf("mismatched len of columns - expected %d, received %d for col %s", rows, cRows, c.Name()), } } } } - if err := encoder.Uvarint(uint64(len(b.Columns))); err != nil { - return err - } - if err := encoder.Uvarint(uint64(rows)); err != nil { - return err - } - for _, c := range b.Columns { - if err := encoder.String(c.Name()); err != nil { - return err - } - if err := encoder.String(string(c.Type())); err != nil { - return err + buffer.PutUVarInt(uint64(len(b.Columns))) + buffer.PutUVarInt(uint64(rows)) + return nil +} + +func (b *Block) EncodeColumn(buffer *proto.Buffer, revision uint64, i int) (err error) { + if i >= 0 && i < len(b.Columns) { + c := b.Columns[i] + buffer.PutString(c.Name()) + buffer.PutString(string(c.Type())) + + if revision >= DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION { + buffer.PutBool(false) } + if serialize, ok := c.(column.CustomSerialization); ok { - if err := serialize.WriteStatePrefix(encoder); err != nil { + if err := serialize.WriteStatePrefix(buffer); err != nil { return &BlockError{ Op: "Encode", Err: err, @@ -111,20 +158,30 @@ func (b *Block) Encode(encoder *binary.Encoder, revision uint64) error { } } } - if err := c.Encode(encoder); err != nil { - return &BlockError{ - Op: "Encode", - Err: err, - ColumnName: c.Name(), - } + c.Encode(buffer) + return nil + } + return &BlockError{ + Op: "Encode", + Err: fmt.Errorf("%d is out of range of %d columns", i, len(b.Columns)), + } +} + +func (b *Block) Encode(buffer *proto.Buffer, revision uint64) (err error) { + if err := b.EncodeHeader(buffer, revision); err != nil { + return err + } + for i := range b.Columns { + if err := b.EncodeColumn(buffer, revision, i); err != nil { + return err } } return nil } -func (b *Block) Decode(decoder *binary.Decoder, revision uint64) (err error) { +func (b *Block) Decode(reader *proto.Reader, revision uint64) (err error) { if revision > 0 { - if err := decodeBlockInfo(decoder); err != nil { + if err := decodeBlockInfo(reader); err != nil { return err } } @@ -132,37 
+189,52 @@ func (b *Block) Decode(decoder *binary.Decoder, revision uint64) (err error) { numRows uint64 numCols uint64 ) - if numCols, err = decoder.Uvarint(); err != nil { + if numCols, err = reader.UVarInt(); err != nil { return err } - if numRows, err = decoder.Uvarint(); err != nil { + if numRows, err = reader.UVarInt(); err != nil { return err } if numRows > 1_000_000_000 { return &BlockError{ Op: "Decode", - Err: errors.New("more then 1 billion rows in block"), + Err: errors.New("more then 1 billion rows in block - suspiciously big - preventing OOM"), } } - b.Columns = make([]column.Interface, 0, numCols) + b.Columns = make([]column.Interface, numCols, numCols) + b.names = make([]string, numCols, numCols) for i := 0; i < int(numCols); i++ { var ( columnName string columnType string ) - if columnName, err = decoder.String(); err != nil { + if columnName, err = reader.Str(); err != nil { return err } - if columnType, err = decoder.String(); err != nil { + if columnType, err = reader.Str(); err != nil { return err } - c, err := column.Type(columnType).Column(columnName) + c, err := column.Type(columnType).Column(columnName, b.Timezone) if err != nil { return err } + + if revision >= DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION { + hasCustom, err := reader.Bool() + if err != nil { + return err + } + if hasCustom { + return &BlockError{ + Op: "Decode", + Err: errors.New(fmt.Sprintf("custom serialization for column %s. not supported by clickhouse-go driver", columnName)), + } + } + } + if numRows != 0 { if serialize, ok := c.(column.CustomSerialization); ok { - if err := serialize.ReadStatePrefix(decoder); err != nil { + if err := serialize.ReadStatePrefix(reader); err != nil { return &BlockError{ Op: "Decode", Err: err, @@ -170,7 +242,7 @@ func (b *Block) Decode(decoder *binary.Decoder, revision uint64) (err error) { } } } - if err := c.Decode(decoder, int(numRows)); err != nil { + if err := c.Decode(reader, int(numRows)); err != nil { return &BlockError{ Op: "Decode", Err: err, @@ -178,45 +250,42 @@ func (b *Block) Decode(decoder *binary.Decoder, revision uint64) (err error) { } } } - b.names, b.Columns = append(b.names, columnName), append(b.Columns, c) + b.names[i] = columnName + b.Columns[i] = c } return nil } -func encodeBlockInfo(encoder *binary.Encoder) error { - { - if err := encoder.Uvarint(1); err != nil { - return err - } - if err := encoder.Bool(false); err != nil { - return err - } - if err := encoder.Uvarint(2); err != nil { - return err - } - if err := encoder.Int32(-1); err != nil { - return err - } +func (b *Block) Reset() { + for i := range b.Columns { + b.Columns[i].Reset() } - return encoder.Uvarint(0) } -func decodeBlockInfo(decoder *binary.Decoder) error { +func encodeBlockInfo(buffer *proto.Buffer) { + buffer.PutUVarInt(1) + buffer.PutBool(false) + buffer.PutUVarInt(2) + buffer.PutInt32(-1) + buffer.PutUVarInt(0) +} + +func decodeBlockInfo(reader *proto.Reader) error { { - if _, err := decoder.Uvarint(); err != nil { + if _, err := reader.UVarInt(); err != nil { return err } - if _, err := decoder.Bool(); err != nil { + if _, err := reader.Bool(); err != nil { return err } - if _, err := decoder.Uvarint(); err != nil { + if _, err := reader.UVarInt(); err != nil { return err } - if _, err := decoder.Int32(); err != nil { + if _, err := reader.Int32(); err != nil { return err } } - if _, err := decoder.Uvarint(); err != nil { + if _, err := reader.UVarInt(); err != nil { return err } return nil @@ -232,8 +301,6 @@ func (e *BlockError) Error() string { switch err := 
e.Err.(type) { case *column.Error: return fmt.Sprintf("clickhouse [%s]: (%s %s) %s", e.Op, e.ColumnName, err.ColumnType, err.Err) - case *column.DateOverflowError: - return fmt.Sprintf("clickhouse: dateTime overflow. %s must be between %s and %s", e.ColumnName, err.Min.Format(err.Format), err.Max.Format(err.Format)) } return fmt.Sprintf("clickhouse [%s]: %s %s", e.Op, e.ColumnName, e.Err) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go index 6eac3cd..cae29e9 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go @@ -19,20 +19,25 @@ package proto // see https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Protocol.h const ( - DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032 - DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058 - DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060 - DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372 - DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401 - DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420 - DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429 - DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET = 54441 - DBMS_MIN_REVISION_WITH_OPENTELEMETRY = 54442 - DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH = 54448 - DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME = 54449 - DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS = 54451 - DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453 - DBMS_TCP_PROTOCOL_VERSION = DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS + DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032 + DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058 + DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060 + DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372 + DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401 + DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420 + DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429 + DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET = 54441 + DBMS_MIN_REVISION_WITH_OPENTELEMETRY = 54442 + DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH = 54448 + DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME = 54449 + DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS = 54451 + DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453 + DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION = 54454 + DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM = 54458 + DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY = 54458 + DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS = 54459 + DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES = 54460 + DBMS_TCP_PROTOCOL_VERSION = DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES ) const ( diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/exception.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/exception.go index c58bef0..2bcc99b 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/exception.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/exception.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + "github.com/ClickHouse/ch-go/proto" ) type Exception struct { @@ -37,11 +37,11 @@ func (e *Exception) Error() string { return fmt.Sprintf("code: %d, message: %s", e.Code, e.Message) } -func (e *Exception) Decode(decoder *binary.Decoder) (err error) { +func (e *Exception) Decode(reader *proto.Reader) (err error) { var exceptions []Exception for { var ex Exception - if err := ex.decode(decoder); err != nil { + if err := 
ex.decode(reader); err != nil { return err } if exceptions = append(exceptions, ex); !ex.nested { @@ -60,21 +60,21 @@ func (e *Exception) Decode(decoder *binary.Decoder) (err error) { return nil } -func (e *Exception) decode(decoder *binary.Decoder) (err error) { - if e.Code, err = decoder.Int32(); err != nil { +func (e *Exception) decode(reader *proto.Reader) (err error) { + if e.Code, err = reader.Int32(); err != nil { return err } - if e.Name, err = decoder.String(); err != nil { + if e.Name, err = reader.Str(); err != nil { return err } - if e.Message, err = decoder.String(); err != nil { + if e.Message, err = reader.Str(); err != nil { return err } e.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, e.Name+":")) - if e.StackTrace, err = decoder.String(); err != nil { + if e.StackTrace, err = reader.Str(); err != nil { return err } - if e.nested, err = decoder.Bool(); err != nil { + if e.nested, err = reader.Bool(); err != nil { return err } return nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/handshake.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/handshake.go index e82957b..6ee6209 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/handshake.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/handshake.go @@ -19,66 +19,87 @@ package proto import ( "fmt" + chproto "github.com/ClickHouse/ch-go/proto" + "gopkg.in/yaml.v3" + "strconv" + "strings" "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" "github.com/ClickHouse/clickhouse-go/v2/lib/timezone" ) -const ClientName = "Golang SQLDriver" +type ClientHandshake struct { + ProtocolVersion uint64 -const ( - ClientVersionMajor = 1 - ClientVersionMinor = 1 - ClientTCPProtocolVersion = DBMS_TCP_PROTOCOL_VERSION -) - -type ClientHandshake struct{} + ClientName string + ClientVersion Version +} -func (ClientHandshake) Encode(encoder *binary.Encoder) error { - if err := encoder.String(ClientName); err != nil { - return err - } - if err := encoder.Uvarint(ClientVersionMajor); err != nil { - return err - } - if err := encoder.Uvarint(ClientVersionMinor); err != nil { - return err - } - return encoder.Uvarint(ClientTCPProtocolVersion) +func (h ClientHandshake) Encode(buffer *chproto.Buffer) { + buffer.PutString(h.ClientName) + buffer.PutUVarInt(h.ClientVersion.Major) + buffer.PutUVarInt(h.ClientVersion.Minor) + buffer.PutUVarInt(h.ProtocolVersion) } -func (ClientHandshake) String() string { - return fmt.Sprintf("%s %d.%d.%d", ClientName, ClientVersionMajor, ClientVersionMinor, ClientTCPProtocolVersion) +func (h ClientHandshake) String() string { + return fmt.Sprintf("%s %d.%d.%d", h.ClientName, h.ClientVersion.Major, h.ClientVersion.Minor, h.ClientVersion.Patch) } type ServerHandshake struct { Name string DisplayName string Revision uint64 - Version struct { - Major uint64 - Minor uint64 - Patch uint64 + Version Version + Timezone *time.Location +} + +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 +} + +func ParseVersion(v string) (ver Version) { + var err error + parts := strings.Split(v, ".") + if len(parts) < 3 { + return ver + } + if ver.Major, err = strconv.ParseUint(parts[0], 10, 8); err != nil { + return ver + } + if ver.Minor, err = strconv.ParseUint(parts[1], 10, 8); err != nil { + return ver + } + if ver.Patch, err = strconv.ParseUint(parts[2], 10, 8); err != nil { + return ver } - Timezone *time.Location + return ver } -func (srv *ServerHandshake) Decode(decoder *binary.Decoder) (err error) { - if srv.Name, err = 
decoder.String(); err != nil { +func CheckMinVersion(constraint Version, version Version) bool { + if version.Major < constraint.Major || (version.Major == constraint.Major && version.Minor < constraint.Minor) || (version.Major == constraint.Major && version.Minor == constraint.Minor && version.Patch < constraint.Patch) { + return false + } + return true +} + +func (srv *ServerHandshake) Decode(reader *chproto.Reader) (err error) { + if srv.Name, err = reader.Str(); err != nil { return fmt.Errorf("could not read server name: %v", err) } - if srv.Version.Major, err = decoder.Uvarint(); err != nil { + if srv.Version.Major, err = reader.UVarInt(); err != nil { return fmt.Errorf("could not read server major version: %v", err) } - if srv.Version.Minor, err = decoder.Uvarint(); err != nil { + if srv.Version.Minor, err = reader.UVarInt(); err != nil { return fmt.Errorf("could not read server minor version: %v", err) } - if srv.Revision, err = decoder.Uvarint(); err != nil { + if srv.Revision, err = reader.UVarInt(); err != nil { return fmt.Errorf("could not read server revision: %v", err) } if srv.Revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE { - name, err := decoder.String() + name, err := reader.Str() if err != nil { return fmt.Errorf("could not read server timezone: %v", err) } @@ -87,12 +108,12 @@ func (srv *ServerHandshake) Decode(decoder *binary.Decoder) (err error) { } } if srv.Revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME { - if srv.DisplayName, err = decoder.String(); err != nil { + if srv.DisplayName, err = reader.Str(); err != nil { return fmt.Errorf("could not read server display name: %v", err) } } if srv.Revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH { - if srv.Version.Patch, err = decoder.Uvarint(); err != nil { + if srv.Version.Patch, err = reader.UVarInt(); err != nil { return fmt.Errorf("could not read server patch: %v", err) } } else { @@ -110,3 +131,35 @@ func (srv ServerHandshake) String() string { srv.Timezone, ) } + +func (v Version) String() string { + return fmt.Sprintf("%d.%d.%d", + v.Major, + v.Minor, + v.Patch, + ) +} + +func (v *Version) UnmarshalYAML(value *yaml.Node) (err error) { + versions := strings.Split(value.Value, ".") + if len(versions) < 1 || len(versions) > 3 { + return fmt.Errorf("%s is not a valid version", value.Value) + } + for i := range versions { + switch i { + case 0: + if v.Major, err = strconv.ParseUint(versions[i], 10, 8); err != nil { + return err + } + case 1: + if v.Minor, err = strconv.ParseUint(versions[i], 10, 8); err != nil { + return err + } + case 2: + if v.Patch, err = strconv.ParseUint(versions[i], 10, 8); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/profile_info.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/profile_info.go index 5617529..3f46d3d 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/profile_info.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/profile_info.go @@ -19,8 +19,7 @@ package proto import ( "fmt" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + chproto "github.com/ClickHouse/ch-go/proto" ) type ProfileInfo struct { @@ -32,23 +31,23 @@ type ProfileInfo struct { CalculatedRowsBeforeLimit bool } -func (p *ProfileInfo) Decode(decoder *binary.Decoder, revision uint64) (err error) { - if p.Rows, err = decoder.Uvarint(); err != nil { +func (p *ProfileInfo) Decode(reader *chproto.Reader, revision uint64) (err error) { + if p.Rows, err = reader.UVarInt(); err != nil { 
return err } - if p.Blocks, err = decoder.Uvarint(); err != nil { + if p.Blocks, err = reader.UVarInt(); err != nil { return err } - if p.Bytes, err = decoder.Uvarint(); err != nil { + if p.Bytes, err = reader.UVarInt(); err != nil { return err } - if p.AppliedLimit, err = decoder.Bool(); err != nil { + if p.AppliedLimit, err = reader.Bool(); err != nil { return err } - if p.RowsBeforeLimit, err = decoder.Uvarint(); err != nil { + if p.RowsBeforeLimit, err = reader.UVarInt(); err != nil { return err } - if p.CalculatedRowsBeforeLimit, err = decoder.Bool(); err != nil { + if p.CalculatedRowsBeforeLimit, err = reader.Bool(); err != nil { return err } return nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go index 7e77c47..4bff94e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go @@ -19,8 +19,9 @@ package proto import ( "fmt" + "time" - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + chproto "github.com/ClickHouse/ch-go/proto" ) type Progress struct { @@ -29,40 +30,51 @@ type Progress struct { TotalRows uint64 WroteRows uint64 WroteBytes uint64 + Elapsed time.Duration withClient bool } -func (p *Progress) Decode(decoder *binary.Decoder, revision uint64) (err error) { - if p.Rows, err = decoder.Uvarint(); err != nil { +func (p *Progress) Decode(reader *chproto.Reader, revision uint64) (err error) { + if p.Rows, err = reader.UVarInt(); err != nil { return err } - if p.Bytes, err = decoder.Uvarint(); err != nil { + if p.Bytes, err = reader.UVarInt(); err != nil { return err } - if p.TotalRows, err = decoder.Uvarint(); err != nil { + if p.TotalRows, err = reader.UVarInt(); err != nil { return err } if revision >= DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO { p.withClient = true - if p.WroteRows, err = decoder.Uvarint(); err != nil { + if p.WroteRows, err = reader.UVarInt(); err != nil { return err } - if p.WroteBytes, err = decoder.Uvarint(); err != nil { + if p.WroteBytes, err = reader.UVarInt(); err != nil { return err } } + + if revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES { + var n uint64 + if n, err = reader.UVarInt(); err != nil { + return err + } + p.Elapsed = time.Duration(n) * time.Nanosecond + } + return nil } func (p *Progress) String() string { if !p.withClient { - return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d", p.Rows, p.Bytes, p.TotalRows) + return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, elapsed=%s", p.Rows, p.Bytes, p.TotalRows, p.Elapsed.String()) } - return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, wrote rows=%d wrote bytes=%d", + return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, wrote rows=%d wrote bytes=%d elapsed=%s", p.Rows, p.Bytes, p.TotalRows, p.WroteRows, p.WroteBytes, + p.Elapsed.String(), ) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/query.go index 492f5c6..330bd8a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/query.go @@ -20,10 +20,10 @@ package proto import ( stdbin "encoding/binary" "fmt" - "os" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + chproto "github.com/ClickHouse/ch-go/proto" "go.opentelemetry.io/otel/trace" + "os" + "strings" ) var ( @@ -32,38 +32,49 @@ var ( ) type Query struct { - ID string - Span 
trace.SpanContext - Body string - QuotaKey string - Settings Settings - Compression bool - InitialUser string - InitialAddress string + ID string + ClientName string + ClientVersion Version + ClientTCPProtocolVersion uint64 + Span trace.SpanContext + Body string + QuotaKey string + Settings Settings + Parameters Parameters + Compression bool + InitialUser string + InitialAddress string } -func (q *Query) Encode(encoder *binary.Encoder, revision uint64) error { - if err := encoder.String(q.ID); err != nil { - return err - } +func (q *Query) Encode(buffer *chproto.Buffer, revision uint64) error { + buffer.PutString(q.ID) // client_info - if err := q.encodeClientInfo(encoder, revision); err != nil { + if err := q.encodeClientInfo(buffer, revision); err != nil { return err } // settings - if err := q.Settings.Encode(encoder, revision); err != nil { + if err := q.Settings.Encode(buffer, revision); err != nil { return err } - encoder.String("" /* empty string is a marker of the end of setting */) + buffer.PutString("") /* empty string is a marker of the end of setting */ if revision >= DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET { - encoder.String("") + buffer.PutString("") } { - encoder.Byte(StateComplete) - encoder.Bool(q.Compression) + buffer.PutByte(StateComplete) + buffer.PutBool(q.Compression) } - return encoder.String(q.Body) + buffer.PutString(q.Body) + + if revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS { + if err := q.Parameters.Encode(buffer, revision); err != nil { + return err + } + buffer.PutString("") /* empty string is a marker of the end of parameters */ + } + + return nil } func swap64(b []byte) { @@ -73,57 +84,57 @@ func swap64(b []byte) { } } -func (q *Query) encodeClientInfo(encoder *binary.Encoder, revision uint64) error { - encoder.Byte(ClientQueryInitial) - encoder.String(q.InitialUser) // initial_user - encoder.String("") // initial_query_id - encoder.String(q.InitialAddress) // initial_address +func (q *Query) encodeClientInfo(buffer *chproto.Buffer, revision uint64) error { + buffer.PutByte(ClientQueryInitial) + buffer.PutString(q.InitialUser) // initial_user + buffer.PutString("") // initial_query_id + buffer.PutString(q.InitialAddress) // initial_address if revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME { - encoder.Int64(0) // initial_query_start_time_microseconds + buffer.PutInt64(0) // initial_query_start_time_microseconds } - encoder.Byte(1) // interface [tcp - 1, http - 2] + buffer.PutByte(1) // interface [tcp - 1, http - 2] { - encoder.String(osUser) - encoder.String(hostname) - encoder.String(ClientName) - encoder.Uvarint(ClientVersionMajor) - encoder.Uvarint(ClientVersionMinor) - encoder.Uvarint(ClientTCPProtocolVersion) + buffer.PutString(osUser) + buffer.PutString(hostname) + buffer.PutString(q.ClientName) + buffer.PutUVarInt(q.ClientVersion.Major) + buffer.PutUVarInt(q.ClientVersion.Minor) + buffer.PutUVarInt(q.ClientTCPProtocolVersion) } if revision >= DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO { - encoder.String(q.QuotaKey) + buffer.PutString(q.QuotaKey) } if revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH { - encoder.Uvarint(0) + buffer.PutUVarInt(0) } if revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH { - encoder.Uvarint(0) + buffer.PutUVarInt(0) } if revision >= DBMS_MIN_REVISION_WITH_OPENTELEMETRY { switch { case q.Span.IsValid(): - encoder.Byte(1) + buffer.PutByte(1) { v := q.Span.TraceID() swap64(v[:]) // https://github.com/ClickHouse/ClickHouse/issues/34369 - encoder.Raw(v[:]) + buffer.PutRaw(v[:]) } { v 
:= q.Span.SpanID() swap64(v[:]) // https://github.com/ClickHouse/ClickHouse/issues/34369 - encoder.Raw(v[:]) + buffer.PutRaw(v[:]) } - encoder.String(q.Span.TraceState().String()) - encoder.Byte(byte(q.Span.TraceFlags())) + buffer.PutString(q.Span.TraceState().String()) + buffer.PutByte(byte(q.Span.TraceFlags())) default: - encoder.Byte(0) + buffer.PutByte(0) } } if revision >= DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS { - encoder.Uvarint(0) // collaborate_with_initiator - encoder.Uvarint(0) // count_participating_replicas - encoder.Uvarint(0) // number_of_current_replica + buffer.PutUVarInt(0) // collaborate_with_initiator + buffer.PutUVarInt(0) // count_participating_replicas + buffer.PutUVarInt(0) // number_of_current_replica } return nil } @@ -131,23 +142,28 @@ func (q *Query) encodeClientInfo(encoder *binary.Encoder, revision uint64) error type Settings []Setting type Setting struct { - Key string - Value interface{} + Key string + Value any + Important bool + Custom bool } -func (s Settings) Encode(encoder *binary.Encoder, revision uint64) error { +const ( + settingFlagImportant = 0x01 + settingFlagCustom = 0x02 +) + +func (s Settings) Encode(buffer *chproto.Buffer, revision uint64) error { for _, s := range s { - if err := s.encode(encoder, revision); err != nil { + if err := s.encode(buffer, revision); err != nil { return err } } return nil } -func (s *Setting) encode(encoder *binary.Encoder, revision uint64) error { - if err := encoder.String(s.Key); err != nil { - return err - } +func (s *Setting) encode(buffer *chproto.Buffer, revision uint64) error { + buffer.PutString(s.Key) if revision <= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS { var value uint64 switch v := s.Value.(type) { @@ -160,10 +176,73 @@ func (s *Setting) encode(encoder *binary.Encoder, revision uint64) error { default: return fmt.Errorf("query setting %s has unsupported data type", s.Key) } - return encoder.Uvarint(value) + buffer.PutUVarInt(value) + return nil + } + + { + var flags uint64 + if s.Important { + flags |= settingFlagImportant + } + if s.Custom { + flags |= settingFlagCustom + } + buffer.PutUVarInt(flags) } - if err := encoder.Bool(true); err != nil { // is_important + + if s.Custom { + fieldDump, err := encodeFieldDump(s.Value) + if err != nil { + return err + } + + buffer.PutString(fieldDump) + } else { + buffer.PutString(fmt.Sprint(s.Value)) + } + + return nil +} + +type Parameters []Parameter + +type Parameter struct { + Key string + Value string +} + +func (s Parameters) Encode(buffer *chproto.Buffer, revision uint64) error { + for _, s := range s { + if err := s.encode(buffer, revision); err != nil { + return err + } + } + return nil +} + +func (s *Parameter) encode(buffer *chproto.Buffer, revision uint64) error { + buffer.PutString(s.Key) + buffer.PutUVarInt(uint64(settingFlagCustom)) + + fieldDump, err := encodeFieldDump(s.Value) + if err != nil { return err } - return encoder.String(fmt.Sprint(s.Value)) + + buffer.PutString(fieldDump) + + return nil +} + +// encodes a field dump with an appropriate type format +// implements the same logic as in ClickHouse Field::restoreFromDump (https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Field.cpp#L312) +// currently, only string type is supported +func encodeFieldDump(value any) (string, error) { + switch v := value.(type) { + case string: + return fmt.Sprintf("'%v'", strings.ReplaceAll(v, "'", "\\'")), nil + } + + return "", fmt.Errorf("unsupported field type %T", value) } diff --git 
a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/table_columns.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/table_columns.go index 0f37d51..c39e660 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/table_columns.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/table_columns.go @@ -19,8 +19,7 @@ package proto import ( "fmt" - - "github.com/ClickHouse/clickhouse-go/v2/lib/binary" + chproto "github.com/ClickHouse/ch-go/proto" ) type TableColumns struct { @@ -28,11 +27,11 @@ type TableColumns struct { Second string } -func (t *TableColumns) Decode(decoder *binary.Decoder, revision uint64) (err error) { - if t.First, err = decoder.String(); err != nil { +func (t *TableColumns) Decode(reader *chproto.Reader, revision uint64) (err error) { + if t.First, err = reader.Str(); err != nil { return err } - if t.Second, err = decoder.String(); err != nil { + if t.Second, err = reader.Str(); err != nil { return err } return nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go new file mode 100644 index 0000000..9b7e94c --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go @@ -0,0 +1,60 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/pkg/errors" + "regexp" + "time" +) + +var ( + ErrExpectedStringValueInNamedValueForQueryParameter = errors.New("expected string value in NamedValue for query parameter") + + hasQueryParamsRe = regexp.MustCompile("{.+:.+}") +) + +func bindQueryOrAppendParameters(paramsProtocolSupport bool, options *QueryOptions, query string, timezone *time.Location, args ...any) (string, error) { + // prefer native query parameters over legacy bind if query parameters provided explicit + if len(options.parameters) > 0 { + return query, nil + } + + // validate if query contains a {:} syntax, so it's intentional use of query parameters + // parameter values will be loaded from `args ...any` for compatibility + if paramsProtocolSupport && + len(args) > 0 && + hasQueryParamsRe.MatchString(query) { + options.parameters = make(Parameters, len(args)) + for _, a := range args { + if p, ok := a.(driver.NamedValue); ok { + if str, ok := p.Value.(string); ok { + options.parameters[p.Name] = str + continue + } + } + + return "", ErrExpectedStringValueInNamedValueForQueryParameter + } + + return query, nil + } + + return bind(timezone, query, args...) 
+} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.go new file mode 100644 index 0000000..1173690 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.go @@ -0,0 +1,78 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package resources + +import ( + _ "embed" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" + "gopkg.in/yaml.v3" + "strings" +) + +type Meta struct { + ClickhouseVersions []proto.Version `yaml:"clickhouse_versions"` + GoVersions []proto.Version `yaml:"go_versions"` + hVersion proto.Version +} + +//go:embed meta.yml +var metaFile []byte +var ClientMeta Meta + +func init() { + if err := yaml.Unmarshal(metaFile, &ClientMeta); err != nil { + panic(err) + } + ClientMeta.hVersion = ClientMeta.findGreatestVersion() +} + +func (m *Meta) IsSupportedClickHouseVersion(v proto.Version) bool { + for _, version := range m.ClickhouseVersions { + if version.Major == v.Major && version.Minor == v.Minor { + // check our patch is greater + return v.Patch >= version.Patch + } + } + return proto.CheckMinVersion(m.hVersion, v) +} + +func (m *Meta) SupportedVersions() string { + versions := make([]string, len(m.ClickhouseVersions), len(m.ClickhouseVersions)) + for i := range m.ClickhouseVersions { + versions[i] = m.ClickhouseVersions[i].String() + } + return strings.Join(versions, ", ") +} + +func (m *Meta) findGreatestVersion() proto.Version { + var maxVersion proto.Version + for _, version := range m.ClickhouseVersions { + if version.Major > maxVersion.Major { + maxVersion = version + } else if version.Major == maxVersion.Major { + if version.Minor > maxVersion.Minor { + maxVersion = version + } else if version.Minor == maxVersion.Minor { + if version.Patch > maxVersion.Patch { + maxVersion = version + } + } + } + } + return maxVersion +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.yml b/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.yml new file mode 100644 index 0000000..327c635 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/resources/meta.yml @@ -0,0 +1,9 @@ +clickhouse_versions: + - 22.3 + - 22.8 + - 22.9 + - 22.10 + - 22.11 +go_versions: + - 1.18 + - 1.19 diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/scan.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/scan.go index 81ae3fe..744eaf0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/scan.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/scan.go @@ -26,7 +26,7 @@ import ( "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -func (ch *clickhouse) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { +func (ch *clickhouse) Select(ctx context.Context, dest 
any, query string, args ...any) error { value := reflect.ValueOf(dest) if value.Kind() != reflect.Ptr { return &OpError{ @@ -70,7 +70,7 @@ func (ch *clickhouse) Select(ctx context.Context, dest interface{}, query string return rows.Err() } -func scan(block *proto.Block, row int, dest ...interface{}) error { +func scan(block *proto.Block, row int, dest ...any) error { columns := block.Columns if len(columns) != len(dest) { return &OpError{ diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/struct_map.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/struct_map.go index 06d3947..c9ff567 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/struct_map.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/struct_map.go @@ -27,7 +27,7 @@ type structMap struct { cache sync.Map } -func (m *structMap) Map(op string, columns []string, s interface{}, ptr bool) ([]interface{}, error) { +func (m *structMap) Map(op string, columns []string, s any, ptr bool) ([]any, error) { v := reflect.ValueOf(s) if v.Kind() != reflect.Ptr { return nil, &OpError{ @@ -54,7 +54,7 @@ func (m *structMap) Map(op string, columns []string, s interface{}, ptr bool) ([ var ( index map[string][]int - values = make([]interface{}, 0, len(columns)) + values = make([]any, 0, len(columns)) ) switch idx, found := m.cache.Load(t); { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/v1_v2_CHANGES.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/v1_v2_CHANGES.md new file mode 100644 index 0000000..0ea34ec --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/v1_v2_CHANGES.md @@ -0,0 +1,8 @@ +# Breaking Changes v1 to v2 + +Known breaking changes for v1 to v2 are collated below. These are subject to change, if a fix is possible, and reflect the latest release only. + +- v1 allowed precision loss when inserting types. For example, a sql.NullInt32 could be inserted to a UInt8 column and float64 and Decimals were interchangeable. Whilst v2 aims to be flexible, it will not transparently lose precision. Users must accept and explicitly perform this work outside the client. +- strings cannot be inserted in Date or DateTime columns in v2. [#574](https://github.com/ClickHouse/clickhouse-go/issues/574) +- Arrays must be strongly typed in v2 e.g. a `[]any` containing strings cannot be inserted into a string column. This conversion must be done outside the client, since it incurs a cost - the array must be iterated and converted. The client will not conceal this overhead in v2. +- v1 used a connection strategy of random. v2 uses in_order by default. diff --git a/vendor/github.com/andybalholm/brotli/LICENSE b/vendor/github.com/andybalholm/brotli/LICENSE new file mode 100644 index 0000000..33b7cdd --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md new file mode 100644 index 0000000..0062521 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/README.md @@ -0,0 +1,14 @@ +This package is a brotli compressor and decompressor implemented in Go. +It was translated from the reference implementation (https://github.com/google/brotli) +with the `c2go` tool at https://github.com/andybalholm/c2go. + +I have been working on new compression algorithms (not translated from C) +in the matchfinder package. +You can use them with the NewWriterV2 function. +Currently they give better results than the old implementation +(at least for compressing my test file, Newton’s *Opticks*) +on levels 2 to 6. + +I am using it in production with https://github.com/andybalholm/redwood. + +API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. diff --git a/vendor/github.com/andybalholm/brotli/backward_references.go b/vendor/github.com/andybalholm/brotli/backward_references.go new file mode 100644 index 0000000..008c054 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references.go @@ -0,0 +1,185 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function to find backward reference copies. */ + +func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint { + if distance <= max_distance { + var distance_plus_3 uint = distance + 3 + var offset0 uint = distance_plus_3 - uint(dist_cache[0]) + var offset1 uint = distance_plus_3 - uint(dist_cache[1]) + if distance == uint(dist_cache[0]) { + return 0 + } else if distance == uint(dist_cache[1]) { + return 1 + } else if offset0 < 7 { + return (0x9750468 >> (4 * offset0)) & 0xF + } else if offset1 < 7 { + return (0xFDB1ACE >> (4 * offset1)) & 0xF + } else if distance == uint(dist_cache[2]) { + return 2 + } else if distance == uint(dist_cache[3]) { + return 3 + } + } + + return distance + numDistanceShortCodes - 1 +} + +var hasherSearchResultPool sync.Pool + +func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var insert_length uint = *last_insert_len + var pos_end uint = position + num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params) + var apply_random_heuristics uint = position + random_heuristics_window_size + var gap uint = 0 + /* Set maximum distance, see section 9.1. of the spec. 
*/ + + const kMinScore uint = scoreBase + 100 + + /* For speed up heuristics for random data. */ + + /* Minimum score to accept a backward reference. */ + hasher.PrepareDistanceCache(dist_cache) + sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr2 == nil { + sr2 = &hasherSearchResult{} + } + sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr == nil { + sr = &hasherSearchResult{} + } + + for position+hasher.HashTypeLength() < pos_end { + var max_length uint = pos_end - position + var max_distance uint = brotli_min_size_t(position, max_backward_limit) + sr.len = 0 + sr.len_code_delta = 0 + sr.distance = 0 + sr.score = kMinScore + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr) + if sr.score > kMinScore { + /* Found a match. Let's look for something even better ahead. */ + var delayed_backward_references_in_row int = 0 + max_length-- + for ; ; max_length-- { + var cost_diff_lazy uint = 175 + if params.quality < minQualityForExtensiveReferenceSearch { + sr2.len = brotli_min_size_t(sr.len-1, max_length) + } else { + sr2.len = 0 + } + sr2.len_code_delta = 0 + sr2.distance = 0 + sr2.score = kMinScore + max_distance = brotli_min_size_t(position+1, max_backward_limit) + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2) + if sr2.score >= sr.score+cost_diff_lazy { + /* Ok, let's just write one byte for now and start a match from the + next byte. */ + position++ + + insert_length++ + *sr = *sr2 + delayed_backward_references_in_row++ + if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end { + continue + } + } + + break + } + + apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size + max_distance = brotli_min_size_t(position, max_backward_limit) + { + /* The first 16 codes are special short-codes, + and the minimum offset is 1. */ + var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache) + if (sr.distance <= (max_distance + gap)) && distance_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + dist_cache[0] = int(sr.distance) + hasher.PrepareDistanceCache(dist_cache) + } + + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)) + } + + *num_literals += insert_length + insert_length = 0 + /* Put the hash keys into the table, if there are enough bytes left. + Depending on the hasher implementation, it can push all positions + in the given range or only a subset of them. + Avoid hash poisoning with RLE data. */ + { + var range_start uint = position + 2 + var range_end uint = brotli_min_size_t(position+sr.len, store_end) + if sr.distance < sr.len>>2 { + range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2))) + } + + hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end) + } + + position += sr.len + } else { + insert_length++ + position++ + + /* If we have not seen matches for a long time, we can skip some + match lookups. Unsuccessful match lookups are very very expensive + and this kind of a heuristic speeds up compression quite + a lot. */ + if position > apply_random_heuristics { + /* Going through uncompressible data, jump. 
*/ + if position > apply_random_heuristics+4*random_heuristics_window_size { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4) + /* It is quite a long time since we saw a copy, so we assume + that this data is not compressible, and store hashes less + often. Hashes of non compressible data are less likely to + turn out to be useful in the future, too, so we store less of + them to not to flood out the hash table of good compressible + data. */ + + var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin) + for ; position < pos_jump; position += 4 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 4 + } + } else { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2) + var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin) + for ; position < pos_jump; position += 2 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 2 + } + } + } + } + } + + insert_length += pos_end - position + *last_insert_len = insert_length + + hasherSearchResultPool.Put(sr) + hasherSearchResultPool.Put(sr2) +} diff --git a/vendor/github.com/andybalholm/brotli/backward_references_hq.go b/vendor/github.com/andybalholm/brotli/backward_references_hq.go new file mode 100644 index 0000000..21629c1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references_hq.go @@ -0,0 +1,796 @@ +package brotli + +import "math" + +type zopfliNode struct { + length uint32 + distance uint32 + dcode_insert_length uint32 + u struct { + cost float32 + next uint32 + shortcut uint32 + } +} + +const maxEffectiveDistanceAlphabetSize = 544 + +const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */ + +var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} + +var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3} + +func initZopfliNodes(array []zopfliNode, length uint) { + var stub zopfliNode + var i uint + stub.length = 1 + stub.distance = 0 + stub.dcode_insert_length = 0 + stub.u.cost = kInfinity + for i = 0; i < length; i++ { + array[i] = stub + } +} + +func zopfliNodeCopyLength(self *zopfliNode) uint32 { + return self.length & 0x1FFFFFF +} + +func zopfliNodeLengthCode(self *zopfliNode) uint32 { + var modifier uint32 = self.length >> 25 + return zopfliNodeCopyLength(self) + 9 - modifier +} + +func zopfliNodeCopyDistance(self *zopfliNode) uint32 { + return self.distance +} + +func zopfliNodeDistanceCode(self *zopfliNode) uint32 { + var short_code uint32 = self.dcode_insert_length >> 27 + if short_code == 0 { + return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1 + } else { + return short_code - 1 + } +} + +func zopfliNodeCommandLength(self *zopfliNode) uint32 { + return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF) +} + +/* Histogram based cost model for zopflification. 
*/ +type zopfliCostModel struct { + cost_cmd_ [numCommandSymbols]float32 + cost_dist_ []float32 + distance_histogram_size uint32 + literal_costs_ []float32 + min_cost_cmd_ float32 + num_bytes_ uint +} + +func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) { + var distance_histogram_size uint32 = dist.alphabet_size + if distance_histogram_size > maxEffectiveDistanceAlphabetSize { + distance_histogram_size = maxEffectiveDistanceAlphabetSize + } + + self.num_bytes_ = num_bytes + self.literal_costs_ = make([]float32, (num_bytes + 2)) + self.cost_dist_ = make([]float32, (dist.alphabet_size)) + self.distance_histogram_size = distance_histogram_size +} + +func cleanupZopfliCostModel(self *zopfliCostModel) { + self.literal_costs_ = nil + self.cost_dist_ = nil +} + +func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) { + var sum uint = 0 + var missing_symbol_sum uint + var log2sum float32 + var missing_symbol_cost float32 + var i uint + for i = 0; i < histogram_size; i++ { + sum += uint(histogram[i]) + } + + log2sum = float32(fastLog2(sum)) + missing_symbol_sum = sum + if !literal_histogram { + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + missing_symbol_sum++ + } + } + } + + missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2 + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + cost[i] = missing_symbol_cost + continue + } + + /* Shannon bits for this symbol. */ + cost[i] = log2sum - float32(fastLog2(uint(histogram[i]))) + + /* Cannot be coded with less than 1 bit */ + if cost[i] < 1 { + cost[i] = 1 + } + } +} + +func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) { + var histogram_literal [numLiteralSymbols]uint32 + var histogram_cmd [numCommandSymbols]uint32 + var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32 + var cost_literal [numLiteralSymbols]float32 + var pos uint = position - last_insert_len + var min_cost_cmd float32 = kInfinity + var cost_cmd []float32 = self.cost_cmd_[:] + var literal_costs []float32 + + histogram_literal = [numLiteralSymbols]uint32{} + histogram_cmd = [numCommandSymbols]uint32{} + histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{} + + for i := range commands { + var inslength uint = uint(commands[i].insert_len_) + var copylength uint = uint(commandCopyLen(&commands[i])) + var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF + var cmdcode uint = uint(commands[i].cmd_prefix_) + var j uint + + histogram_cmd[cmdcode]++ + if cmdcode >= 128 { + histogram_dist[distcode]++ + } + + for j = 0; j < inslength; j++ { + histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++ + } + + pos += inslength + copylength + } + + setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:]) + setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd) + setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_) + + for i := 0; i < numCommandSymbols; i++ { + min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i]) + } + + self.min_cost_cmd_ = min_cost_cmd + { + literal_costs = self.literal_costs_ + var literal_carry float32 = 0.0 + num_bytes := int(self.num_bytes_) + literal_costs[0] = 0.0 + for i := 0; i < num_bytes; i++ { + literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - 
literal_costs[i] + } + } +} + +func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) { + var literal_costs []float32 = self.literal_costs_ + var literal_carry float32 = 0.0 + var cost_dist []float32 = self.cost_dist_ + var cost_cmd []float32 = self.cost_cmd_[:] + var num_bytes uint = self.num_bytes_ + var i uint + estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:]) + literal_costs[0] = 0.0 + for i = 0; i < num_bytes; i++ { + literal_carry += literal_costs[i+1] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - literal_costs[i] + } + + for i = 0; i < numCommandSymbols; i++ { + cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i)))) + } + + for i = 0; uint32(i) < self.distance_histogram_size; i++ { + cost_dist[i] = float32(fastLog2(uint(20 + uint32(i)))) + } + + self.min_cost_cmd_ = float32(fastLog2(11)) +} + +func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 { + return self.cost_cmd_[cmdcode] +} + +func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 { + return self.cost_dist_[distcode] +} + +func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 { + return self.literal_costs_[to] - self.literal_costs_[from] +} + +func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 { + return self.min_cost_cmd_ +} + +/* REQUIRES: len >= 2, start_pos <= pos */ +/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */ +/* Maintains the "ZopfliNode array invariant". */ +func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) { + var next *zopfliNode = &nodes[pos+len] + next.length = uint32(len | (len+9-len_code)<<25) + next.distance = uint32(dist) + next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos)) + next.u.cost = cost +} + +type posData struct { + pos uint + distance_cache [4]int + costdiff float32 + cost float32 +} + +/* Maintains the smallest 8 cost difference together with their positions */ +type startPosQueue struct { + q_ [8]posData + idx_ uint +} + +func initStartPosQueue(self *startPosQueue) { + self.idx_ = 0 +} + +func startPosQueueSize(self *startPosQueue) uint { + return brotli_min_size_t(self.idx_, 8) +} + +func startPosQueuePush(self *startPosQueue, posdata *posData) { + var offset uint = ^(self.idx_) & 7 + self.idx_++ + var len uint = startPosQueueSize(self) + var i uint + var q []posData = self.q_[:] + q[offset] = *posdata + + /* Restore the sorted order. In the list of |len| items at most |len - 1| + adjacent element comparisons / swaps are required. */ + for i = 1; i < len; i++ { + if q[offset&7].costdiff > q[(offset+1)&7].costdiff { + var tmp posData = q[offset&7] + q[offset&7] = q[(offset+1)&7] + q[(offset+1)&7] = tmp + } + + offset++ + } +} + +func startPosQueueAt(self *startPosQueue, k uint) *posData { + return &self.q_[(k-self.idx_)&7] +} + +/* Returns the minimum possible copy length that can improve the cost of any */ +/* future position. */ +func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint { + var min_cost float32 = start_cost + var len uint = 2 + var next_len_bucket uint = 4 + /* Compute the minimum possible cost of reaching any future position. 
*/ + + var next_len_offset uint = 10 + for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost { + /* We already reached (pos + len) with no more cost than the minimum + possible cost of reaching anything from this pos, so there is no point in + looking for lengths <= len. */ + len++ + + if len == next_len_offset { + /* We reached the next copy length code bucket, so we add one more + extra bit to the minimum cost. */ + min_cost += 1.0 + + next_len_offset += next_len_bucket + next_len_bucket *= 2 + } + } + + return uint(len) +} + +/* REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ +func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 { + var clen uint = uint(zopfliNodeCopyLength(&nodes[pos])) + var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos])) + + /* Since |block_start + pos| is the end position of the command, the copy part + starts from |block_start + pos - clen|. Distances that are greater than + this or greater than |max_backward_limit| + |gap| are static dictionary + references, and do not update the last distances. + Also distance code 0 (last distance) does not update the last distances. */ + if pos == 0 { + return 0 + } else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 { + return uint32(pos) + } else { + return nodes[pos-clen-ilen].u.shortcut + } +} + +/* Fills in dist_cache[0..3] with the last four distances (as defined by + Section 4. of the Spec) that would be used at (block_start + pos) if we + used the shortest path of commands from block_start, computed from + nodes[0..pos]. The last four distances at block_start are in + starting_dist_cache[0..3]. + REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ +func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) { + var idx int = 0 + var p uint = uint(nodes[pos].u.shortcut) + for idx < 4 && p > 0 { + var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF) + var clen uint = uint(zopfliNodeCopyLength(&nodes[p])) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[p])) + dist_cache[idx] = int(dist) + idx++ + + /* Because of prerequisite, p >= clen + ilen >= 2. */ + p = uint(nodes[p-clen-ilen].u.shortcut) + } + + for ; idx < 4; idx++ { + dist_cache[idx] = starting_dist_cache[0] + starting_dist_cache = starting_dist_cache[1:] + } +} + +/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it + is eligible. */ +func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) { + /* Save cost, because ComputeDistanceCache invalidates it. */ + var node_cost float32 = nodes[pos].u.cost + nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes) + if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) { + var posdata posData + posdata.pos = pos + posdata.cost = node_cost + posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos) + computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:]) + startPosQueuePush(queue, &posdata) + } +} + +/* Returns longest copy length. 
*/ +func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint { + var cur_ix uint = block_start + pos + var cur_ix_masked uint = cur_ix & ringbuffer_mask + var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit) + var max_len uint = num_bytes - pos + var max_zopfli_len uint = maxZopfliLen(params) + var max_iters uint = maxZopfliCandidates(params) + var min_len uint + var result uint = 0 + var k uint + var gap uint = 0 + + evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes) + { + var posdata *posData = startPosQueueAt(queue, 0) + var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos)) + min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos) + } + + /* Go over the command starting positions in order of increasing cost + difference. */ + for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ { + var posdata *posData = startPosQueueAt(queue, k) + var start uint = posdata.pos + var inscode uint16 = getInsertLengthCode(pos - start) + var start_costdiff float32 = posdata.costdiff + var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos) + var best_len uint = min_len - 1 + var j uint = 0 + /* Look for last distance matches using the distance cache from this + starting position. */ + for ; j < numDistanceShortCodes && best_len < max_len; j++ { + var idx uint = uint(kDistanceCacheIndex[j]) + var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j]) + var prev_ix uint = cur_ix - backward + var len uint = 0 + var continuation byte = ringbuffer[cur_ix_masked+best_len] + if cur_ix_masked+best_len > ringbuffer_mask { + break + } + + if backward > max_distance+gap { + /* Word dictionary -> ignore. */ + continue + } + + if backward <= max_distance { + /* Regular backward reference. */ + if prev_ix >= cur_ix { + continue + } + + prev_ix &= ringbuffer_mask + if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] { + continue + } + + len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len) + } else { + continue + } + { + var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j) + var l uint + for l = best_len + 1; l <= len; l++ { + var copycode uint16 = getCopyLengthCode(l) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0) + var tmp float32 + if cmdcode < 128 { + tmp = base_cost + } else { + tmp = dist_cost + } + var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+l].u.cost { + updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost) + result = brotli_max_size_t(result, l) + } + + best_len = l + } + } + } + + /* At higher iterations look only for new last distance matches, since + looking only for new command start positions with the same distances + does not help much. */ + if k >= 2 { + continue + } + { + /* Loop through all possible copy lengths at this position. 
*/ + var len uint = min_len + for j = 0; j < num_matches; j++ { + var match backwardMatch = matches[j] + var dist uint = uint(match.distance) + var is_dictionary_match bool = (dist > max_distance+gap) + var dist_code uint = dist + numDistanceShortCodes - 1 + var dist_symbol uint16 + var distextra uint32 + var distnumextra uint32 + var dist_cost float32 + var max_match_len uint + /* We already tried all possible last distance matches, so we can use + normal distance code here. */ + prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra) + + distnumextra = uint32(dist_symbol) >> 10 + dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF) + + /* Try all copy lengths up until the maximum copy length corresponding + to this distance. If the distance refers to the static dictionary, or + the maximum length is long enough, try only one maximum length. */ + max_match_len = backwardMatchLength(&match) + + if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) { + len = max_match_len + } + + for ; len <= max_match_len; len++ { + var len_code uint + if is_dictionary_match { + len_code = backwardMatchLengthCode(&match) + } else { + len_code = len + } + var copycode uint16 = getCopyLengthCode(len_code) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, false) + var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+len].u.cost { + updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost) + if len > result { + result = len + } + } + } + } + } + } + + return result +} + +func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint { + var index uint = num_bytes + var num_commands uint = 0 + for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 { + index-- + } + nodes[index].u.next = math.MaxUint32 + for index != 0 { + var len uint = uint(zopfliNodeCommandLength(&nodes[index])) + index -= uint(len) + nodes[index].u.next = uint32(len) + num_commands++ + } + + return num_commands +} + +/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */ +func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var pos uint = 0 + var offset uint32 = nodes[0].u.next + var i uint + var gap uint = 0 + for i = 0; offset != math.MaxUint32; i++ { + var next *zopfliNode = &nodes[uint32(pos)+offset] + var copy_length uint = uint(zopfliNodeCopyLength(next)) + var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF) + pos += insert_length + offset = next.u.next + if i == 0 { + insert_length += *last_insert_len + *last_insert_len = 0 + } + { + var distance uint = uint(zopfliNodeCopyDistance(next)) + var len_code uint = uint(zopfliNodeLengthCode(next)) + var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit) + var is_dictionary bool = (distance > max_distance+gap) + var dist_code uint = uint(zopfliNodeDistanceCode(next)) + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)) + + if !is_dictionary && dist_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + 
dist_cache[0] = int(distance) + } + } + + *num_literals += insert_length + pos += copy_length + } + + *last_insert_len += num_bytes - pos +} + +func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var queue startPosQueue + var cur_match_pos uint = 0 + var i uint + nodes[0].length = 0 + nodes[0].u.cost = 0 + initStartPosQueue(&queue) + for i = 0; i+3 < num_bytes; i++ { + var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + cur_match_pos += uint(num_matches[i]) + if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip) + } + + if skip > 1 { + skip-- + for skip != 0 { + i++ + if i+3 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes) + cur_match_pos += uint(num_matches[i]) + skip-- + } + } + } + + return computeShortestPathFromNodes(num_bytes, nodes) +} + +/* Computes the shortest path of commands from position to at most + position + num_bytes. + + On return, path->size() is the number of commands found and path[i] is the + length of the i-th command (copy length plus insert length). + Note that the sum of the lengths of all commands can be less than num_bytes. + + On return, the nodes[0..num_bytes] array will have the following + "ZopfliNode array invariant": + For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then + (1) nodes[i].copy_length() >= 2 + (2) nodes[i].command_length() <= i and + (3) nodes[i - nodes[i].command_length()].cost < kInfinity + + REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */ +func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var model zopfliCostModel + var queue startPosQueue + var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var i uint + var gap uint = 0 + var lz_matches_offset uint = 0 + nodes[0].length = 0 + nodes[0].u.cost = 0 + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + initStartPosQueue(&queue) + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var skip uint + var num_matches uint + num_matches = findAllMatchesH10(hasher, ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:]) + if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len { + matches[0] = matches[num_matches-1] + num_matches = 1 + } + + skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, 
max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip) + } + + if skip > 1 { + /* Add the tail of the copy to the hasher. */ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end)) + + skip-- + for skip != 0 { + i++ + if i+hasher.HashTypeLength()-1 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes) + skip-- + } + } + } + + cleanupZopfliCostModel(&model) + return computeShortestPathFromNodes(num_bytes, nodes) +} + +func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var nodes []zopfliNode + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliNodes(nodes, num_bytes+1) + zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + nodes = nil +} + +func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var num_matches []uint32 = make([]uint32, num_bytes) + var matches_size uint = 4 * num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var cur_match_pos uint = 0 + var i uint + var orig_num_literals uint + var orig_last_insert_len uint + var orig_dist_cache [4]int + var orig_num_commands int + var model zopfliCostModel + var nodes []zopfliNode + var matches []backwardMatch = make([]backwardMatch, matches_size) + var gap uint = 0 + var shadow_matches uint = 0 + var new_array []backwardMatch + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var max_length uint = num_bytes - i + var num_found_matches uint + var cur_match_end uint + var j uint + + /* Ensure that we have enough free slots. 
*/ + if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + var new_size uint = matches_size + if new_size == 0 { + new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches + } + + for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + new_size *= 2 + } + + new_array = make([]backwardMatch, new_size) + if matches_size != 0 { + copy(new_array, matches[:matches_size]) + } + + matches = new_array + matches_size = new_size + } + + num_found_matches = findAllMatchesH10(hasher.(*h10), ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:]) + cur_match_end = cur_match_pos + num_found_matches + for j = cur_match_pos; j+1 < cur_match_end; j++ { + assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1])) + } + + num_matches[i] = uint32(num_found_matches) + if num_found_matches > 0 { + var match_len uint = backwardMatchLength(&matches[cur_match_end-1]) + if match_len > maxZopfliLenQuality11 { + var skip uint = match_len - 1 + matches[cur_match_pos] = matches[cur_match_end-1] + cur_match_pos++ + num_matches[i] = 1 + + /* Add the tail of the copy to the hasher. */ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end)) + var pos uint = i + for i := 0; i < int(skip); i++ { + num_matches[pos+1:][i] = 0 + } + i += skip + } else { + cur_match_pos = cur_match_end + } + } + } + + orig_num_literals = *num_literals + orig_last_insert_len = *last_insert_len + copy(orig_dist_cache[:], dist_cache[:4]) + orig_num_commands = len(*commands) + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + for i = 0; i < 2; i++ { + initZopfliNodes(nodes, num_bytes+1) + if i == 0 { + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + } else { + zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len) + } + + *commands = (*commands)[:orig_num_commands] + *num_literals = orig_num_literals + *last_insert_len = orig_last_insert_len + copy(dist_cache, orig_dist_cache[:4]) + zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + } + + cleanupZopfliCostModel(&model) + nodes = nil + matches = nil + num_matches = nil +} diff --git a/vendor/github.com/andybalholm/brotli/bit_cost.go b/vendor/github.com/andybalholm/brotli/bit_cost.go new file mode 100644 index 0000000..0005fc1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_cost.go @@ -0,0 +1,436 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions to estimate the bit cost of Huffman trees. 
*/ +func shannonEntropy(population []uint32, size uint, total *uint) float64 { + var sum uint = 0 + var retval float64 = 0 + var population_end []uint32 = population[size:] + var p uint + for -cap(population) < -cap(population_end) { + p = uint(population[0]) + population = population[1:] + sum += p + retval -= float64(p) * fastLog2(p) + } + + if sum != 0 { + retval += float64(sum) * fastLog2(sum) + } + *total = sum + return retval +} + +func bitsEntropy(population []uint32, size uint) float64 { + var sum uint + var retval float64 = shannonEntropy(population, size, &sum) + if retval < float64(sum) { + /* At least one bit per literal is needed. */ + retval = float64(sum) + } + + return retval +} + +const kOneSymbolHistogramCost float64 = 12 +const kTwoSymbolHistogramCost float64 = 20 +const kThreeSymbolHistogramCost float64 = 28 +const kFourSymbolHistogramCost float64 = 37 + +func populationCostLiteral(histogram *histogramLiteral) float64 { + var data_size uint = histogramDataSizeLiteral() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. 
*/ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostCommand(histogram *histogramCommand) float64 { + var data_size uint = histogramDataSizeCommand() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. 
*/ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostDistance(histogram *histogramDistance) float64 { + var data_size uint = histogramDataSizeDistance() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. 
*/ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} diff --git a/vendor/github.com/andybalholm/brotli/bit_reader.go b/vendor/github.com/andybalholm/brotli/bit_reader.go new file mode 100644 index 0000000..fba8687 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_reader.go @@ -0,0 +1,266 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Bit reading helpers */ + +const shortFillBitWindowRead = (8 >> 1) + +var kBitMask = [33]uint32{ + 0x00000000, + 0x00000001, + 0x00000003, + 0x00000007, + 0x0000000F, + 0x0000001F, + 0x0000003F, + 0x0000007F, + 0x000000FF, + 0x000001FF, + 0x000003FF, + 0x000007FF, + 0x00000FFF, + 0x00001FFF, + 0x00003FFF, + 0x00007FFF, + 0x0000FFFF, + 0x0001FFFF, + 0x0003FFFF, + 0x0007FFFF, + 0x000FFFFF, + 0x001FFFFF, + 0x003FFFFF, + 0x007FFFFF, + 0x00FFFFFF, + 0x01FFFFFF, + 0x03FFFFFF, + 0x07FFFFFF, + 0x0FFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + 0xFFFFFFFF, +} + +func bitMask(n uint32) uint32 { + return kBitMask[n] +} + +type bitReader struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +type bitReaderState struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +/* Initializes the BrotliBitReader fields. */ + +/* Ensures that accumulator is not empty. + May consume up to sizeof(brotli_reg_t) - 1 bytes of input. + Returns false if data is required but there is no input available. + For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned + reading. */ +func bitReaderSaveState(from *bitReader, to *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func bitReaderRestoreState(to *bitReader, from *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func getAvailableBits(br *bitReader) uint32 { + return 64 - br.bit_pos_ +} + +/* Returns amount of unread bytes the bit reader still has buffered from the + BrotliInput, including whole bytes in br->val_. */ +func getRemainingBytes(br *bitReader) uint { + return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3)) +} + +/* Checks if there is at least |num| bytes left in the input ring-buffer + (excluding the bits remaining in br->val_). */ +func checkInputAmount(br *bitReader, num uint) bool { + return br.input_len-br.byte_pos >= num +} + +/* Guarantees that there are at least |n_bits| + 1 bits in accumulator. + Precondition: accumulator contains at least 1 bit. + |n_bits| should be in the range [1..24] for regular build. For portable + non-64-bit little-endian build only 16 bits are safe to request. */ +func fillBitWindow(br *bitReader, n_bits uint32) { + if br.bit_pos_ >= 32 { + br.val_ >>= 32 + br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */ + br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32 + br.byte_pos += 4 + } +} + +/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no + more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. 
*/ +func fillBitWindow16(br *bitReader) { + fillBitWindow(br, 17) +} + +/* Tries to pull one byte of input to accumulator. + Returns false if there is no input available. */ +func pullByte(br *bitReader) bool { + if br.byte_pos == br.input_len { + return false + } + + br.val_ >>= 8 + br.val_ |= (uint64(br.input[br.byte_pos])) << 56 + br.bit_pos_ -= 8 + br.byte_pos++ + return true +} + +/* Returns currently available bits. + The number of valid bits could be calculated by BrotliGetAvailableBits. */ +func getBitsUnmasked(br *bitReader) uint64 { + return br.val_ >> br.bit_pos_ +} + +/* Like BrotliGetBits, but does not mask the result. + The result contains at least 16 valid bits. */ +func get16BitsUnmasked(br *bitReader) uint32 { + fillBitWindow(br, 16) + return uint32(getBitsUnmasked(br)) +} + +/* Returns the specified number of bits from |br| without advancing bit + position. */ +func getBits(br *bitReader, n_bits uint32) uint32 { + fillBitWindow(br, n_bits) + return uint32(getBitsUnmasked(br)) & bitMask(n_bits) +} + +/* Tries to peek the specified amount of bits. Returns false, if there + is not enough input. */ +func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + return true +} + +/* Advances the bit pos by |n_bits|. */ +func dropBits(br *bitReader, n_bits uint32) { + br.bit_pos_ += n_bits +} + +func bitReaderUnload(br *bitReader) { + var unused_bytes uint32 = getAvailableBits(br) >> 3 + var unused_bits uint32 = unused_bytes << 3 + br.byte_pos -= uint(unused_bytes) + if unused_bits == 64 { + br.val_ = 0 + } else { + br.val_ <<= unused_bits + } + + br.bit_pos_ += unused_bits +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Precondition: accumulator MUST contain at least |n_bits|. */ +func takeBits(br *bitReader, n_bits uint32, val *uint32) { + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + dropBits(br, n_bits) +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Assumes that there is enough input to perform BrotliFillBitWindow. */ +func readBits(br *bitReader, n_bits uint32) uint32 { + var val uint32 + fillBitWindow(br, n_bits) + takeBits(br, n_bits, &val) + return val +} + +/* Tries to read the specified amount of bits. Returns false, if there + is not enough input. |n_bits| MUST be positive. */ +func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + takeBits(br, n_bits, val) + return true +} + +/* Advances the bit reader position to the next byte boundary and verifies + that any skipped bits are set to zero. */ +func bitReaderJumpToByteBoundary(br *bitReader) bool { + var pad_bits_count uint32 = getAvailableBits(br) & 0x7 + var pad_bits uint32 = 0 + if pad_bits_count != 0 { + takeBits(br, pad_bits_count, &pad_bits) + } + + return pad_bits == 0 +} + +/* Copies remaining input bytes stored in the bit reader to the output. Value + |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be + warmed up again after this. 
*/ +func copyBytes(dest []byte, br *bitReader, num uint) { + for getAvailableBits(br) >= 8 && num > 0 { + dest[0] = byte(getBitsUnmasked(br)) + dropBits(br, 8) + dest = dest[1:] + num-- + } + + copy(dest, br.input[br.byte_pos:][:num]) + br.byte_pos += num +} + +func initBitReader(br *bitReader) { + br.val_ = 0 + br.bit_pos_ = 64 +} + +func warmupBitReader(br *bitReader) bool { + /* Fixing alignment after unaligned BrotliFillWindow would result accumulator + overflow. If unalignment is caused by BrotliSafeReadBits, then there is + enough space in accumulator to fix alignment. */ + if getAvailableBits(br) == 0 { + if !pullByte(br) { + return false + } + } + + return true +} diff --git a/vendor/github.com/andybalholm/brotli/bitwriter.go b/vendor/github.com/andybalholm/brotli/bitwriter.go new file mode 100644 index 0000000..dfc6036 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bitwriter.go @@ -0,0 +1,56 @@ +package brotli + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +type bitWriter struct { + dst []byte + + // Data waiting to be written is the low nbits of bits. + bits uint64 + nbits uint +} + +func (w *bitWriter) writeBits(nb uint, b uint64) { + w.bits |= b << w.nbits + w.nbits += nb + if w.nbits >= 32 { + bits := w.bits + w.bits >>= 32 + w.nbits -= 32 + w.dst = append(w.dst, + byte(bits), + byte(bits>>8), + byte(bits>>16), + byte(bits>>24), + ) + } +} + +func (w *bitWriter) writeSingleBit(bit bool) { + if bit { + w.writeBits(1, 1) + } else { + w.writeBits(1, 0) + } +} + +func (w *bitWriter) jumpToByteBoundary() { + dst := w.dst + for w.nbits != 0 { + dst = append(dst, byte(w.bits)) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + } + w.bits = 0 + w.dst = dst +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter.go b/vendor/github.com/andybalholm/brotli/block_splitter.go new file mode 100644 index 0000000..978a131 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter.go @@ -0,0 +1,144 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Block split point selection utilities. */ + +type blockSplit struct { + num_types uint + num_blocks uint + types []byte + lengths []uint32 + types_alloc_size uint + lengths_alloc_size uint +} + +const ( + kMaxLiteralHistograms uint = 100 + kMaxCommandHistograms uint = 50 + kLiteralBlockSwitchCost float64 = 28.1 + kCommandBlockSwitchCost float64 = 13.5 + kDistanceBlockSwitchCost float64 = 14.6 + kLiteralStrideLength uint = 70 + kCommandStrideLength uint = 40 + kSymbolsPerLiteralHistogram uint = 544 + kSymbolsPerCommandHistogram uint = 530 + kSymbolsPerDistanceHistogram uint = 544 + kMinLengthForBlockSplitting uint = 128 + kIterMulForRefining uint = 2 + kMinItersForRefining uint = 100 +) + +func countLiterals(cmds []command) uint { + var total_length uint = 0 + /* Count how many we have. 
*/ + + for i := range cmds { + total_length += uint(cmds[i].insert_len_) + } + + return total_length +} + +func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) { + var pos uint = 0 + var from_pos uint = offset & mask + for i := range cmds { + var insert_len uint = uint(cmds[i].insert_len_) + if from_pos+insert_len > mask { + var head_size uint = mask + 1 - from_pos + copy(literals[pos:], data[from_pos:][:head_size]) + from_pos = 0 + pos += head_size + insert_len -= head_size + } + + if insert_len > 0 { + copy(literals[pos:], data[from_pos:][:insert_len]) + pos += insert_len + } + + from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask)) + } +} + +func myRand(seed *uint32) uint32 { + /* Initial seed should be 7. In this case, loop length is (1 << 29). */ + *seed *= 16807 + + return *seed +} + +func bitCost(count uint) float64 { + if count == 0 { + return -2.0 + } else { + return fastLog2(count) + } +} + +const histogramsPerBatch = 64 + +const clustersPerBatch = 16 + +func initBlockSplit(self *blockSplit) { + self.num_types = 0 + self.num_blocks = 0 + self.types = self.types[:0] + self.lengths = self.lengths[:0] + self.types_alloc_size = 0 + self.lengths_alloc_size = 0 +} + +func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) { + { + var literals_count uint = countLiterals(cmds) + var literals []byte = make([]byte, literals_count) + + /* Create a continuous array of literals. */ + copyLiteralsToByteArray(cmds, data, pos, mask, literals) + + /* Create the block split on the array of literals. + Literal histograms have alphabet size 256. */ + splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split) + + literals = nil + } + { + var insert_and_copy_codes []uint16 = make([]uint16, len(cmds)) + /* Compute prefix codes for commands. */ + + for i := range cmds { + insert_and_copy_codes[i] = cmds[i].cmd_prefix_ + } + + /* Create the block split on the array of command prefixes. */ + splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split) + + /* TODO: reuse for distances? */ + + insert_and_copy_codes = nil + } + { + var distance_prefixes []uint16 = make([]uint16, len(cmds)) + var j uint = 0 + /* Create a continuous array of distance prefixes. */ + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF + j++ + } + } + + /* Create the block split on the array of distance prefixes. */ + splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split) + + distance_prefixes = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_command.go b/vendor/github.com/andybalholm/brotli/block_splitter_command.go new file mode 100644 index 0000000..9dec13e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_command.go @@ -0,0 +1,434 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorCommand(&histograms[i], data[pos:], stride) + } +} + +func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorCommand(sample, data[pos:], stride) +} + +func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramCommand + histogramClearCommand(&sample) + randomSampleCommand(&seed, data, length, stride, &sample) + histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeCommand() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsCommand_kInvalidId uint16 = 256 + +func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsCommand_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) { + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddCommand(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + 
block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearCommand(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddCommand(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostCommand(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramCommand + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramCommand, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksCommand_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramCommand + var j uint + var best_out uint32 + var best_bits float64 + histogramClearCommand(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddCommand(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksCommand_kInvalidIndex { + 
new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + length := uint(len(data)) + var data_size uint = histogramDataSizeCommand() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramCommand + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramCommand, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. */ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms) + buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksCommand(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_distance.go b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go new file mode 100644 index 0000000..953530d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorDistance(&histograms[i], data[pos:], stride) + } +} + +func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorDistance(sample, data[pos:], stride) +} + +func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramDistance + histogramClearDistance(&sample) + randomSampleDistance(&seed, data, length, stride, &sample) + histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeDistance() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsDistance_kInvalidId uint16 = 256 + +func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsDistance_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) { + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddDistance(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + 
block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearDistance(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddDistance(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostDistance(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramDistance + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramDistance, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksDistance_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramDistance + var j uint + var best_out uint32 + var best_bits float64 + histogramClearDistance(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddDistance(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksDistance_kInvalidIndex { 
+ new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeDistance() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramDistance + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramDistance, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. */ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms) + buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksDistance(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_literal.go b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go new file mode 100644 index 0000000..1c895cf --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorLiteral(&histograms[i], data[pos:], stride) + } +} + +func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorLiteral(sample, data[pos:], stride) +} + +func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramLiteral + histogramClearLiteral(&sample) + randomSampleLiteral(&seed, data, length, stride, &sample) + histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeLiteral() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsLiteral_kInvalidId uint16 = 256 + +func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsLiteral_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) { + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddLiteral(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + 
block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearLiteral(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddLiteral(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostLiteral(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramLiteral + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramLiteral, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksLiteral_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramLiteral + var j uint + var best_out uint32 + var best_bits float64 + histogramClearLiteral(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddLiteral(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex { + 
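+ /* First use of this cluster: give it the next compact index. */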
new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeLiteral() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramLiteral + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramLiteral, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. 
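+ Each iteration reassigns block ids with findBlocksLiteral, renumbers the
+ surviving histograms, and rebuilds them from the new assignment.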
*/ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms) + buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksLiteral(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go new file mode 100644 index 0000000..ee65529 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go @@ -0,0 +1,1539 @@ +package brotli + +import ( + "math" + "sync" +) + +const maxHuffmanTreeSize = (2*numCommandSymbols + 1) + +/* +The maximum size of Huffman dictionary for distances assuming that + + NPOSTFIX = 0 and NDIRECT = 0. +*/ +const maxSimpleDistanceAlphabetSize = 140 + +/* +Represents the range of values belonging to a prefix code: + + [offset, offset + 2^nbits) +*/ +type prefixCodeRange struct { + offset uint32 + nbits uint32 +} + +var kBlockLengthPrefixCode = [numBlockLenSymbols]prefixCodeRange{ + prefixCodeRange{1, 2}, + prefixCodeRange{5, 2}, + prefixCodeRange{9, 2}, + prefixCodeRange{13, 2}, + prefixCodeRange{17, 3}, + prefixCodeRange{25, 3}, + prefixCodeRange{33, 3}, + prefixCodeRange{41, 3}, + prefixCodeRange{49, 4}, + prefixCodeRange{65, 4}, + prefixCodeRange{81, 4}, + prefixCodeRange{97, 4}, + prefixCodeRange{113, 5}, + prefixCodeRange{145, 5}, + prefixCodeRange{177, 5}, + prefixCodeRange{209, 5}, + prefixCodeRange{241, 6}, + prefixCodeRange{305, 6}, + prefixCodeRange{369, 7}, + prefixCodeRange{497, 8}, + prefixCodeRange{753, 9}, + prefixCodeRange{1265, 10}, + prefixCodeRange{2289, 11}, + prefixCodeRange{4337, 12}, + prefixCodeRange{8433, 13}, + prefixCodeRange{16625, 24}, +} + +func blockLengthPrefixCode(len uint32) uint32 { + var code uint32 + if len >= 177 { + if len >= 753 { + code = 20 + } else { + code = 14 + } + } else if len >= 41 { + code = 7 + } else { + code = 0 + } + for code < (numBlockLenSymbols-1) && len >= kBlockLengthPrefixCode[code+1].offset { + code++ + } + return code +} + +func getBlockLengthPrefixCode(len uint32, code *uint, n_extra *uint32, extra *uint32) { + *code = uint(blockLengthPrefixCode(uint32(len))) + *n_extra = kBlockLengthPrefixCode[*code].nbits + *extra = len - kBlockLengthPrefixCode[*code].offset +} + +type blockTypeCodeCalculator struct { + last_type uint + second_last_type uint +} + +func initBlockTypeCodeCalculator(self *blockTypeCodeCalculator) { + self.last_type = 1 + self.second_last_type = 0 +} + +func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { + var type_code uint + if uint(type_) == calculator.last_type+1 { + type_code = 1 + } else if uint(type_) == calculator.second_last_type { + type_code = 0 + } else { + type_code = uint(type_) + 2 + } + calculator.second_last_type = calculator.last_type + calculator.last_type = uint(type_) + return type_code +} + +/* +|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ +func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { + var lg uint + if length == 1 { + lg = 1 + } else { + lg = uint(log2FloorNonZero(uint(uint32(length-1)))) + 1 + } + var tmp uint + if lg < 16 { + tmp = 16 + } else { 
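+ /* Worked example (hypothetical input): length = 70000 gives lg = 17, so tmp = 20
+ and MNIBBLES = 5, i.e. a 20-bit MLEN field. */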
+ tmp = (lg + 3) + } + var mnibbles uint = tmp / 4 + assert(length > 0) + assert(length <= 1<<24) + assert(lg <= 24) + *nibblesbits = uint64(mnibbles) - 4 + *numbits = mnibbles * 4 + *bits = uint64(length) - 1 +} + +func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) { + var copylen_code uint32 = commandCopyLenCode(cmd) + var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_)) + var copycode uint16 = getCopyLengthCode(uint(copylen_code)) + var insnumextra uint32 = getInsertExtra(inscode) + var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode)) + var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode)) + var bits uint64 = copyextraval< 0 + REQUIRES: length <= (1 << 24) +*/ +func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) { + var lenbits uint64 + var nlenbits uint + var nibblesbits uint64 + var is_final uint64 + if is_final_block { + is_final = 1 + } else { + is_final = 0 + } + + /* Write ISLAST bit. */ + writeBits(1, is_final, storage_ix, storage) + + /* Write ISEMPTY bit. */ + if is_final_block { + writeBits(1, 0, storage_ix, storage) + } + + encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) + writeBits(2, nibblesbits, storage_ix, storage) + writeBits(nlenbits, lenbits, storage_ix, storage) + + if !is_final_block { + /* Write ISUNCOMPRESSED bit. */ + writeBits(1, 0, storage_ix, storage) + } +} + +/* +Stores the uncompressed meta-block header. + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ +func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) { + var lenbits uint64 + var nlenbits uint + var nibblesbits uint64 + + /* Write ISLAST bit. + Uncompressed block cannot be the last one, so set to 0. */ + writeBits(1, 0, storage_ix, storage) + + encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) + writeBits(2, nibblesbits, storage_ix, storage) + writeBits(nlenbits, lenbits, storage_ix, storage) + + /* Write ISUNCOMPRESSED bit. */ + writeBits(1, 1, storage_ix, storage) +} + +var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} + +var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15} +var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4} + +func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) { + var skip_some uint = 0 + var codes_to_store uint = codeLengthCodes + /* The bit lengths of the Huffman code over the code length alphabet + are compressed with the following static Huffman code: + Symbol Code + ------ ---- + 0 00 + 1 1110 + 2 110 + 3 01 + 4 10 + 5 1111 */ + + /* Throw away trailing zeros: */ + if num_codes > 1 { + for ; codes_to_store > 0; codes_to_store-- { + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[codes_to_store-1]] != 0 { + break + } + } + } + + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[0]] == 0 && code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[1]] == 0 { + skip_some = 2 /* skips two. */ + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[2]] == 0 { + skip_some = 3 /* skips three. 
*/ + } + } + + writeBits(2, uint64(skip_some), storage_ix, storage) + { + var i uint + for i = skip_some; i < codes_to_store; i++ { + var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]]) + writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage) + } + } +} + +func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) { + var i uint + for i = 0; i < huffman_tree_size; i++ { + var ix uint = uint(huffman_tree[i]) + writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage) + + /* Extra bits */ + switch ix { + case repeatPreviousCodeLength: + writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + + case repeatZeroCodeLength: + writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + } + } +} + +func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) { + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */ + { + /* Sort */ + var i uint + for i = 0; i < num_symbols; i++ { + var j uint + for j = i + 1; j < num_symbols; j++ { + if depths[symbols[j]] < depths[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + } + + if num_symbols == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if num_symbols == 3 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depths[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } +} + +/* +num = alphabet size + + depths = symbol depths +*/ +func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var huffman_tree [numCommandSymbols]byte + var huffman_tree_extra_bits [numCommandSymbols]byte + var huffman_tree_size uint = 0 + var code_length_bitdepth = [codeLengthCodes]byte{0} + var code_length_bitdepth_symbols [codeLengthCodes]uint16 + var huffman_tree_histogram = [codeLengthCodes]uint32{0} + var i uint + var num_codes int = 0 + /* Write the Huffman tree into the brotli-representation. + The command alphabet is the largest, so this allocation will fit all + alphabets. */ + + var code uint = 0 + + assert(num <= numCommandSymbols) + + writeHuffmanTree(depths, num, &huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:]) + + /* Calculate the statistics of the Huffman tree in brotli-representation. 
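+ The histogram below counts how often each code-length code occurs, which is
+ then used to detect whether zero, one, or several distinct codes appear.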
*/ + for i = 0; i < huffman_tree_size; i++ { + huffman_tree_histogram[huffman_tree[i]]++ + } + + for i = 0; i < codeLengthCodes; i++ { + if huffman_tree_histogram[i] != 0 { + if num_codes == 0 { + code = i + num_codes = 1 + } else if num_codes == 1 { + num_codes = 2 + break + } + } + } + + /* Calculate another Huffman tree to use for compressing both the + earlier Huffman tree with. */ + createHuffmanTree(huffman_tree_histogram[:], codeLengthCodes, 5, tree, code_length_bitdepth[:]) + + convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:]) + + /* Now, we have all the data, let's start storing it */ + storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage) + + if num_codes == 1 { + code_length_bitdepth[code] = 0 + } + + /* Store the real Huffman tree now. */ + storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) +} + +/* +Builds a Huffman tree from histogram[0:length] into depth[0:length] and + + bits[0:length] and stores the encoded tree to the bit stream. +*/ +func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var s4 = [4]uint{0} + var i uint + var max_bits uint = 0 + for i = 0; i < histogram_length; i++ { + if histogram[i] != 0 { + if count < 4 { + s4[count] = i + } else if count > 4 { + break + } + + count++ + } + } + { + var max_bits_counter uint = alphabet_size - 1 + for max_bits_counter != 0 { + max_bits_counter >>= 1 + max_bits++ + } + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(s4[0]), storage_ix, storage) + depth[s4[0]] = 0 + bits[s4[0]] = 0 + return + } + + for i := 0; i < int(histogram_length); i++ { + depth[i] = 0 + } + createHuffmanTree(histogram, histogram_length, 15, tree, depth) + convertBitDepthsToSymbols(depth, histogram_length, bits) + + if count <= 4 { + storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage) + } else { + storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage) + } +} + +func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool { + return v0.total_count_ < v1.total_count_ +} + +var huffmanTreePool sync.Pool + +func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + 
initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if count == 3 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depth[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCode(storage_ix, storage) + + /* Actual RLE coding. 
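+ Runs of zero code lengths are emitted with the zero-repeat codes; short runs
+ of a non-zero value are written literally, longer ones with the non-zero
+ repeat codes.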
*/ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage) + } else { + if previous_value != value { + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + } + } else { + reps -= 3 + writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage) + } + + previous_value = value + } + } + } +} + +func buildAndStoreHuffmanTreeFastBW(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + bw.writeBits(4, 1) + bw.writeBits(max_bits, uint64(symbols[0])) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. 
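+ Doubling count_limit on the next pass clamps the smallest counts upward,
+ which flattens the tree until it fits.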
*/ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + bw.writeBits(2, 1) + + bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + } else if count == 3 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + } else { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + bw.writeBits(max_bits, uint64(symbols[3])) + + /* tree-select */ + bw.writeSingleBit(depth[symbols[0]] == 1) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCodeBW(bw) + + /* Actual RLE coding. */ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps]) + } else { + if previous_value != value { + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + } + } else { + reps -= 3 + bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps]) + } + + previous_value = value + } + } + } +} + +func indexOf(v []byte, v_size uint, value byte) uint { + var i uint = 0 + for ; i < v_size; i++ { + if v[i] == value { + return i + } + } + + return i +} + +func moveToFront(v []byte, index uint) { + var value byte = v[index] + var i uint + for i = index; i != 0; i-- { + v[i] = v[i-1] + } + + v[0] = value +} + +func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { + var i uint + var mtf [256]byte + var max_value uint32 + if v_size == 0 { + return + } + + max_value = v_in[0] + for i = 1; i < v_size; i++ { + if v_in[i] > max_value { + max_value = v_in[i] + } + } + + assert(max_value < 256) + for i = 0; uint32(i) <= max_value; i++ { + mtf[i] = byte(i) + } + { + var mtf_size uint = uint(max_value + 1) + for i = 0; i < v_size; i++ { + var index uint = indexOf(mtf[:], mtf_size, byte(v_in[i])) + assert(index < mtf_size) + v_out[i] = uint32(index) + moveToFront(mtf[:], index) + } + } +} + +/* +Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of + + the run length plus extra bits (lower 9 bits is the prefix code and the rest + are the extra bits). Non-zero values in v[] are shifted by + *max_length_prefix. Will not create prefix codes bigger than the initial + value of *max_run_length_prefix. The prefix code of run length L is simply + Log2Floor(L) and the number of extra bits is the same as the prefix code. 
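+ For example, a run of 5 zeros is coded with prefix code 2 and extra bits
+ value 1, since 5 = (1 << 2) + 1.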
+*/ +func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) { + var max_reps uint32 = 0 + var i uint + var max_prefix uint32 + for i = 0; i < in_size; { + var reps uint32 = 0 + for ; i < in_size && v[i] != 0; i++ { + } + for ; i < in_size && v[i] == 0; i++ { + reps++ + } + + max_reps = brotli_max_uint32_t(reps, max_reps) + } + + if max_reps > 0 { + max_prefix = log2FloorNonZero(uint(max_reps)) + } else { + max_prefix = 0 + } + max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix) + *max_run_length_prefix = max_prefix + *out_size = 0 + for i = 0; i < in_size; { + assert(*out_size <= i) + if v[i] != 0 { + v[*out_size] = v[i] + *max_run_length_prefix + i++ + (*out_size)++ + } else { + var reps uint32 = 1 + var k uint + for k = i + 1; k < in_size && v[k] == 0; k++ { + reps++ + } + + i += uint(reps) + for reps != 0 { + if reps < 2< 0) + writeSingleBit(use_rle, storage_ix, storage) + if use_rle { + writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage) + } + } + + buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage) + for i = 0; i < num_rle_symbols; i++ { + var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask + var extra_bits_val uint32 = rle_symbols[i] >> symbolBits + writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage) + if rle_symbol > 0 && rle_symbol <= max_run_length_prefix { + writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage) + } + } + + writeBits(1, 1, storage_ix, storage) /* use move-to-front */ + rle_symbols = nil +} + +/* Stores the block switch command with index block_ix to the bit stream. */ +func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) { + var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type) + var lencode uint + var len_nextra uint32 + var len_extra uint32 + if !is_first_block { + writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage) + } + + getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra) + + writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage) + writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage) +} + +/* +Builds a BlockSplitCode data structure from the block split given by the + + vector of block types and block lengths and stores it to the bit stream. +*/ +func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) { + var type_histo [maxBlockTypeSymbols]uint32 + var length_histo [numBlockLenSymbols]uint32 + var i uint + var type_code_calculator blockTypeCodeCalculator + for i := 0; i < int(num_types+2); i++ { + type_histo[i] = 0 + } + length_histo = [numBlockLenSymbols]uint32{} + initBlockTypeCodeCalculator(&type_code_calculator) + for i = 0; i < num_blocks; i++ { + var type_code uint = nextBlockTypeCode(&type_code_calculator, types[i]) + if i != 0 { + type_histo[type_code]++ + } + length_histo[blockLengthPrefixCode(lengths[i])]++ + } + + storeVarLenUint8(num_types-1, storage_ix, storage) + if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? 
*/ + buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage) + buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage) + storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage) + } +} + +/* Stores a context map where the histogram type is always the block type. */ +func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + storeVarLenUint8(num_types-1, storage_ix, storage) + if num_types > 1 { + var repeat_code uint = context_bits - 1 + var repeat_bits uint = (1 << repeat_code) - 1 + var alphabet_size uint = num_types + repeat_code + var histogram [maxContextMapSymbols]uint32 + var depths [maxContextMapSymbols]byte + var bits [maxContextMapSymbols]uint16 + var i uint + for i := 0; i < int(alphabet_size); i++ { + histogram[i] = 0 + } + + /* Write RLEMAX. */ + writeBits(1, 1, storage_ix, storage) + + writeBits(4, uint64(repeat_code)-1, storage_ix, storage) + histogram[repeat_code] = uint32(num_types) + histogram[0] = 1 + for i = context_bits; i < alphabet_size; i++ { + histogram[i] = 1 + } + + buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage) + for i = 0; i < num_types; i++ { + var tmp uint + if i == 0 { + tmp = 0 + } else { + tmp = i + context_bits - 1 + } + var code uint = tmp + writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage) + writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage) + } + + /* Write IMTF (inverse-move-to-front) bit. */ + writeBits(1, 1, storage_ix, storage) + } +} + +/* Manages the encoding of one block category (literal, command or distance). */ +type blockEncoder struct { + histogram_length_ uint + num_block_types_ uint + block_types_ []byte + block_lengths_ []uint32 + num_blocks_ uint + block_split_code_ blockSplitCode + block_ix_ uint + block_len_ uint + entropy_ix_ uint + depths_ []byte + bits_ []uint16 +} + +var blockEncoderPool sync.Pool + +func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder { + self, _ := blockEncoderPool.Get().(*blockEncoder) + + if self != nil { + self.block_ix_ = 0 + self.entropy_ix_ = 0 + self.depths_ = self.depths_[:0] + self.bits_ = self.bits_[:0] + } else { + self = &blockEncoder{} + } + + self.histogram_length_ = histogram_length + self.num_block_types_ = num_block_types + self.block_types_ = block_types + self.block_lengths_ = block_lengths + self.num_blocks_ = num_blocks + initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator) + if num_blocks == 0 { + self.block_len_ = 0 + } else { + self.block_len_ = uint(block_lengths[0]) + } + + return self +} + +func cleanupBlockEncoder(self *blockEncoder) { + blockEncoderPool.Put(self) +} + +/* +Creates entropy codes of block lengths and block types and stores them + + to the bit stream. +*/ +func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { + buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) +} + +/* +Stores the next symbol with the entropy code of the current block type. 
+ + Updates the block type and block length at block boundaries. +*/ +func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) * self.histogram_length_ + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var ix uint = self.entropy_ix_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +/* +Stores the next symbol with the entropy code of the current block type and + + context value. + Updates the block type and block length at block boundaries. +*/ +func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) << context_bits + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var histo_ix uint = uint(context_map[self.entropy_ix_+context]) + var ix uint = histo_ix*self.histogram_length_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ 
= make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func jumpToByteBoundary(storage_ix *uint, storage []byte) { + *storage_ix = (*storage_ix + 7) &^ 7 + storage[*storage_ix>>3] = 0 +} + +func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) { + var pos uint = start_pos + var i uint + var num_distance_symbols uint32 = params.dist.alphabet_size + var num_effective_distance_symbols uint32 = num_distance_symbols + var tree []huffmanTree + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var dist *distanceParams = ¶ms.dist + if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols { + num_effective_distance_symbols = numHistogramDistanceSymbols + } + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks) + command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks) + distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks) + + buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage) + + writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage) + writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage) + for i = 0; i < mb.literal_split.num_types; i++ { + writeBits(2, uint64(literal_context_mode), storage_ix, storage) + } + + if mb.literal_context_map_size == 0 { + storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage) + } + + if mb.distance_context_map_size == 0 { + storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage) + } + + buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage) + tree = nil + + for _, cmd := 
range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + storeSymbol(command_enc, cmd_code, storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + if mb.literal_context_map_size == 0 { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage) + pos++ + } + } else { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut)) + var literal byte = input[pos&mask] + storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits) + prev_byte2 = prev_byte + prev_byte = literal + pos++ + } + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 { + prev_byte2 = input[(pos-2)&mask] + prev_byte = input[(pos-1)&mask] + if cmd.cmd_prefix_ >= 128 { + var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint64 = uint64(cmd.dist_extra_) + if mb.distance_context_map_size == 0 { + storeSymbol(distance_enc, dist_code, storage_ix, storage) + } else { + var context uint = uint(commandDistanceContext(&cmd)) + storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits) + } + + writeBits(uint(distnumextra), distextra, storage_ix, storage) + } + } + } + + cleanupBlockEncoder(distance_enc) + cleanupBlockEncoder(command_enc) + cleanupBlockEncoder(literal_enc) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) { + var pos uint = start_pos + for _, cmd := range commands { + var j uint + histogramAddCommand(cmd_histo, uint(cmd.cmd_prefix_)) + for j = uint(cmd.insert_len_); j != 0; j-- { + histogramAddLiteral(lit_histo, uint(input[pos&mask])) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + histogramAddDistance(dist_histo, uint(cmd.dist_prefix_)&0x3FF) + } + } +} + +func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) { + var pos uint = start_pos + for _, cmd := range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + var j uint + writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + for j = uint(cmd.insert_len_); j != 0; j-- { + var literal byte = input[pos&mask] + writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint32 = cmd.dist_extra_ + writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage) + writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage) + } + } +} + +func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var 
dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + var tree []huffmanTree + var num_distance_symbols uint32 = params.dist.alphabet_size + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + + writeBits(13, 0, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage) + tree = nil + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var num_distance_symbols uint32 = params.dist.alphabet_size + var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1 + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + writeBits(13, 0, storage_ix, storage) + + if len(commands) <= 128 { + var histogram = [numLiteralSymbols]uint32{0} + var pos uint = start_pos + var num_literals uint = 0 + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + for _, cmd := range commands { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + histogram[input[pos&mask]]++ + pos++ + } + + num_literals += uint(cmd.insert_len_) + pos += uint(commandCopyLen(&cmd)) + } + + buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + storeStaticCommandHuffmanTree(storage_ix, storage) + storeStaticDistanceHuffmanTree(storage_ix, storage) + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage) + } else { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], 
cmd_histo.total_count_, /* max_bits = */ + 10, cmd_depth[:], cmd_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */ + uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage) + + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) + } + + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +/* +This is for storing uncompressed blocks (simple raw storage of + + bytes-as-bytes). +*/ +func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { + var masked_pos uint = position & mask + storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) + jumpToByteBoundary(storage_ix, storage) + + if masked_pos+len > mask+1 { + var len1 uint = mask + 1 - masked_pos + copy(storage[*storage_ix>>3:], input[masked_pos:][:len1]) + *storage_ix += len1 << 3 + len -= len1 + masked_pos = 0 + } + + copy(storage[*storage_ix>>3:], input[masked_pos:][:len]) + *storage_ix += uint(len << 3) + + /* We need to clear the next 4 bytes to continue to be + compatible with BrotliWriteBits. */ + writeBitsPrepareStorage(*storage_ix, storage) + + /* Since the uncompressed block itself may not be the final block, add an + empty one after this. */ + if is_final_block { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + jumpToByteBoundary(storage_ix, storage) + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster.go b/vendor/github.com/andybalholm/brotli/cluster.go new file mode 100644 index 0000000..df8a328 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for clustering similar histograms together. */ + +type histogramPair struct { + idx1 uint32 + idx2 uint32 + cost_combo float64 + cost_diff float64 +} + +func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool { + if p1.cost_diff != p2.cost_diff { + return p1.cost_diff > p2.cost_diff + } + + return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1) +} + +/* Returns entropy reduction of the context map when we combine two clusters. */ +func clusterCostDiff(size_a uint, size_b uint) float64 { + var size_c uint = size_a + size_b + return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_command.go b/vendor/github.com/andybalholm/brotli/cluster_command.go new file mode 100644 index 0000000..45b569b --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_command.go @@ -0,0 +1,164 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
*/ +func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramCommand = out[idx1] + var cost_combo float64 + histogramAddHistogramCommand(&combo, &out[idx2]) + cost_combo = populationCostCommand(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. */ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. 
*/ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramCommand = *histogram + histogramAddHistogramCommand(&tmp, candidate) + return populationCostCommand(&tmp) - candidate.bit_cost_ + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_distance.go b/vendor/github.com/andybalholm/brotli/cluster_distance.go new file mode 100644 index 0000000..1aaa86e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_distance.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ +func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramDistance = out[idx1] + var cost_combo float64 + histogramAddHistogramDistance(&combo, &out[idx2]) + cost_combo = populationCostDistance(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. */ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. 
*/ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramDistance = *histogram + histogramAddHistogramDistance(&tmp, candidate) + return populationCostDistance(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. */ +func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. 
*/ + for i = 0; i < num_clusters; i++ { + histogramClearDistance(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramDistance(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramDistance + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexDistance_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. */ + tmp = make([]histogramDistance, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. */ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostDistance(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. 
*/ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. */ + *out_size = histogramReindexDistance(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_literal.go b/vendor/github.com/andybalholm/brotli/cluster_literal.go new file mode 100644 index 0000000..6ba66f3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_literal.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ +func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramLiteral = out[idx1] + var cost_combo float64 + histogramAddHistogramLiteral(&combo, &out[idx2]) + cost_combo = populationCostLiteral(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. 
*/ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. */ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramLiteral = *histogram + histogramAddHistogramLiteral(&tmp, candidate) + return populationCostLiteral(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. 
*/ +func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. */ + for i = 0; i < num_clusters; i++ { + histogramClearLiteral(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramLiteral(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramLiteral + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexLiteral_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. */ + tmp = make([]histogramLiteral, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. 
*/ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostLiteral(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. */ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. 
*/ + *out_size = histogramReindexLiteral(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/command.go b/vendor/github.com/andybalholm/brotli/command.go new file mode 100644 index 0000000..b1662a5 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/command.go @@ -0,0 +1,254 @@ +package brotli + +var kInsBase = []uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +var kInsExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, +} + +var kCopyBase = []uint32{ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 18, + 22, + 30, + 38, + 54, + 70, + 102, + 134, + 198, + 326, + 582, + 1094, + 2118, +} + +var kCopyExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, +} + +func getInsertLengthCode(insertlen uint) uint16 { + if insertlen < 6 { + return uint16(insertlen) + } else if insertlen < 130 { + var nbits uint32 = log2FloorNonZero(insertlen-2) - 1 + return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2) + } else if insertlen < 2114 { + return uint16(log2FloorNonZero(insertlen-66) + 10) + } else if insertlen < 6210 { + return 21 + } else if insertlen < 22594 { + return 22 + } else { + return 23 + } +} + +func getCopyLengthCode(copylen uint) uint16 { + if copylen < 10 { + return uint16(copylen - 2) + } else if copylen < 134 { + var nbits uint32 = log2FloorNonZero(copylen-6) - 1 + return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4) + } else if copylen < 2118 { + return uint16(log2FloorNonZero(copylen-70) + 12) + } else { + return 23 + } +} + +func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 { + var bits64 uint16 = uint16(copycode&0x7 | (inscode&0x7)<<3) + if use_last_distance && inscode < 8 && copycode < 16 { + if copycode < 8 { + return bits64 + } else { + return bits64 | 64 + } + } else { + /* Specification: 5 Encoding of ... (last table) */ + /* offset = 2 * index, where index is in range [0..8] */ + var offset uint32 = 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3)) + + /* All values in specification are K * 64, + where K = [2, 3, 6, 4, 5, 8, 7, 9, 10], + i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], + K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D. + All values in D require only 2 bits to encode. + Magic constant is shifted 6 bits left, to avoid final multiplication. */ + offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0) + + return uint16(offset | uint32(bits64)) + } +} + +func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) { + var inscode uint16 = getInsertLengthCode(insertlen) + var copycode uint16 = getCopyLengthCode(copylen) + *code = combineLengthCodes(inscode, copycode, use_last_distance) +} + +func getInsertBase(inscode uint16) uint32 { + return kInsBase[inscode] +} + +func getInsertExtra(inscode uint16) uint32 { + return kInsExtra[inscode] +} + +func getCopyBase(copycode uint16) uint32 { + return kCopyBase[copycode] +} + +func getCopyExtra(copycode uint16) uint32 { + return kCopyExtra[copycode] +} + +type command struct { + insert_len_ uint32 + copy_len_ uint32 + dist_extra_ uint32 + cmd_prefix_ uint16 + dist_prefix_ uint16 +} + +/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. 
*/ +func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) { + /* Don't rely on signed int representation, use honest casts. */ + var delta uint32 = uint32(byte(int8(copylen_code_delta))) + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = uint32(uint32(copylen) | delta<<25) + + /* The distance prefix and extra bits are stored in this Command as if + npostfix and ndirect were 0, they are only recomputed later after the + clustering if needed. */ + prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_) + + return cmd +} + +func makeInsertCommand(insertlen uint) (cmd command) { + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = 4 << 25 + cmd.dist_extra_ = 0 + cmd.dist_prefix_ = numDistanceShortCodes + getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_) + return cmd +} + +func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 { + if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes { + return uint32(self.dist_prefix_) & 0x3FF + } else { + var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF + var nbits uint32 = uint32(self.dist_prefix_) >> 10 + var extra uint32 = self.dist_extra_ + var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1 + var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits + var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask + var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4 + return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes + } +} + +func commandDistanceContext(self *command) uint32 { + var r uint32 = uint32(self.cmd_prefix_) >> 6 + var c uint32 = uint32(self.cmd_prefix_) & 7 + if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) { + return c + } + + return 3 +} + +func commandCopyLen(self *command) uint32 { + return self.copy_len_ & 0x1FFFFFF +} + +func commandCopyLenCode(self *command) uint32 { + var modifier uint32 = self.copy_len_ >> 25 + var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1))) + return uint32(int32(self.copy_len_&0x1FFFFFF) + delta) +} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go new file mode 100644 index 0000000..c9bd057 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment.go @@ -0,0 +1,834 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. This function uses one-pass processing: when we find a backward + match, we immediately emit the corresponding command and literal codes to + the bit stream. 
+ + Adapted from the CompressFragment() function in + https://github.com/google/snappy/blob/master/snappy.cc */ + +const maxDistance_compress_fragment = 262128 + +func hash5(p []byte, shift uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 { + assert(offset >= 0) + assert(offset <= 3) + { + var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch5(p1 []byte, p2 []byte) bool { + return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) && + p1[4] == p2[4] +} + +/* Builds a literal prefix code into "depths" and "bits" based on the statistics + of the "input" string and stores it into the bit stream. + Note that the prefix code here is built from the pre-LZ77 input, therefore + we can only approximate the statistics of the actual literal stream. + Moreover, for long inputs we build a histogram from a sample of the input + and thus have to assign a non-zero depth for each literal. + Returns estimated compression ratio millibytes/char for encoding given input + with generated code. */ +func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint { + var histogram = [256]uint32{0} + var histogram_total uint + var i uint + if input_size < 1<<15 { + for i = 0; i < input_size; i++ { + histogram[input[i]]++ + } + + histogram_total = input_size + for i = 0; i < 256; i++ { + /* We weigh the first 11 samples with weight 3 to account for the + balancing effect of the LZ77 phase on the histogram. */ + var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } else { + const kSampleRate uint = 29 + for i = 0; i < input_size; i += kSampleRate { + histogram[input[i]]++ + } + + histogram_total = (input_size + kSampleRate - 1) / kSampleRate + for i = 0; i < 256; i++ { + /* We add 1 to each population count to avoid 0 bit depths (since this is + only a sample and we don't know if the symbol appears or not), and we + weigh the first 11 samples with weight 3 to account for the balancing + effect of the LZ77 phase on the histogram (more frequent symbols are + more likely to be in backward references instead as literals). */ + var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } + + buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */ + 8, depths, bits, storage_ix, storage) + { + var literal_ratio uint = 0 + for i = 0; i < 256; i++ { + if histogram[i] != 0 { + literal_ratio += uint(histogram[i] * uint32(depths[i])) + } + } + + /* Estimated encoding ratio, millibytes per symbol. */ + return (literal_ratio * 125) / histogram_total + } +} + +/* Builds a command and distance prefix code (each 64 symbols) into "depth" and + "bits" based on "histogram" and stores it into the bit stream. */ +func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. 
*/ + + var cmd_bits [64]uint16 + + createHuffmanTree(histogram, 64, 15, tree[:], depth) + createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) + + /* We have to jump through a few hoops here in order to compute + the command bits because the symbols are in a different order than in + the full alphabet. This looks complicated, but having the symbols + in this order in the command bits saves a few branches in the Emit* + functions. */ + copy(cmd_depth[:], depth[:24]) + + copy(cmd_depth[24:][:], depth[40:][:8]) + copy(cmd_depth[32:][:], depth[24:][:8]) + copy(cmd_depth[40:][:], depth[48:][:8]) + copy(cmd_depth[48:][:], depth[32:][:8]) + copy(cmd_depth[56:][:], depth[56:][:8]) + convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) + copy(bits, cmd_bits[:24]) + copy(bits[24:], cmd_bits[32:][:8]) + copy(bits[32:], cmd_bits[48:][:8]) + copy(bits[40:], cmd_bits[24:][:8]) + copy(bits[48:], cmd_bits[40:][:8]) + copy(bits[56:], cmd_bits[56:][:8]) + convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) + { + /* Create the bit length array for the full command alphabet. */ + var i uint + for i := 0; i < int(64); i++ { + cmd_depth[i] = 0 + } /* only 64 first values were used */ + copy(cmd_depth[:], depth[:8]) + copy(cmd_depth[64:][:], depth[8:][:8]) + copy(cmd_depth[128:][:], depth[16:][:8]) + copy(cmd_depth[192:][:], depth[24:][:8]) + copy(cmd_depth[384:][:], depth[32:][:8]) + for i = 0; i < 8; i++ { + cmd_depth[128+8*i] = depth[40+i] + cmd_depth[256+8*i] = depth[48+i] + cmd_depth[448+8*i] = depth[56+i] + } + + storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) + } + + storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) +} + +/* REQUIRES: insertlen < 6210 */ +func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) { + if insertlen < 6 { + var code uint = insertlen + 40 + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + histo[code]++ + } else if insertlen < 130 { + var tail uint = insertlen - 2 + var nbits uint32 = log2FloorNonZero(tail) - 1 + var prefix uint = tail >> nbits + var inscode uint = uint((nbits << 1) + uint32(prefix) + 42) + writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits + var code uint = uint((nbits << 1) + uint32(prefix) + 20) + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits + var code uint = uint((nbits << 1) + uint32(prefix) + 4) + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> 5) + 30 + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(5, uint64(tail)&31, storage_ix, storage) + writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage) + histo[code]++ + histo[64]++ + } else if copylen < 2120 { + var tail uint = copylen - 72 + var nbits uint32 = log2FloorNonZero(tail) + var code uint = uint(nbits + 28) + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<> nbits) & 1 + var offset uint = (2 + prefix) << nbits + var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80) + writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage) + writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage) + histo[distcode]++ +} + +func emitLiterals(input []byte, len uint, depth 
[]byte, bits []uint16, storage_ix *uint, storage []byte) { + var j uint + for j = 0; j < len; j++ { + var lit byte = input[j] + writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage) + } +} + +/* REQUIRES: len <= 1 << 24. */ +func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { + var nibbles uint = 6 + + /* ISLAST */ + writeBits(1, 0, storage_ix, storage) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + writeBits(2, uint64(nibbles)-4, storage_ix, storage) + writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) + + /* ISUNCOMPRESSED */ + writeSingleBit(is_uncompressed, storage_ix, storage) +} + +func updateBits(n_bits uint, bits uint32, pos uint, array []byte) { + for n_bits > 0 { + var byte_pos uint = pos >> 3 + var n_unchanged_bits uint = pos & 7 + var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits) + var total_bits uint = n_unchanged_bits + n_changed_bits + var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1) + var unchanged_bits uint32 = uint32(array[byte_pos]) & mask + var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1) + array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits) + n_bits -= n_changed_bits + bits >>= n_changed_bits + pos += n_changed_bits + } +} + +func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) { + var bitpos uint = new_storage_ix & 7 + var mask uint = (1 << bitpos) - 1 + storage[new_storage_ix>>3] &= byte(mask) + *storage_ix = new_storage_ix +} + +var shouldMergeBlock_kSampleRate uint = 43 + +func shouldMergeBlock(data []byte, len uint, depths []byte) bool { + var histo = [256]uint{0} + var i uint + for i = 0; i < len; i += shouldMergeBlock_kSampleRate { + histo[data[i]]++ + } + { + var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate + var r float64 = (fastLog2(total)+0.5)*float64(total) + 200 + for i = 0; i < 256; i++ { + r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i])) + } + + return r >= 0.0 + } +} + +func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool { + var compressed uint = uint(-cap(next_emit) + cap(metablock_start)) + if compressed*50 > insertlen { + return false + } else { + return literal_ratio > 980 + } +} + +func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) { + var len uint = uint(-cap(end) + cap(begin)) + rewindBitPosition1(storage_ix_start, storage_ix, storage) + storeMetaBlockHeader1(uint(len), true, storage_ix, storage) + *storage_ix = (*storage_ix + 7) &^ 7 + copy(storage[*storage_ix>>3:], begin[:len]) + *storage_ix += uint(len << 3) + storage[*storage_ix>>3] = 0 +} + +var kCmdHistoSeed = [128]uint32{ + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, +} + +var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15 +var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
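A minimal standalone sketch of the bit-patching idea behind updateBits above (the helper name patchBits and the demo values are hypothetical and chosen only for illustration; it assumes the same LSB-first bit order that writeBits uses):

package main

import "fmt"

// patchBits overwrites nBits bits of buf starting at bit position pos
// (bit 0 is the least-significant bit of buf[0]); every other bit is kept.
func patchBits(nBits uint, value uint32, pos uint, buf []byte) {
	for nBits > 0 {
		var bytePos uint = pos >> 3
		var lowKeep uint = pos & 7 // low bits of this byte to preserve
		var chunk uint = nBits
		if chunk > 8-lowKeep {
			chunk = 8 - lowKeep // bits we can rewrite in this byte
		}
		var total uint = lowKeep + chunk
		var keepMask uint32 = (^((1 << total) - 1)) | ((1 << lowKeep) - 1)
		var kept uint32 = uint32(buf[bytePos]) & keepMask
		var changed uint32 = value & ((1 << chunk) - 1)
		buf[bytePos] = byte(changed<<lowKeep | kept)
		nBits -= chunk
		value >>= chunk
		pos += chunk
	}
}

func main() {
	buf := []byte{0xff, 0x00, 0xff}
	patchBits(12, 0xabc, 4, buf) // rewrite bits 4..15 with the value 0xabc
	fmt.Printf("% x\n", buf)     // cf ab ff: surrounding bits stay untouched
}

The point of the sketch is that already-written bits around the patched field survive unchanged; the encoder relies on the same property when compressFragmentFastImpl below decides to merge the next block into the current meta-block and retroactively widens the length field it has already emitted via updateBits(20, ...).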
+ +func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var cmd_histo [128]uint32 + var ip_end int + var next_emit int = 0 + var base_ip int = 0 + var input int = 0 + const kInputMarginBytes uint = windowGap + const kMinMatchLen uint = 5 + var metablock_start int = input + var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + var total_block_size uint = block_size + var mlen_storage_ix uint = *storage_ix + 3 + var lit_depth [256]byte + var lit_bits [256]uint16 + var literal_ratio uint + var ip int + var last_distance int + var shift uint = 64 - table_bits + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + + /* Save the start of the first block for position and distance computations. + */ + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + { + /* Store the pre-compressed command and distance prefix codes. */ + var i uint + for i = 0; i+7 < *cmd_code_numbits; i += 8 { + writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage) + } + } + + writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage) + + /* Initialize the command and distance histograms. We will gather + statistics of command and distance codes during the processing + of this block and use it to update the command and distance + prefix codes for the next block. */ +emit_commands: + copy(cmd_histo[:], kCmdHistoSeed[:]) + + /* "ip" is the input pointer. */ + ip = input + + last_distance = -1 + ip_end = int(uint(input) + block_size) + + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes) + var ip_limit int = int(uint(input) + len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash5(in[ip:], shift); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 5-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (i.e. 
right-shifting by five) gives the + number of bytes to move ahead for each iteration. */ + + var candidate int + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + assert(hash == hash5(in[next_ip:], shift)) + ip = next_ip + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash5(in[next_ip:], shift) + candidate = ip - last_distance + if isMatch5(in[ip:], in[candidate:]) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch5(in[ip:], in[candidate:]) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit" to the bit stream, and then see if we can find a next match + immediately afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + var distance int = int(base - candidate) + /* We have a 5-byte match at ip, and we need to emit bytes in + [next_emit, ip). */ + + var insert uint = uint(base - next_emit) + ip += int(matched) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage) + input_size -= uint(base - input) + input = base + next_emit = input + goto next_block + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } + + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + if distance == last_distance { + writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage) + cmd_histo[64]++ + } else { + emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + last_distance = distance + } + + emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + for isMatch5(in[ip:], in[candidate:]) { + var base int = ip + /* We have a 5-byte match at ip, and no need to emit any literal bytes + prior to ip. 
*/ + + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + if ip-candidate > maxDistance_compress_fragment { + break + } + ip += int(matched) + last_distance = int(base - candidate) /* > 0 */ + emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash5(in[ip:], shift) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + input += int(block_size) + input_size -= block_size + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize) + + /* Decide if we want to continue this meta-block instead of emitting the + last insert-only command. */ + if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) { + assert(total_block_size > 1<<16) + + /* Update the size of the current meta-block and continue emitting commands. + We can do this because the current size and the new size both have 5 + nibbles. */ + total_block_size += block_size + + updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage) + goto emit_commands + } + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint = uint(ip_end - next_emit) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage) + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } + } + + next_emit = ip_end + + /* If we have more data, write a new meta-block header and prefix codes and + then continue emitting commands. */ +next_block: + if input_size > 0 { + metablock_start = input + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + total_block_size = block_size + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + mlen_storage_ix = *storage_ix + 3 + + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. 
*/ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage) + goto emit_commands + } + + if !is_last { + /* If this is not the last block, update the command and distance prefix + codes for the next block and store the compressed forms. */ + cmd_code[0] = 0 + + *cmd_code_numbits = 0 + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code) + } +} + +/* Compresses "input" string to the "*storage" buffer as one or more complete + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + "cmd_depth" and "cmd_bits" contain the command and distance prefix codes + (see comment in encode.h) used for the encoding of this input fragment. + If "is_last" is 0, they are updated to reflect the statistics + of this input fragment, to be used for the encoding of the next fragment. + + "*cmd_code_numbits" is the number of bits of the compressed representation + of the command and distance prefix codes, and "cmd_code" is an array of + at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed + command and distance prefix codes. If "is_last" is 0, these are also + updated to represent the updated "cmd_depth" and "cmd_bits". + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ +func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + + if input_size == 0 { + assert(is_last) + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go new file mode 100644 index 0000000..79f9c7f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go @@ -0,0 +1,773 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. 
This function uses two-pass processing: in the first pass we save + the found backward matches and literal bytes into a buffer, and in the + second pass we emit them into the bit stream using prefix codes built based + on the actual command and literal byte histograms. */ + +const kCompressFragmentTwoPassBlockSize uint = 1 << 17 + +func hash1(p []byte, shift uint, length uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 { + assert(offset <= 8-length) + { + var h uint64 = ((v >> (8 * offset)) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch1(p1 []byte, p2 []byte, length uint) bool { + if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) { + return false + } + if length == 4 { + return true + } + return p1[4] == p2[4] && p1[5] == p2[5] +} + +/* +Builds a command and distance prefix code (each 64 symbols) into "depth" and + + "bits" based on "histogram" and stores it into the bit stream. +*/ +func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ + + var cmd_bits [64]uint16 + createHuffmanTree(histogram, 64, 15, tree[:], depth) + createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) + + /* We have to jump through a few hoops here in order to compute + the command bits because the symbols are in a different order than in + the full alphabet. This looks complicated, but having the symbols + in this order in the command bits saves a few branches in the Emit* + functions. */ + copy(cmd_depth[:], depth[24:][:24]) + + copy(cmd_depth[24:][:], depth[:8]) + copy(cmd_depth[32:][:], depth[48:][:8]) + copy(cmd_depth[40:][:], depth[8:][:8]) + copy(cmd_depth[48:][:], depth[56:][:8]) + copy(cmd_depth[56:][:], depth[16:][:8]) + convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) + copy(bits, cmd_bits[24:][:8]) + copy(bits[8:], cmd_bits[40:][:8]) + copy(bits[16:], cmd_bits[56:][:8]) + copy(bits[24:], cmd_bits[:24]) + copy(bits[48:], cmd_bits[32:][:8]) + copy(bits[56:], cmd_bits[48:][:8]) + convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) + { + /* Create the bit length array for the full command alphabet. 
*/ + var i uint + for i := 0; i < int(64); i++ { + cmd_depth[i] = 0 + } /* only 64 first values were used */ + copy(cmd_depth[:], depth[24:][:8]) + copy(cmd_depth[64:][:], depth[32:][:8]) + copy(cmd_depth[128:][:], depth[40:][:8]) + copy(cmd_depth[192:][:], depth[48:][:8]) + copy(cmd_depth[384:][:], depth[56:][:8]) + for i = 0; i < 8; i++ { + cmd_depth[128+8*i] = depth[i] + cmd_depth[256+8*i] = depth[8+i] + cmd_depth[448+8*i] = depth[16+i] + } + + storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) + } + + storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) +} + +func emitInsertLen(insertlen uint32, commands *[]uint32) { + if insertlen < 6 { + (*commands)[0] = insertlen + } else if insertlen < 130 { + var tail uint32 = insertlen - 2 + var nbits uint32 = log2FloorNonZero(uint(tail)) - 1 + var prefix uint32 = tail >> nbits + var inscode uint32 = (nbits << 1) + prefix + 2 + var extra uint32 = tail - (prefix << nbits) + (*commands)[0] = inscode | extra<<8 + } else if insertlen < 2114 { + var tail uint32 = insertlen - 66 + var nbits uint32 = log2FloorNonZero(uint(tail)) + var code uint32 = nbits + 10 + var extra uint32 = tail - (1 << nbits) + (*commands)[0] = code | extra<<8 + } else if insertlen < 6210 { + var extra uint32 = insertlen - 2114 + (*commands)[0] = 21 | extra<<8 + } else if insertlen < 22594 { + var extra uint32 = insertlen - 6210 + (*commands)[0] = 22 | extra<<8 + } else { + var extra uint32 = insertlen - 22594 + (*commands)[0] = 23 | extra<<8 + } + + *commands = (*commands)[1:] +} + +func emitCopyLen(copylen uint, commands *[]uint32) { + if copylen < 10 { + (*commands)[0] = uint32(copylen + 38) + } else if copylen < 134 { + var tail uint = copylen - 6 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 44 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else if copylen < 2118 { + var tail uint = copylen - 70 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else { + var extra uint = copylen - 2118 + (*commands)[0] = uint32(63 | extra<<8) + } + + *commands = (*commands)[1:] +} + +func emitCopyLenLastDistance(copylen uint, commands *[]uint32) { + if copylen < 12 { + (*commands)[0] = uint32(copylen + 20) + *commands = (*commands)[1:] + } else if copylen < 72 { + var tail uint = copylen - 8 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 28 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + } else if copylen < 136 { + var tail uint = copylen - 8 + var code uint = (tail >> 5) + 54 + var extra uint = tail & 31 + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else if copylen < 2120 { + var tail uint = copylen - 72 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + var extra uint = copylen - 2120 + (*commands)[0] = uint32(63 | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } +} + +func emitDistance(distance uint32, commands *[]uint32) { + var 
d uint32 = distance + 3 + var nbits uint32 = log2FloorNonZero(uint(d)) - 1 + var prefix uint32 = (d >> nbits) & 1 + var offset uint32 = (2 + prefix) << nbits + var distcode uint32 = 2*(nbits-1) + prefix + 80 + var extra uint32 = d - offset + (*commands)[0] = distcode | extra<<8 + *commands = (*commands)[1:] +} + +/* REQUIRES: len <= 1 << 24. */ +func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { + var nibbles uint = 6 + + /* ISLAST */ + writeBits(1, 0, storage_ix, storage) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + writeBits(2, uint64(nibbles)-4, storage_ix, storage) + writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) + + /* ISUNCOMPRESSED */ + writeSingleBit(is_uncompressed, storage_ix, storage) +} + +func storeMetaBlockHeaderBW(len uint, is_uncompressed bool, bw *bitWriter) { + var nibbles uint = 6 + + /* ISLAST */ + bw.writeBits(1, 0) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + bw.writeBits(2, uint64(nibbles)-4) + bw.writeBits(nibbles*4, uint64(len)-1) + + /* ISUNCOMPRESSED */ + bw.writeSingleBit(is_uncompressed) +} + +func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { + var ip int = 0 + var shift uint = 64 - table_bits + var ip_end int = int(block_size) + var base_ip int = -cap(base_ip_ptr) + cap(input) + var next_emit int = 0 + var last_distance int = -1 + /* "ip" is the input pointer. */ + + const kInputMarginBytes uint = windowGap + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-min_match, input_size-kInputMarginBytes) + var ip_limit int = int(len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash1(input[ip:], shift, min_match); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 6-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (ie. right-shifting by five) gives the + number of bytes to move ahead for each iteration. 
*/ + + var candidate int + + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + ip = next_ip + assert(hash == hash1(input[ip:], shift, min_match)) + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash1(input[next_ip:], shift, min_match) + candidate = ip - last_distance + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit", and then see if we can find a next match immediately + afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) + var distance int = int(base - candidate) + /* We have a 6-byte match at ip, and we need to emit bytes in + [next_emit, ip). */ + + var insert int = int(base - next_emit) + ip += int(matched) + emitInsertLen(uint32(insert), commands) + copy(*literals, input[next_emit:][:uint(insert)]) + *literals = (*literals)[insert:] + if distance == last_distance { + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + emitDistance(uint32(distance), commands) + last_distance = distance + } + + emitCopyLenLastDistance(matched, commands) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + { + var input_bytes uint64 + var cur_hash uint32 + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some + positions within the last copy. 
*/ + + var prev_hash uint32 + if min_match == 4 { + input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) + cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } else { + input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 5) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 4) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) + cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + for ip-candidate <= maxDistance_compress_fragment && isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + var base int = ip + /* We have a 6-byte match at ip, and no need to emit any + literal bytes prior to ip. */ + + var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) + ip += int(matched) + last_distance = int(base - candidate) /* > 0 */ + emitCopyLen(matched, commands) + emitDistance(uint32(last_distance), commands) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + { + var input_bytes uint64 + var cur_hash uint32 + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some + positions within the last copy. 
*/ + + var prev_hash uint32 + if min_match == 4 { + input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) + cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } else { + input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 5) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 4) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) + cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash1(input[ip:], shift, min_match) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint32 = uint32(ip_end - next_emit) + emitInsertLen(insert, commands) + copy(*literals, input[next_emit:][:insert]) + *literals = (*literals)[insert:] + } +} + +var storeCommands_kNumExtraBits = [128]uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 6, + 7, + 7, + 8, + 8, + 9, + 9, + 10, + 10, + 11, + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 21, + 21, + 22, + 22, + 23, + 23, + 24, + 24, +} +var storeCommands_kInsertOffset = [24]uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) { + var lit_depths [256]byte + var lit_bits [256]uint16 + var lit_histo = [256]uint32{0} + var cmd_depths = [128]byte{0} + var cmd_bits = [128]uint16{0} + var cmd_histo = [128]uint32{0} + var i uint + for i = 0; i < num_literals; i++ { + lit_histo[literals[i]]++ + } + + buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */ + 8, lit_depths[:], lit_bits[:], storage_ix, storage) + + for i = 0; i < num_commands; i++ { + var code uint32 = commands[i] & 0xFF + assert(code < 128) + cmd_histo[code]++ + } + + cmd_histo[1] += 1 + cmd_histo[2] += 1 + cmd_histo[64] += 1 + cmd_histo[84] += 1 + buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage) + + for i = 0; i < num_commands; i++ { + var cmd uint32 = commands[i] + var code uint32 = cmd & 0xFF + var 
extra uint32 = cmd >> 8 + assert(code < 128) + writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage) + writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage) + if code < 24 { + var insert uint32 = storeCommands_kInsertOffset[code] + extra + var j uint32 + for j = 0; j < insert; j++ { + var lit byte = literals[0] + writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage) + literals = literals[1:] + } + } + } +} + +/* Acceptable loss for uncompressible speedup is 2% */ +const minRatio = 0.98 + +const sampleRate = 43 + +func shouldCompress(input []byte, input_size uint, num_literals uint) bool { + var corpus_size float64 = float64(input_size) + if float64(num_literals) < minRatio*corpus_size { + return true + } else { + var literal_histo = [256]uint32{0} + var max_total_bit_cost float64 = corpus_size * 8 * minRatio / sampleRate + var i uint + for i = 0; i < input_size; i += sampleRate { + literal_histo[input[i]]++ + } + + return bitsEntropy(literal_histo[:], 256) < max_total_bit_cost + } +} + +func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) { + var bitpos uint = new_storage_ix & 7 + var mask uint = (1 << bitpos) - 1 + storage[new_storage_ix>>3] &= byte(mask) + *storage_ix = new_storage_ix +} + +func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) { + storeMetaBlockHeader(input_size, true, storage_ix, storage) + *storage_ix = (*storage_ix + 7) &^ 7 + copy(storage[*storage_ix>>3:], input[:input_size]) + *storage_ix += input_size << 3 + storage[*storage_ix>>3] = 0 +} + +func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) { + /* Save the start of the first block for position and distance computations. + */ + var base_ip []byte = input + + for input_size > 0 { + var block_size uint = brotli_min_size_t(input_size, kCompressFragmentTwoPassBlockSize) + var commands []uint32 = command_buf + var literals []byte = literal_buf + var num_literals uint + createCommands(input, block_size, input_size, base_ip, table, table_bits, min_match, &literals, &commands) + num_literals = uint(-cap(literals) + cap(literal_buf)) + if shouldCompress(input, block_size, num_literals) { + var num_commands uint = uint(-cap(commands) + cap(command_buf)) + storeMetaBlockHeader(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage) + } else { + /* Since we did not find many backward references and the entropy of + the data is close to 8 bits, we can simply emit an uncompressed block. + This makes compression speed of uncompressible data about 3x faster. */ + emitUncompressedMetaBlock(input, block_size, storage_ix, storage) + } + + input = input[block_size:] + input_size -= block_size + } +} + +/* +Compresses "input" string to the "*storage" buffer as one or more complete + + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: "command_buf" and "literal_buf" point to at least + kCompressFragmentTwoPassBlockSize long arrays. 
+ REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is a power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) +*/ +func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + var min_match uint + if table_bits <= 15 { + min_match = 4 + } else { + min_match = 6 + } + compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + rewindBitPosition(initial_storage_ix, storage_ix, storage) + emitUncompressedMetaBlock(input, input_size, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/constants.go b/vendor/github.com/andybalholm/brotli/constants.go new file mode 100644 index 0000000..a880dff --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/constants.go @@ -0,0 +1,77 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Specification: 7.3. Encoding of the context map */ +const contextMapMaxRle = 16 + +/* Specification: 2. Compressed representation overview */ +const maxNumberOfBlockTypes = 256 + +/* Specification: 3.3. Alphabet sizes: insert-and-copy length */ +const numLiteralSymbols = 256 + +const numCommandSymbols = 704 + +const numBlockLenSymbols = 26 + +const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle) + +const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2) + +/* Specification: 3.5. Complex prefix codes */ +const repeatPreviousCodeLength = 16 + +const repeatZeroCodeLength = 17 + +const codeLengthCodes = (repeatZeroCodeLength + 1) + +/* "code length of 8 is repeated" */ +const initialRepeatedCodeLength = 8 + +/* "Large Window Brotli" */ +const largeMaxDistanceBits = 62 + +const largeMinWbits = 10 + +const largeMaxWbits = 30 + +/* Specification: 4. Encoding of distances */ +const numDistanceShortCodes = 16 + +const maxNpostfix = 3 + +const maxNdirect = 120 + +const maxDistanceBits = 24 + +func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint { + return numDistanceShortCodes + NDIRECT + uint(MAXNBITS<<(NPOSTFIX+1)) +} + +/* numDistanceSymbols == 1128 */ +const numDistanceSymbols = 1128 + +const maxDistance = 0x3FFFFFC + +const maxAllowedDistance = 0x7FFFFFFC + +/* 7.1. Context modes and context ID lookup for literals */ +/* "context IDs for literals are in the range of 0..63" */ +const literalContextBits = 6 + +/* 7.2. Context ID for distances */ +const distanceContextBits = 2 + +/* 9.1. Format of the Stream Header */ +/* Number of slack bytes for window size. Don't confuse + with BROTLI_NUM_DISTANCE_SHORT_CODES. 
*/ +const windowGap = 16 + +func maxBackwardLimit(W uint) uint { + return (uint(1) << W) - windowGap +} diff --git a/vendor/github.com/andybalholm/brotli/context.go b/vendor/github.com/andybalholm/brotli/context.go new file mode 100644 index 0000000..884ff8a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/context.go @@ -0,0 +1,2176 @@ +package brotli + +/* Lookup table to map the previous two bytes to a context id. + +There are four different context modeling modes defined here: + contextLSB6: context id is the least significant 6 bits of the last byte, + contextMSB6: context id is the most significant 6 bits of the last byte, + contextUTF8: second-order context model tuned for UTF8-encoded text, + contextSigned: second-order context model tuned for signed integers. + +If |p1| and |p2| are the previous two bytes, and |mode| is current context +mode, we calculate the context as: + + context = ContextLut(mode)[p1] | ContextLut(mode)[p2 + 256]. + +For contextUTF8 mode, if the previous two bytes are ASCII characters +(i.e. < 128), this will be equivalent to + + context = 4 * context1(p1) + context2(p2), + +where context1 is based on the previous byte in the following way: + + 0 : non-ASCII control + 1 : \t, \n, \r + 2 : space + 3 : other punctuation + 4 : " ' + 5 : % + 6 : ( < [ { + 7 : ) > ] } + 8 : , ; : + 9 : . + 10 : = + 11 : number + 12 : upper-case vowel + 13 : upper-case consonant + 14 : lower-case vowel + 15 : lower-case consonant + +and context2 is based on the second last byte: + + 0 : control, space + 1 : punctuation + 2 : upper-case letter, number + 3 : lower-case letter + +If the last byte is ASCII, and the second last byte is not (in a valid UTF8 +stream it will be a continuation byte, value between 128 and 191), the +context is the same as if the second last byte was an ASCII control or space. + +If the last byte is a UTF8 lead byte (value >= 192), then the next byte will +be a continuation byte and the context id is 2 or 3 depending on the LSB of +the last byte and to a lesser extent on the second last byte if it is ASCII. + +If the last byte is a UTF8 continuation byte, the second last byte can be: + - continuation byte: the next byte is probably ASCII or lead byte (assuming + 4-byte UTF8 characters are rare) and the context id is 0 or 1. + - lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1 + - lead byte (208 - 255): next byte is continuation byte, context is 2 or 3 + +The possible value combinations of the previous two bytes, the range of +context ids and the type of the next byte is summarized in the table below: + +|--------\-----------------------------------------------------------------| +| \ Last byte | +| Second \---------------------------------------------------------------| +| last byte \ ASCII | cont. byte | lead byte | +| \ (0-127) | (128-191) | (192-) | +|=============|===================|=====================|==================| +| ASCII | next: ASCII/lead | not valid | next: cont. | +| (0-127) | context: 4 - 63 | | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. | +| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: ASCII/lead | not valid | +| (192-207) | | context: 0 - 1 | | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: cont. 
| not valid | +| (208-) | | context: 2 - 3 | | +|-------------|-------------------|---------------------|------------------| +*/ + +const ( + contextLSB6 = 0 + contextMSB6 = 1 + contextUTF8 = 2 + contextSigned = 3 +) + +/* Common context lookup table for all context modes. */ +var kContextLookup = [2048]byte{ + /* CONTEXT_LSB6, last byte. */ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + + /* CONTEXT_LSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_MSB6, last byte. 
*/ + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 7, + 7, + 7, + 7, + 8, + 8, + 8, + 8, + 9, + 9, + 9, + 9, + 10, + 10, + 10, + 10, + 11, + 11, + 11, + 11, + 12, + 12, + 12, + 12, + 13, + 13, + 13, + 13, + 14, + 14, + 14, + 14, + 15, + 15, + 15, + 15, + 16, + 16, + 16, + 16, + 17, + 17, + 17, + 17, + 18, + 18, + 18, + 18, + 19, + 19, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 25, + 25, + 25, + 25, + 26, + 26, + 26, + 26, + 27, + 27, + 27, + 27, + 28, + 28, + 28, + 28, + 29, + 29, + 29, + 29, + 30, + 30, + 30, + 30, + 31, + 31, + 31, + 31, + 32, + 32, + 32, + 32, + 33, + 33, + 33, + 33, + 34, + 34, + 34, + 34, + 35, + 35, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 43, + 43, + 43, + 43, + 44, + 44, + 44, + 44, + 45, + 45, + 45, + 45, + 46, + 46, + 46, + 46, + 47, + 47, + 47, + 47, + 48, + 48, + 48, + 48, + 49, + 49, + 49, + 49, + 50, + 50, + 50, + 50, + 51, + 51, + 51, + 51, + 52, + 52, + 52, + 52, + 53, + 53, + 53, + 53, + 54, + 54, + 54, + 54, + 55, + 55, + 55, + 55, + 56, + 56, + 56, + 56, + 57, + 57, + 57, + 57, + 58, + 58, + 58, + 58, + 59, + 59, + 59, + 59, + 60, + 60, + 60, + 60, + 61, + 61, + 61, + 61, + 62, + 62, + 62, + 62, + 63, + 63, + 63, + 63, + + /* CONTEXT_MSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_UTF8, last byte. */ + /* ASCII range. 
*/ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4, + 4, + 0, + 0, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8, + 12, + 16, + 12, + 12, + 20, + 12, + 16, + 24, + 28, + 12, + 12, + 32, + 12, + 36, + 12, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 32, + 32, + 24, + 40, + 28, + 12, + 12, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 24, + 12, + 28, + 12, + 12, + 12, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 24, + 12, + 28, + 12, + 0, + + /* UTF8 continuation byte range. */ + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + + /* UTF8 lead byte range. */ + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + + /* CONTEXT_UTF8 second last byte. */ + /* ASCII range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 1, + 1, + 1, + 1, + 0, + + /* UTF8 continuation byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* UTF8 lead byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + + /* CONTEXT_SIGNED, last byte, same as the above values shifted by 3 bits. 
*/ + 0, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 56, + + /* CONTEXT_SIGNED, second last byte. */ + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 7, +} + +type contextLUT []byte + +func getContextLUT(mode int) contextLUT { + return kContextLookup[mode<<9:] +} + +func getContext(p1 byte, p2 byte, lut contextLUT) byte { + return lut[p1] | lut[256+int(p2)] +} diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go new file mode 100644 index 0000000..9d9513b --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/decode.go @@ -0,0 +1,2581 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +const ( + decoderResultError = 0 + decoderResultSuccess = 1 + decoderResultNeedsMoreInput = 2 + decoderResultNeedsMoreOutput = 3 +) + +/** + * Error code for detailed logging / production debugging. + * + * See ::BrotliDecoderGetErrorCode and ::BROTLI_LAST_ERROR_CODE. + */ +const ( + decoderNoError = 0 + decoderSuccess = 1 + decoderNeedsMoreInput = 2 + decoderNeedsMoreOutput = 3 + decoderErrorFormatExuberantNibble = -1 + decoderErrorFormatReserved = -2 + decoderErrorFormatExuberantMetaNibble = -3 + decoderErrorFormatSimpleHuffmanAlphabet = -4 + decoderErrorFormatSimpleHuffmanSame = -5 + decoderErrorFormatClSpace = -6 + decoderErrorFormatHuffmanSpace = -7 + decoderErrorFormatContextMapRepeat = -8 + decoderErrorFormatBlockLength1 = -9 + decoderErrorFormatBlockLength2 = -10 + decoderErrorFormatTransform = -11 + decoderErrorFormatDictionary = -12 + decoderErrorFormatWindowBits = -13 + decoderErrorFormatPadding1 = -14 + decoderErrorFormatPadding2 = -15 + decoderErrorFormatDistance = -16 + decoderErrorDictionaryNotSet = -19 + decoderErrorInvalidArguments = -20 + decoderErrorAllocContextModes = -21 + decoderErrorAllocTreeGroups = -22 + decoderErrorAllocContextMap = -25 + decoderErrorAllocRingBuffer1 = -26 + decoderErrorAllocRingBuffer2 = -27 + decoderErrorAllocBlockTypeTrees = -30 + decoderErrorUnreachable = -31 +) + +const huffmanTableBits = 8 + +const huffmanTableMask = 0xFF + +/* We need the slack region for the following reasons: + - doing up to two 16-byte copies for fast backward copying + - inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */ +const kRingBufferWriteAheadSlack uint32 = 42 + +var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} + +/* Static prefix code for the complex code length code lengths. */ +var kCodeLengthPrefixLength = [16]byte{2, 2, 2, 3, 2, 2, 2, 4, 2, 2, 2, 3, 2, 2, 2, 4} + +var kCodeLengthPrefixValue = [16]byte{0, 4, 3, 2, 0, 4, 3, 1, 0, 4, 3, 2, 0, 4, 3, 5} + +/* Saves error code and converts it to BrotliDecoderResult. */ +func saveErrorCode(s *Reader, e int) int { + s.error_code = int(e) + switch e { + case decoderSuccess: + return decoderResultSuccess + + case decoderNeedsMoreInput: + return decoderResultNeedsMoreInput + + case decoderNeedsMoreOutput: + return decoderResultNeedsMoreOutput + + default: + return decoderResultError + } +} + +/* Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli". + Precondition: bit-reader accumulator has at least 8 bits. */ +func decodeWindowBits(s *Reader, br *bitReader) int { + var n uint32 + var large_window bool = s.large_window + s.large_window = false + takeBits(br, 1, &n) + if n == 0 { + s.window_bits = 16 + return decoderSuccess + } + + takeBits(br, 3, &n) + if n != 0 { + s.window_bits = 17 + n + return decoderSuccess + } + + takeBits(br, 3, &n) + if n == 1 { + if large_window { + takeBits(br, 1, &n) + if n == 1 { + return decoderErrorFormatWindowBits + } + + s.large_window = true + return decoderSuccess + } else { + return decoderErrorFormatWindowBits + } + } + + if n != 0 { + s.window_bits = 8 + n + return decoderSuccess + } + + s.window_bits = 17 + return decoderSuccess +} + +/* Decodes a number in the range [0..255], by reading 1 - 11 bits. 
*/ +func decodeVarLenUint8(s *Reader, br *bitReader, value *uint32) int { + var bits uint32 + switch s.substate_decode_uint8 { + case stateDecodeUint8None: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 0 + return decoderSuccess + } + fallthrough + + /* Fall through. */ + case stateDecodeUint8Short: + if !safeReadBits(br, 3, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Short + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 1 + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + } + + /* Use output value as a temporary storage. It MUST be persisted. */ + *value = bits + fallthrough + + /* Fall through. */ + case stateDecodeUint8Long: + if !safeReadBits(br, *value, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Long + return decoderNeedsMoreInput + } + + *value = (1 << *value) + bits + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + + default: + return decoderErrorUnreachable + } +} + +/* Decodes a metablock length and flags by reading 2 - 31 bits. */ +func decodeMetaBlockLength(s *Reader, br *bitReader) int { + var bits uint32 + var i int + for { + switch s.substate_metablock_header { + case stateMetablockHeaderNone: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_last_metablock = 1 + } else { + s.is_last_metablock = 0 + } + s.meta_block_remaining_len = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + if s.is_last_metablock == 0 { + s.substate_metablock_header = stateMetablockHeaderNibbles + break + } + + s.substate_metablock_header = stateMetablockHeaderEmpty + fallthrough + + /* Fall through. */ + case stateMetablockHeaderEmpty: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.substate_metablock_header = stateMetablockHeaderNibbles + fallthrough + + /* Fall through. */ + case stateMetablockHeaderNibbles: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + s.size_nibbles = uint(byte(bits + 4)) + s.loop_counter = 0 + if bits == 3 { + s.is_metadata = 1 + s.substate_metablock_header = stateMetablockHeaderReserved + break + } + + s.substate_metablock_header = stateMetablockHeaderSize + fallthrough + + /* Fall through. */ + case stateMetablockHeaderSize: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 4, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 4 && bits == 0 { + return decoderErrorFormatExuberantNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*4)) + } + + s.substate_metablock_header = stateMetablockHeaderUncompressed + fallthrough + + /* Fall through. */ + case stateMetablockHeaderUncompressed: + if s.is_last_metablock == 0 { + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_uncompressed = 1 + } else { + s.is_uncompressed = 0 + } + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + case stateMetablockHeaderReserved: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + return decoderErrorFormatReserved + } + + s.substate_metablock_header = stateMetablockHeaderBytes + fallthrough + + /* Fall through. 
*/ + case stateMetablockHeaderBytes: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.size_nibbles = uint(byte(bits)) + s.substate_metablock_header = stateMetablockHeaderMetadata + fallthrough + + /* Fall through. */ + case stateMetablockHeaderMetadata: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 8, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 1 && bits == 0 { + return decoderErrorFormatExuberantMetaNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*8)) + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes the Huffman code. + This method doesn't read data from the bit reader, BUT drops the amount of + bits that correspond to the decoded symbol. + bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits. */ +func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 { + table = table[bits&huffmanTableMask:] + if table[0].bits > huffmanTableBits { + var nbits uint32 = uint32(table[0].bits) - huffmanTableBits + dropBits(br, huffmanTableBits) + table = table[uint32(table[0].value)+((bits>>huffmanTableBits)&bitMask(nbits)):] + } + + dropBits(br, uint32(table[0].bits)) + return uint32(table[0].value) +} + +/* Reads and decodes the next Huffman code from bit-stream. + This method peeks 16 bits of input and drops 0 - 15 of them. */ +func readSymbol(table []huffmanCode, br *bitReader) uint32 { + return decodeSymbol(get16BitsUnmasked(br), table, br) +} + +/* Same as DecodeSymbol, but it is known that there is less than 15 bits of + input are currently available. */ +func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + var available_bits uint32 = getAvailableBits(br) + if available_bits == 0 { + if table[0].bits == 0 { + *result = uint32(table[0].value) + return true + } + + return false /* No valid bits at all. */ + } + + val = uint32(getBitsUnmasked(br)) + table = table[val&huffmanTableMask:] + if table[0].bits <= huffmanTableBits { + if uint32(table[0].bits) <= available_bits { + dropBits(br, uint32(table[0].bits)) + *result = uint32(table[0].value) + return true + } else { + return false /* Not enough bits for the first level. */ + } + } + + if available_bits <= huffmanTableBits { + return false /* Not enough bits to move to the second level. */ + } + + /* Speculatively drop HUFFMAN_TABLE_BITS. */ + val = (val & bitMask(uint32(table[0].bits))) >> huffmanTableBits + + available_bits -= huffmanTableBits + table = table[uint32(table[0].value)+val:] + if available_bits < uint32(table[0].bits) { + return false /* Not enough bits for the second level. */ + } + + dropBits(br, huffmanTableBits+uint32(table[0].bits)) + *result = uint32(table[0].value) + return true +} + +func safeReadSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + if safeGetBits(br, 15, &val) { + *result = decodeSymbol(val, table, br) + return true + } + + return safeDecodeSymbol(table, br, result) +} + +/* Makes a look-up in first level Huffman table. Peeks 8 bits. 
*/ +func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, value *uint32) { + if safe != 0 { + return + } + + table = table[getBits(br, huffmanTableBits):] + *bits = uint32(table[0].bits) + *value = uint32(table[0].value) +} + +/* Decodes the next Huffman code using data prepared by PreloadSymbol. + Reads 0 - 15 bits. Also peeks 8 following bits. */ +func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 { + var result uint32 = *value + var ext []huffmanCode + if *bits > huffmanTableBits { + var val uint32 = get16BitsUnmasked(br) + ext = table[val&huffmanTableMask:][*value:] + var mask uint32 = bitMask((*bits - huffmanTableBits)) + dropBits(br, huffmanTableBits) + ext = ext[(val>>huffmanTableBits)&mask:] + dropBits(br, uint32(ext[0].bits)) + result = uint32(ext[0].value) + } else { + dropBits(br, *bits) + } + + preloadSymbol(0, table, br, bits, value) + return result +} + +func log2Floor(x uint32) uint32 { + var result uint32 = 0 + for x != 0 { + x >>= 1 + result++ + } + + return result +} + +/* Reads (s->symbol + 1) symbols. + Totally 1..4 symbols are read, 1..11 bits each. + The list of symbols MUST NOT contain duplicates. */ +func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int { + var br *bitReader = &s.br + var max_bits uint32 = log2Floor(alphabet_size - 1) + var i uint32 = s.sub_loop_counter + /* max_bits == 1..11; symbol == 0..3; 1..44 bits will be read. */ + + var num_symbols uint32 = s.symbol + for i <= num_symbols { + var v uint32 + if !safeReadBits(br, max_bits, &v) { + s.sub_loop_counter = i + s.substate_huffman = stateHuffmanSimpleRead + return decoderNeedsMoreInput + } + + if v >= max_symbol { + return decoderErrorFormatSimpleHuffmanAlphabet + } + + s.symbols_lists_array[i] = uint16(v) + i++ + } + + for i = 0; i < num_symbols; i++ { + var k uint32 = i + 1 + for ; k <= num_symbols; k++ { + if s.symbols_lists_array[i] == s.symbols_lists_array[k] { + return decoderErrorFormatSimpleHuffmanSame + } + } + } + + return decoderSuccess +} + +/* Process single decoded symbol code length: + A) reset the repeat variable + B) remember code length (if it is not 0) + C) extend corresponding index-chain + D) reduce the Huffman space + E) update the histogram */ +func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + *repeat = 0 + if code_len != 0 { /* code_len == 1..15 */ + symbolListPut(symbol_lists, next_symbol[code_len], uint16(*symbol)) + next_symbol[code_len] = int(*symbol) + *prev_code_len = code_len + *space -= 32768 >> code_len + code_length_histo[code_len]++ + } + + (*symbol)++ +} + +/* Process repeated symbol code length. 
+ A) Check if it is the extension of previous repeat sequence; if the decoded + value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new + symbol-skip + B) Update repeat variable + C) Check if operation is feasible (fits alphabet) + D) For each symbol do the same operations as in ProcessSingleCodeLength + + PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or + code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH */ +func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ + var extra_bits uint32 = 3 + var new_len uint32 = 0 + if code_len == repeatPreviousCodeLength { + new_len = *prev_code_len + extra_bits = 2 + } + + if *repeat_code_len != new_len { + *repeat = 0 + *repeat_code_len = new_len + } + + old_repeat = *repeat + if *repeat > 0 { + *repeat -= 2 + *repeat <<= extra_bits + } + + *repeat += repeat_delta + 3 + repeat_delta = *repeat - old_repeat + if *symbol+repeat_delta > alphabet_size { + *symbol = alphabet_size + *space = 0xFFFFF + return + } + + if *repeat_code_len != 0 { + var last uint = uint(*symbol + repeat_delta) + var next int = next_symbol[*repeat_code_len] + for { + symbolListPut(symbol_lists, next, uint16(*symbol)) + next = int(*symbol) + (*symbol)++ + if (*symbol) == uint32(last) { + break + } + } + + next_symbol[*repeat_code_len] = next + *space -= repeat_delta << (15 - *repeat_code_len) + code_length_histo[*repeat_code_len] = uint16(uint32(code_length_histo[*repeat_code_len]) + repeat_delta) + } else { + *symbol += repeat_delta + } +} + +/* Reads and decodes symbol codelengths. */ +func readSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var symbol uint32 = s.symbol + var repeat uint32 = s.repeat + var space uint32 = s.space + var prev_code_len uint32 = s.prev_code_len + var repeat_code_len uint32 = s.repeat_code_len + var symbol_lists symbolList = s.symbol_lists + var code_length_histo []uint16 = s.code_length_histo[:] + var next_symbol []int = s.next_symbol[:] + if !warmupBitReader(br) { + return decoderNeedsMoreInput + } + var p []huffmanCode + for symbol < alphabet_size && space > 0 { + p = s.table[:] + var code_len uint32 + if !checkInputAmount(br, shortFillBitWindowRead) { + s.symbol = symbol + s.repeat = repeat + s.prev_code_len = prev_code_len + s.repeat_code_len = repeat_code_len + s.space = space + return decoderNeedsMoreInput + } + + fillBitWindow16(br) + p = p[getBitsUnmasked(br)&uint64(bitMask(huffmanMaxCodeLengthCodeLength)):] + dropBits(br, uint32(p[0].bits)) /* Use 1..5 bits. 
*/ + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + processSingleCodeLength(code_len, &symbol, &repeat, &space, &prev_code_len, symbol_lists, code_length_histo, next_symbol) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 + if code_len == repeatPreviousCodeLength { + extra_bits = 2 + } else { + extra_bits = 3 + } + var repeat_delta uint32 = uint32(getBitsUnmasked(br)) & bitMask(extra_bits) + dropBits(br, extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &symbol, &repeat, &space, &prev_code_len, &repeat_code_len, symbol_lists, code_length_histo, next_symbol) + } + } + + s.space = space + return decoderSuccess +} + +func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var get_byte bool = false + var p []huffmanCode + for s.symbol < alphabet_size && s.space > 0 { + p = s.table[:] + var code_len uint32 + var available_bits uint32 + var bits uint32 = 0 + if get_byte && !pullByte(br) { + return decoderNeedsMoreInput + } + get_byte = false + available_bits = getAvailableBits(br) + if available_bits != 0 { + bits = uint32(getBitsUnmasked(br)) + } + + p = p[bits&bitMask(huffmanMaxCodeLengthCodeLength):] + if uint32(p[0].bits) > available_bits { + get_byte = true + continue + } + + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + dropBits(br, uint32(p[0].bits)) + processSingleCodeLength(code_len, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 = code_len - 14 + var repeat_delta uint32 = (bits >> p[0].bits) & bitMask(extra_bits) + if available_bits < uint32(p[0].bits)+extra_bits { + get_byte = true + continue + } + + dropBits(br, uint32(p[0].bits)+extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, &s.repeat_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) + } + } + + return decoderSuccess +} + +/* Reads and decodes 15..18 codes using static prefix code. + Each code is 2..4 bits long. In total 30..72 bits are used. */ +func readCodeLengthCodeLengths(s *Reader) int { + var br *bitReader = &s.br + var num_codes uint32 = s.repeat + var space uint32 = s.space + var i uint32 = s.sub_loop_counter + for ; i < codeLengthCodes; i++ { + var code_len_idx byte = kCodeLengthCodeOrder[i] + var ix uint32 + var v uint32 + if !safeGetBits(br, 4, &ix) { + var available_bits uint32 = getAvailableBits(br) + if available_bits != 0 { + ix = uint32(getBitsUnmasked(br) & 0xF) + } else { + ix = 0 + } + + if uint32(kCodeLengthPrefixLength[ix]) > available_bits { + s.sub_loop_counter = i + s.repeat = num_codes + s.space = space + s.substate_huffman = stateHuffmanComplex + return decoderNeedsMoreInput + } + } + + v = uint32(kCodeLengthPrefixValue[ix]) + dropBits(br, uint32(kCodeLengthPrefixLength[ix])) + s.code_length_code_lengths[code_len_idx] = byte(v) + if v != 0 { + space = space - (32 >> v) + num_codes++ + s.code_length_histo[v]++ + if space-1 >= 32 { + /* space is 0 or wrapped around. */ + break + } + } + } + + if num_codes != 1 && space != 0 { + return decoderErrorFormatClSpace + } + + return decoderSuccess +} + +/* Decodes the Huffman tables. + There are 2 scenarios: + A) Huffman code contains only few symbols (1..4). 
Those symbols are read + directly; their code lengths are defined by the number of symbols. + For this scenario 4 - 49 bits will be read. + + B) 2-phase decoding: + B.1) Small Huffman table is decoded; it is specified with code lengths + encoded with predefined entropy code. 32 - 74 bits are used. + B.2) Decoded table is used to decode code lengths of symbols in resulting + Huffman table. In worst case 3520 bits are read. */ +func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int { + var br *bitReader = &s.br + + /* Unnecessary masking, but might be good for safety. */ + alphabet_size &= 0x7FF + + /* State machine. */ + for { + switch s.substate_huffman { + case stateHuffmanNone: + if !safeReadBits(br, 2, &s.sub_loop_counter) { + return decoderNeedsMoreInput + } + + /* The value is used as follows: + 1 for simple code; + 0 for no skipping, 2 skips 2 code lengths, 3 skips 3 code lengths */ + if s.sub_loop_counter != 1 { + s.space = 32 + s.repeat = 0 /* num_codes */ + var i int + for i = 0; i <= huffmanMaxCodeLengthCodeLength; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i < codeLengthCodes; i++ { + s.code_length_code_lengths[i] = 0 + } + + s.substate_huffman = stateHuffmanComplex + continue + } + fallthrough + + /* Read symbols, codes & code lengths directly. */ + case stateHuffmanSimpleSize: + if !safeReadBits(br, 2, &s.symbol) { /* num_symbols */ + s.substate_huffman = stateHuffmanSimpleSize + return decoderNeedsMoreInput + } + + s.sub_loop_counter = 0 + fallthrough + + case stateHuffmanSimpleRead: + { + var result int = readSimpleHuffmanSymbols(alphabet_size, max_symbol, s) + if result != decoderSuccess { + return result + } + } + fallthrough + + case stateHuffmanSimpleBuild: + var table_size uint32 + if s.symbol == 3 { + var bits uint32 + if !safeReadBits(br, 1, &bits) { + s.substate_huffman = stateHuffmanSimpleBuild + return decoderNeedsMoreInput + } + + s.symbol += bits + } + + table_size = buildSimpleHuffmanTable(table, huffmanTableBits, s.symbols_lists_array[:], s.symbol) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + /* Decode Huffman-coded code lengths. 
*/ + case stateHuffmanComplex: + { + var i uint32 + var result int = readCodeLengthCodeLengths(s) + if result != decoderSuccess { + return result + } + + buildCodeLengthsHuffmanTable(s.table[:], s.code_length_code_lengths[:], s.code_length_histo[:]) + for i = 0; i < 16; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i <= huffmanMaxCodeLength; i++ { + s.next_symbol[i] = int(i) - (huffmanMaxCodeLength + 1) + symbolListPut(s.symbol_lists, s.next_symbol[i], 0xFFFF) + } + + s.symbol = 0 + s.prev_code_len = initialRepeatedCodeLength + s.repeat = 0 + s.repeat_code_len = 0 + s.space = 32768 + s.substate_huffman = stateHuffmanLengthSymbols + } + fallthrough + + case stateHuffmanLengthSymbols: + var table_size uint32 + var result int = readSymbolCodeLengths(max_symbol, s) + if result == decoderNeedsMoreInput { + result = safeReadSymbolCodeLengths(max_symbol, s) + } + + if result != decoderSuccess { + return result + } + + if s.space != 0 { + return decoderErrorFormatHuffmanSpace + } + + table_size = buildHuffmanTable(table, huffmanTableBits, s.symbol_lists, s.code_length_histo[:]) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes a block length by reading 3..39 bits. */ +func readBlockLength(table []huffmanCode, br *bitReader) uint32 { + var code uint32 + var nbits uint32 + code = readSymbol(table, br) + nbits = kBlockLengthPrefixCode[code].nbits /* nbits == 2..24 */ + return kBlockLengthPrefixCode[code].offset + readBits(br, nbits) +} + +/* WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then + reading can't be continued with ReadBlockLength. */ +func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool { + var index uint32 + if s.substate_read_block_length == stateReadBlockLengthNone { + if !safeReadSymbol(table, br, &index) { + return false + } + } else { + index = s.block_length_index + } + { + var bits uint32 /* nbits == 2..24 */ + var nbits uint32 = kBlockLengthPrefixCode[index].nbits + if !safeReadBits(br, nbits, &bits) { + s.block_length_index = index + s.substate_read_block_length = stateReadBlockLengthSuffix + return false + } + + *result = kBlockLengthPrefixCode[index].offset + bits + s.substate_read_block_length = stateReadBlockLengthNone + return true + } +} + +/* Transform: + 1) initialize list L with values 0, 1,... 255 + 2) For each input element X: + 2.1) let Y = L[X] + 2.2) remove X-th element from L + 2.3) prepend Y to L + 2.4) append Y to output + + In most cases max(Y) <= 7, so most of L remains intact. + To reduce the cost of initialization, we reuse L, remember the upper bound + of Y values, and reinitialize only first elements in L. + + Most of input values are 0 and 1. To reduce number of branches, we replace + inner for loop with do-while. */ +func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) { + var mtf [256]byte + var i int + for i = 1; i < 256; i++ { + mtf[i] = byte(i) + } + var mtf_1 byte + + /* Transform the input. */ + for i = 0; uint32(i) < v_len; i++ { + var index int = int(v[i]) + var value byte = mtf[index] + v[i] = value + mtf_1 = value + for index >= 1 { + index-- + mtf[index+1] = mtf[index] + } + + mtf[0] = mtf_1 + } +} + +/* Decodes a series of Huffman table using ReadHuffmanCode function. 
*/ +func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int { + if s.substate_tree_group != stateTreeGroupLoop { + s.next = group.codes + s.htree_index = 0 + s.substate_tree_group = stateTreeGroupLoop + } + + for s.htree_index < int(group.num_htrees) { + var table_size uint32 + var result int = readHuffmanCode(uint32(group.alphabet_size), uint32(group.max_symbol), s.next, &table_size, s) + if result != decoderSuccess { + return result + } + group.htrees[s.htree_index] = s.next + s.next = s.next[table_size:] + s.htree_index++ + } + + s.substate_tree_group = stateTreeGroupNone + return decoderSuccess +} + +/* Decodes a context map. + Decoding is done in 4 phases: + 1) Read auxiliary information (6..16 bits) and allocate memory. + In case of trivial context map, decoding is finished at this phase. + 2) Decode Huffman table using ReadHuffmanCode function. + This table will be used for reading context map items. + 3) Read context map items; "0" values could be run-length encoded. + 4) Optionally, apply InverseMoveToFront transform to the resulting map. */ +func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int { + var br *bitReader = &s.br + var result int = decoderSuccess + + switch int(s.substate_context_map) { + case stateContextMapNone: + result = decodeVarLenUint8(s, br, num_htrees) + if result != decoderSuccess { + return result + } + + (*num_htrees)++ + s.context_index = 0 + *context_map_arg = make([]byte, uint(context_map_size)) + if *context_map_arg == nil { + return decoderErrorAllocContextMap + } + + if *num_htrees <= 1 { + for i := 0; i < int(context_map_size); i++ { + (*context_map_arg)[i] = 0 + } + return decoderSuccess + } + + s.substate_context_map = stateContextMapReadPrefix + fallthrough + /* Fall through. */ + case stateContextMapReadPrefix: + { + var bits uint32 + + /* In next stage ReadHuffmanCode uses at least 4 bits, so it is safe + to peek 4 bits ahead. */ + if !safeGetBits(br, 5, &bits) { + return decoderNeedsMoreInput + } + + if bits&1 != 0 { /* Use RLE for zeros. */ + s.max_run_length_prefix = (bits >> 1) + 1 + dropBits(br, 5) + } else { + s.max_run_length_prefix = 0 + dropBits(br, 1) + } + + s.substate_context_map = stateContextMapHuffman + } + fallthrough + + /* Fall through. */ + case stateContextMapHuffman: + { + var alphabet_size uint32 = *num_htrees + s.max_run_length_prefix + result = readHuffmanCode(alphabet_size, alphabet_size, s.context_map_table[:], nil, s) + if result != decoderSuccess { + return result + } + s.code = 0xFFFF + s.substate_context_map = stateContextMapDecode + } + fallthrough + + /* Fall through. */ + case stateContextMapDecode: + { + var context_index uint32 = s.context_index + var max_run_length_prefix uint32 = s.max_run_length_prefix + var context_map []byte = *context_map_arg + var code uint32 = s.code + var skip_preamble bool = (code != 0xFFFF) + for context_index < context_map_size || skip_preamble { + if !skip_preamble { + if !safeReadSymbol(s.context_map_table[:], br, &code) { + s.code = 0xFFFF + s.context_index = context_index + return decoderNeedsMoreInput + } + + if code == 0 { + context_map[context_index] = 0 + context_index++ + continue + } + + if code > max_run_length_prefix { + context_map[context_index] = byte(code - max_run_length_prefix) + context_index++ + continue + } + } else { + skip_preamble = false + } + + /* RLE sub-stage. 
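+   A code value in 1..max_run_length_prefix denotes a run of zeros of length
+   (1 << code) plus the extra bits read below; e.g. code = 2 with extra-bits
+   value 3 yields a run of 4 + 3 = 7 zeros.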
*/ + { + var reps uint32 + if !safeReadBits(br, code, &reps) { + s.code = code + s.context_index = context_index + return decoderNeedsMoreInput + } + + reps += 1 << code + if context_index+reps > context_map_size { + return decoderErrorFormatContextMapRepeat + } + + for { + context_map[context_index] = 0 + context_index++ + reps-- + if reps == 0 { + break + } + } + } + } + } + fallthrough + + case stateContextMapTransform: + var bits uint32 + if !safeReadBits(br, 1, &bits) { + s.substate_context_map = stateContextMapTransform + return decoderNeedsMoreInput + } + + if bits != 0 { + inverseMoveToFrontTransform(*context_map_arg, context_map_size, s) + } + + s.substate_context_map = stateContextMapNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } +} + +/* Decodes a command or literal and updates block type ring-buffer. + Reads 3..54 bits. */ +func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool { + var max_block_type uint32 = s.num_block_types[tree_type] + type_tree := s.block_type_trees[tree_type*huffmanMaxSize258:] + len_tree := s.block_len_trees[tree_type*huffmanMaxSize26:] + var br *bitReader = &s.br + var ringbuffer []uint32 = s.block_type_rb[tree_type*2:] + var block_type uint32 + if max_block_type <= 1 { + return false + } + + /* Read 0..15 + 3..39 bits. */ + if safe == 0 { + block_type = readSymbol(type_tree, br) + s.block_length[tree_type] = readBlockLength(len_tree, br) + } else { + var memento bitReaderState + bitReaderSaveState(br, &memento) + if !safeReadSymbol(type_tree, br, &block_type) { + return false + } + if !safeReadBlockLength(s, &s.block_length[tree_type], len_tree, br) { + s.substate_read_block_length = stateReadBlockLengthNone + bitReaderRestoreState(br, &memento) + return false + } + } + + if block_type == 1 { + block_type = ringbuffer[1] + 1 + } else if block_type == 0 { + block_type = ringbuffer[0] + } else { + block_type -= 2 + } + + if block_type >= max_block_type { + block_type -= max_block_type + } + + ringbuffer[0] = ringbuffer[1] + ringbuffer[1] = block_type + return true +} + +func detectTrivialLiteralBlockTypes(s *Reader) { + var i uint + for i = 0; i < 8; i++ { + s.trivial_literal_contexts[i] = 0 + } + for i = 0; uint32(i) < s.num_block_types[0]; i++ { + var offset uint = i << literalContextBits + var error uint = 0 + var sample uint = uint(s.context_map[offset]) + var j uint + for j = 0; j < 1<>5] |= 1 << (i & 31) + } + } +} + +func prepareLiteralDecoding(s *Reader) { + var context_mode byte + var trivial uint + var block_type uint32 = s.block_type_rb[1] + var context_offset uint32 = block_type << literalContextBits + s.context_map_slice = s.context_map[context_offset:] + trivial = uint(s.trivial_literal_contexts[block_type>>5]) + s.trivial_literal_context = int((trivial >> (block_type & 31)) & 1) + s.literal_htree = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[0]]) + context_mode = s.context_modes[block_type] & 3 + s.context_lookup = getContextLUT(int(context_mode)) +} + +/* Decodes the block type and updates the state for literal context. + Reads 3..54 bits. */ +func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 0) { + return false + } + + prepareLiteralDecoding(s) + return true +} + +func decodeLiteralBlockSwitch(s *Reader) { + decodeLiteralBlockSwitchInternal(0, s) +} + +func safeDecodeLiteralBlockSwitch(s *Reader) bool { + return decodeLiteralBlockSwitchInternal(1, s) +} + +/* Block switch for insert/copy length. + Reads 3..54 bits. 
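+   The budget is at most 15 bits for the block-type symbol plus 3..39 bits for
+   the block length (see decodeBlockTypeAndLength above).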
*/ +func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 1) { + return false + } + + s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[s.block_type_rb[3]]) + return true +} + +func decodeCommandBlockSwitch(s *Reader) { + decodeCommandBlockSwitchInternal(0, s) +} + +func safeDecodeCommandBlockSwitch(s *Reader) bool { + return decodeCommandBlockSwitchInternal(1, s) +} + +/* Block switch for distance codes. + Reads 3..54 bits. */ +func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 2) { + return false + } + + s.dist_context_map_slice = s.dist_context_map[s.block_type_rb[5]< s.ringbuffer_size { + pos = uint(s.ringbuffer_size) + } else { + pos = uint(s.pos) + } + var partial_pos_rb uint = (s.rb_roundtrips * uint(s.ringbuffer_size)) + pos + return partial_pos_rb - s.partial_pos_out +} + +/* Dumps output. + Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push + and either ring-buffer is as big as window size, or |force| is true. */ +func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int { + start := s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):] + var to_write uint = unwrittenBytes(s, true) + var num_written uint = *available_out + if num_written > to_write { + num_written = to_write + } + + if s.meta_block_remaining_len < 0 { + return decoderErrorFormatBlockLength1 + } + + if next_out != nil && *next_out == nil { + *next_out = start + } else { + if next_out != nil { + copy(*next_out, start[:num_written]) + *next_out = (*next_out)[num_written:] + } + } + + *available_out -= num_written + s.partial_pos_out += num_written + if total_out != nil { + *total_out = s.partial_pos_out + } + + if num_written < to_write { + if s.ringbuffer_size == 1<= s.ringbuffer_size { + s.pos -= s.ringbuffer_size + s.rb_roundtrips++ + if uint(s.pos) != 0 { + s.should_wrap_ringbuffer = 1 + } else { + s.should_wrap_ringbuffer = 0 + } + } + + return decoderSuccess +} + +func wrapRingBuffer(s *Reader) { + if s.should_wrap_ringbuffer != 0 { + copy(s.ringbuffer, s.ringbuffer_end[:uint(s.pos)]) + s.should_wrap_ringbuffer = 0 + } +} + +/* Allocates ring-buffer. + + s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before + this function is called. + + Last two bytes of ring-buffer are initialized to 0, so context calculation + could be done uniformly for the first two and all other positions. */ +func ensureRingBuffer(s *Reader) bool { + var old_ringbuffer []byte + if s.ringbuffer_size == s.new_ringbuffer_size { + return true + } + spaceNeeded := int(s.new_ringbuffer_size) + int(kRingBufferWriteAheadSlack) + if len(s.ringbuffer) < spaceNeeded { + old_ringbuffer = s.ringbuffer + s.ringbuffer = make([]byte, spaceNeeded) + } + + s.ringbuffer[s.new_ringbuffer_size-2] = 0 + s.ringbuffer[s.new_ringbuffer_size-1] = 0 + + if old_ringbuffer != nil { + copy(s.ringbuffer, old_ringbuffer[:uint(s.pos)]) + } + + s.ringbuffer_size = s.new_ringbuffer_size + s.ringbuffer_mask = s.new_ringbuffer_size - 1 + s.ringbuffer_end = s.ringbuffer[s.ringbuffer_size:] + + return true +} + +func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_out *uint, s *Reader) int { + /* TODO: avoid allocation for single uncompressed block. 
*/ + if !ensureRingBuffer(s) { + return decoderErrorAllocRingBuffer1 + } + + /* State machine */ + for { + switch s.substate_uncompressed { + case stateUncompressedNone: + { + var nbytes int = int(getRemainingBytes(&s.br)) + if nbytes > s.meta_block_remaining_len { + nbytes = s.meta_block_remaining_len + } + + if s.pos+nbytes > s.ringbuffer_size { + nbytes = s.ringbuffer_size - s.pos + } + + /* Copy remaining bytes from s->br.buf_ to ring-buffer. */ + copyBytes(s.ringbuffer[s.pos:], &s.br, uint(nbytes)) + + s.pos += nbytes + s.meta_block_remaining_len -= nbytes + if s.pos < 1<>1 >= min_size { + new_ringbuffer_size >>= 1 + } + } + + s.new_ringbuffer_size = new_ringbuffer_size +} + +/* Reads 1..256 2-bit context modes. */ +func readContextModes(s *Reader) int { + var br *bitReader = &s.br + var i int = s.loop_counter + + for i < int(s.num_block_types[0]) { + var bits uint32 + if !safeReadBits(br, 2, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + s.context_modes[i] = byte(bits) + i++ + } + + return decoderSuccess +} + +func takeDistanceFromRingBuffer(s *Reader) { + if s.distance_code == 0 { + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + + /* Compensate double distance-ring-buffer roll for dictionary items. */ + s.distance_context = 1 + } else { + var distance_code int = s.distance_code << 1 + const kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B + const kDistanceShortCodeValueOffset uint32 = 0xFA5FA500 + var v int = (s.dist_rb_idx + int(kDistanceShortCodeIndexOffset>>uint(distance_code))) & 0x3 + /* kDistanceShortCodeIndexOffset has 2-bit values from LSB: + 3, 2, 1, 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 */ + + /* kDistanceShortCodeValueOffset has 2-bit values from LSB: + -0, 0,-0, 0,-1, 1,-2, 2,-3, 3,-1, 1,-2, 2,-3, 3 */ + s.distance_code = s.dist_rb[v] + + v = int(kDistanceShortCodeValueOffset>>uint(distance_code)) & 0x3 + if distance_code&0x3 != 0 { + s.distance_code += v + } else { + s.distance_code -= v + if s.distance_code <= 0 { + /* A huge distance will cause a () soon. + This is a little faster than failing here. */ + s.distance_code = 0x7FFFFFFF + } + } + } +} + +func safeReadBitsMaybeZero(br *bitReader, n_bits uint32, val *uint32) bool { + if n_bits != 0 { + return safeReadBits(br, n_bits, val) + } else { + *val = 0 + return true + } +} + +/* Precondition: s->distance_code < 0. */ +func readDistanceInternal(safe int, s *Reader, br *bitReader) bool { + var distval int + var memento bitReaderState + var distance_tree []huffmanCode = []huffmanCode(s.distance_hgroup.htrees[s.dist_htree_index]) + if safe == 0 { + s.distance_code = int(readSymbol(distance_tree, br)) + } else { + var code uint32 + bitReaderSaveState(br, &memento) + if !safeReadSymbol(distance_tree, br, &code) { + return false + } + + s.distance_code = int(code) + } + + /* Convert the distance code to the actual distance by possibly + looking up past distances from the s->ringbuffer. */ + s.distance_context = 0 + + if s.distance_code&^0xF == 0 { + takeDistanceFromRingBuffer(s) + s.block_length[2]-- + return true + } + + distval = s.distance_code - int(s.num_direct_distance_codes) + if distval >= 0 { + var nbits uint32 + var postfix int + var offset int + if safe == 0 && (s.distance_postfix_bits == 0) { + nbits = (uint32(distval) >> 1) + 1 + offset = ((2 + (distval & 1)) << nbits) - 4 + s.distance_code = int(s.num_direct_distance_codes) + offset + int(readBits(br, nbits)) + } else { + /* This branch also works well when s->distance_postfix_bits == 0. 
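+   In the general case the code is rebuilt as num_direct_distance_codes +
+   ((offset + extra_bits) << postfix_bits) + postfix, with
+   offset = ((2 + (distval & 1)) << nbits) - 4.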
*/ + var bits uint32 + postfix = distval & s.distance_postfix_mask + distval >>= s.distance_postfix_bits + nbits = (uint32(distval) >> 1) + 1 + if safe != 0 { + if !safeReadBitsMaybeZero(br, nbits, &bits) { + s.distance_code = -1 /* Restore precondition. */ + bitReaderRestoreState(br, &memento) + return false + } + } else { + bits = readBits(br, nbits) + } + + offset = ((2 + (distval & 1)) << nbits) - 4 + s.distance_code = int(s.num_direct_distance_codes) + ((offset + int(bits)) << s.distance_postfix_bits) + postfix + } + } + + s.distance_code = s.distance_code - numDistanceShortCodes + 1 + s.block_length[2]-- + return true +} + +func readDistance(s *Reader, br *bitReader) { + readDistanceInternal(0, s, br) +} + +func safeReadDistance(s *Reader, br *bitReader) bool { + return readDistanceInternal(1, s, br) +} + +func readCommandInternal(safe int, s *Reader, br *bitReader, insert_length *int) bool { + var cmd_code uint32 + var insert_len_extra uint32 = 0 + var copy_length uint32 + var v cmdLutElement + var memento bitReaderState + if safe == 0 { + cmd_code = readSymbol(s.htree_command, br) + } else { + bitReaderSaveState(br, &memento) + if !safeReadSymbol(s.htree_command, br, &cmd_code) { + return false + } + } + + v = kCmdLut[cmd_code] + s.distance_code = int(v.distance_code) + s.distance_context = int(v.context) + s.dist_htree_index = s.dist_context_map_slice[s.distance_context] + *insert_length = int(v.insert_len_offset) + if safe == 0 { + if v.insert_len_extra_bits != 0 { + insert_len_extra = readBits(br, uint32(v.insert_len_extra_bits)) + } + + copy_length = readBits(br, uint32(v.copy_len_extra_bits)) + } else { + if !safeReadBitsMaybeZero(br, uint32(v.insert_len_extra_bits), &insert_len_extra) || !safeReadBitsMaybeZero(br, uint32(v.copy_len_extra_bits), ©_length) { + bitReaderRestoreState(br, &memento) + return false + } + } + + s.copy_length = int(copy_length) + int(v.copy_len_offset) + s.block_length[1]-- + *insert_length += int(insert_len_extra) + return true +} + +func readCommand(s *Reader, br *bitReader, insert_length *int) { + readCommandInternal(0, s, br, insert_length) +} + +func safeReadCommand(s *Reader, br *bitReader, insert_length *int) bool { + return readCommandInternal(1, s, br, insert_length) +} + +func checkInputAmountMaybeSafe(safe int, br *bitReader, num uint) bool { + if safe != 0 { + return true + } + + return checkInputAmount(br, num) +} + +func processCommandsInternal(safe int, s *Reader) int { + var pos int = s.pos + var i int = s.loop_counter + var result int = decoderSuccess + var br *bitReader = &s.br + var hc []huffmanCode + + if !checkInputAmountMaybeSafe(safe, br, 28) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if safe == 0 { + warmupBitReader(br) + } + + /* Jump into state machine. 
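+   Decoding resumes at one of four labels (CommandBegin, CommandInner,
+   CommandPostDecodeLiterals, CommandPostWrapCopy), depending on where the
+   previous call ran out of input.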
*/ + if s.state == stateCommandBegin { + goto CommandBegin + } else if s.state == stateCommandInner { + goto CommandInner + } else if s.state == stateCommandPostDecodeLiterals { + goto CommandPostDecodeLiterals + } else if s.state == stateCommandPostWrapCopy { + goto CommandPostWrapCopy + } else { + return decoderErrorUnreachable + } + +CommandBegin: + if safe != 0 { + s.state = stateCommandBegin + } + + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 156 bits + 7 bytes */ + s.state = stateCommandBegin + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[1] == 0 { + if safe != 0 { + if !safeDecodeCommandBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeCommandBlockSwitch(s) + } + + goto CommandBegin + } + + /* Read the insert/copy length in the command. */ + if safe != 0 { + if !safeReadCommand(s, br, &i) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readCommand(s, br, &i) + } + + if i == 0 { + goto CommandPostDecodeLiterals + } + + s.meta_block_remaining_len -= i + +CommandInner: + if safe != 0 { + s.state = stateCommandInner + } + + /* Read the literals in the command. */ + if s.trivial_literal_context != 0 { + var bits uint32 + var value uint32 + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + for { + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + if s.trivial_literal_context == 0 { + goto CommandInner + } + } + + if safe == 0 { + s.ringbuffer[pos] = byte(readPreloadedSymbol(s.literal_htree, br, &bits, &value)) + } else { + var literal uint32 + if !safeReadSymbol(s.literal_htree, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + s.ringbuffer[pos] = byte(literal) + } + + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } else { + var p1 byte = s.ringbuffer[(pos-1)&s.ringbuffer_mask] + var p2 byte = s.ringbuffer[(pos-2)&s.ringbuffer_mask] + for { + var context byte + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + if s.trivial_literal_context != 0 { + goto CommandInner + } + } + + context = getContext(p1, p2, s.context_lookup) + hc = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[context]]) + p2 = p1 + if safe == 0 { + p1 = byte(readSymbol(hc, br)) + } else { + var literal uint32 + if !safeReadSymbol(hc, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + p1 = byte(literal) + } + + s.ringbuffer[pos] = p1 + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } + + if s.meta_block_remaining_len <= 0 { + s.state = stateMetablockDone + goto saveStateAndReturn + } + 
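+	/* All literals of the current command have been emitted; next read the
+	   distance code and apply either an LZ77 back-reference or, if the distance
+	   exceeds the maximum backward distance, a static dictionary word. */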
+CommandPostDecodeLiterals: + if safe != 0 { + s.state = stateCommandPostDecodeLiterals + } + + if s.distance_code >= 0 { + /* Implicit distance case. */ + if s.distance_code != 0 { + s.distance_context = 0 + } else { + s.distance_context = 1 + } + + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + } else { + /* Read distance code in the command, unless it was implicitly zero. */ + if s.block_length[2] == 0 { + if safe != 0 { + if !safeDecodeDistanceBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeDistanceBlockSwitch(s) + } + } + + if safe != 0 { + if !safeReadDistance(s, br) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readDistance(s, br) + } + } + + if s.max_distance != s.max_backward_distance { + if pos < s.max_backward_distance { + s.max_distance = pos + } else { + s.max_distance = s.max_backward_distance + } + } + + i = s.copy_length + + /* Apply copy of LZ77 back-reference, or static dictionary reference if + the distance is larger than the max LZ77 distance */ + if s.distance_code > s.max_distance { + /* The maximum allowed distance is BROTLI_MAX_ALLOWED_DISTANCE = 0x7FFFFFFC. + With this choice, no signed overflow can occur after decoding + a special distance code (e.g., after adding 3 to the last distance). */ + if s.distance_code > maxAllowedDistance { + return decoderErrorFormatDistance + } + + if i >= minDictionaryWordLength && i <= maxDictionaryWordLength { + var address int = s.distance_code - s.max_distance - 1 + var words *dictionary = s.dictionary + var trans *transforms = s.transforms + var offset int = int(s.dictionary.offsets_by_length[i]) + var shift uint32 = uint32(s.dictionary.size_bits_by_length[i]) + var mask int = int(bitMask(shift)) + var word_idx int = address & mask + var transform_idx int = address >> shift + + /* Compensate double distance-ring-buffer roll. */ + s.dist_rb_idx += s.distance_context + + offset += word_idx * i + if words.data == nil { + return decoderErrorDictionaryNotSet + } + + if transform_idx < int(trans.num_transforms) { + word := words.data[offset:] + var len int = i + if transform_idx == int(trans.cutOffTransforms[0]) { + copy(s.ringbuffer[pos:], word[:uint(len)]) + } else { + len = transformDictionaryWord(s.ringbuffer[pos:], word, int(len), trans, transform_idx) + } + + pos += int(len) + s.meta_block_remaining_len -= int(len) + if pos >= s.ringbuffer_size { + s.state = stateCommandPostWrite1 + goto saveStateAndReturn + } + } else { + return decoderErrorFormatTransform + } + } else { + return decoderErrorFormatDictionary + } + } else { + var src_start int = (pos - s.distance_code) & s.ringbuffer_mask + copy_dst := s.ringbuffer[pos:] + copy_src := s.ringbuffer[src_start:] + var dst_end int = pos + i + var src_end int = src_start + i + + /* Update the recent distances cache. */ + s.dist_rb[s.dist_rb_idx&3] = s.distance_code + + s.dist_rb_idx++ + s.meta_block_remaining_len -= i + + /* There are 32+ bytes of slack in the ring-buffer allocation. + Also, we have 16 short codes, that make these 16 bytes irrelevant + in the ring-buffer. Let's copy over them as a first guess. */ + copy(copy_dst, copy_src[:16]) + + if src_end > pos && dst_end > src_start { + /* Regions intersect. */ + goto CommandPostWrapCopy + } + + if dst_end >= s.ringbuffer_size || src_end >= s.ringbuffer_size { + /* At least one region wraps. 
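+   The slow path below copies byte by byte with masked indices, which handles
+   both overlapping source/destination regions and copies that wrap around the
+   end of the ring-buffer.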
*/ + goto CommandPostWrapCopy + } + + pos += i + if i > 16 { + if i > 32 { + copy(copy_dst[16:], copy_src[16:][:uint(i-16)]) + } else { + /* This branch covers about 45% cases. + Fixed size short copy allows more compiler optimizations. */ + copy(copy_dst[16:], copy_src[16:][:16]) + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } +CommandPostWrapCopy: + { + var wrap_guard int = s.ringbuffer_size - pos + for { + i-- + if i < 0 { + break + } + s.ringbuffer[pos] = s.ringbuffer[(pos-s.distance_code)&s.ringbuffer_mask] + pos++ + wrap_guard-- + if wrap_guard == 0 { + s.state = stateCommandPostWrite2 + goto saveStateAndReturn + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } + +saveStateAndReturn: + s.pos = pos + s.loop_counter = i + return result +} + +func processCommands(s *Reader) int { + return processCommandsInternal(0, s) +} + +func safeProcessCommands(s *Reader) int { + return processCommandsInternal(1, s) +} + +/* Returns the maximum number of distance symbols which can only represent + distances not exceeding BROTLI_MAX_ALLOWED_DISTANCE. */ + +var maxDistanceSymbol_bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} +var maxDistanceSymbol_diff = [maxNpostfix + 1]uint32{73, 126, 228, 424} + +func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 { + var postfix uint32 = 1 << npostfix + if ndirect < maxDistanceSymbol_bound[npostfix] { + return ndirect + maxDistanceSymbol_diff[npostfix] + postfix + } else if ndirect > maxDistanceSymbol_bound[npostfix]+postfix { + return ndirect + maxDistanceSymbol_diff[npostfix] + } else { + return maxDistanceSymbol_bound[npostfix] + maxDistanceSymbol_diff[npostfix] + postfix + } +} + +/* Invariant: input stream is never overconsumed: + - invalid input implies that the whole stream is invalid -> any amount of + input could be read and discarded + - when result is "needs more input", then at least one more byte is REQUIRED + to complete decoding; all input data MUST be consumed by decoder, so + client could swap the input buffer + - when result is "needs more output" decoder MUST ensure that it doesn't + hold more than 7 bits in bit reader; this saves client from swapping input + buffer ahead of time + - when result is "success" decoder MUST return all unused data back to input + buffer; this is possible because the invariant is held on enter */ +func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int { + var result int = decoderSuccess + var br *bitReader = &s.br + + /* Do not try to process further in a case of unrecoverable error. */ + if int(s.error_code) < 0 { + return decoderResultError + } + + if *available_out != 0 && (next_out == nil || *next_out == nil) { + return saveErrorCode(s, decoderErrorInvalidArguments) + } + + if *available_out == 0 { + next_out = nil + } + if s.buffer_length == 0 { /* Just connect bit reader to input stream. */ + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + } else { + /* At least one byte of input is required. More than one byte of input may + be required to complete the transaction -> reading more data must be + done in a loop -> do it in a main loop. 
*/ + result = decoderNeedsMoreInput + + br.input = s.buffer.u8[:] + br.byte_pos = 0 + } + + /* State machine */ + for { + if result != decoderSuccess { + /* Error, needs more input/output. */ + if result == decoderNeedsMoreInput { + if s.ringbuffer != nil { /* Pro-actively push output. */ + var intermediate_result int = writeRingBuffer(s, available_out, next_out, nil, true) + + /* WriteRingBuffer checks s->meta_block_remaining_len validity. */ + if int(intermediate_result) < 0 { + result = intermediate_result + break + } + } + + if s.buffer_length != 0 { /* Used with internal buffer. */ + if br.byte_pos == br.input_len { + /* Successfully finished read transaction. + Accumulator contains less than 8 bits, because internal buffer + is expanded byte-by-byte until it is enough to complete read. */ + s.buffer_length = 0 + + /* Switch to input stream and restart. */ + result = decoderSuccess + + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + continue + } else if *available_in != 0 { + /* Not enough data in buffer, but can take one more byte from + input stream. */ + result = decoderSuccess + + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + br.input_len = uint(s.buffer_length) + *next_in = (*next_in)[1:] + (*available_in)-- + + /* Retry with more data in buffer. */ + continue + } + + /* Can't finish reading and no more input. */ + break + /* Input stream doesn't contain enough input. */ + } else { + /* Copy tail to internal buffer and return. */ + *next_in = br.input[br.byte_pos:] + + *available_in = br.input_len - br.byte_pos + for *available_in != 0 { + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + *next_in = (*next_in)[1:] + (*available_in)-- + } + + break + } + } + + /* Unreachable. */ + + /* Fail or needs more output. */ + if s.buffer_length != 0 { + /* Just consumed the buffered input and produced some output. Otherwise + it would result in "needs more input". Reset internal buffer. */ + s.buffer_length = 0 + } else { + /* Using input stream in last iteration. When decoder switches to input + stream it has less than 8 bits in accumulator, so it is safe to + return unused accumulator bits there. */ + bitReaderUnload(br) + + *available_in = br.input_len - br.byte_pos + *next_in = br.input[br.byte_pos:] + } + + break + } + + switch s.state { + /* Prepare to the first read. */ + case stateUninited: + if !warmupBitReader(br) { + result = decoderNeedsMoreInput + break + } + + /* Decode window size. */ + result = decodeWindowBits(s, br) /* Reads 1..8 bits. */ + if result != decoderSuccess { + break + } + + if s.large_window { + s.state = stateLargeWindowBits + break + } + + s.state = stateInitialize + + case stateLargeWindowBits: + if !safeReadBits(br, 6, &s.window_bits) { + result = decoderNeedsMoreInput + break + } + + if s.window_bits < largeMinWbits || s.window_bits > largeMaxWbits { + result = decoderErrorFormatWindowBits + break + } + + s.state = stateInitialize + fallthrough + + /* Maximum distance, see section 9.1. of the spec. */ + /* Fall through. */ + case stateInitialize: + s.max_backward_distance = (1 << s.window_bits) - windowGap + + /* Allocate memory for both block_type_trees and block_len_trees. */ + s.block_type_trees = make([]huffmanCode, (3 * (huffmanMaxSize258 + huffmanMaxSize26))) + + if s.block_type_trees == nil { + result = decoderErrorAllocBlockTypeTrees + break + } + + s.block_len_trees = s.block_type_trees[3*huffmanMaxSize258:] + + s.state = stateMetablockBegin + fallthrough + + /* Fall through. 
*/ + case stateMetablockBegin: + decoderStateMetablockBegin(s) + + s.state = stateMetablockHeader + fallthrough + + /* Fall through. */ + case stateMetablockHeader: + result = decodeMetaBlockLength(s, br) + /* Reads 2 - 31 bits. */ + if result != decoderSuccess { + break + } + + if s.is_metadata != 0 || s.is_uncompressed != 0 { + if !bitReaderJumpToByteBoundary(br) { + result = decoderErrorFormatPadding1 + break + } + } + + if s.is_metadata != 0 { + s.state = stateMetadata + break + } + + if s.meta_block_remaining_len == 0 { + s.state = stateMetablockDone + break + } + + calculateRingBufferSize(s) + if s.is_uncompressed != 0 { + s.state = stateUncompressed + break + } + + s.loop_counter = 0 + s.state = stateHuffmanCode0 + + case stateUncompressed: + result = copyUncompressedBlockToOutput(available_out, next_out, nil, s) + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateMetadata: + for ; s.meta_block_remaining_len > 0; s.meta_block_remaining_len-- { + var bits uint32 + + /* Read one byte and ignore it. */ + if !safeReadBits(br, 8, &bits) { + result = decoderNeedsMoreInput + break + } + } + + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateHuffmanCode0: + if s.loop_counter >= 3 { + s.state = stateMetablockHeader2 + break + } + + /* Reads 1..11 bits. */ + result = decodeVarLenUint8(s, br, &s.num_block_types[s.loop_counter]) + + if result != decoderSuccess { + break + } + + s.num_block_types[s.loop_counter]++ + if s.num_block_types[s.loop_counter] < 2 { + s.loop_counter++ + break + } + + s.state = stateHuffmanCode1 + fallthrough + + case stateHuffmanCode1: + { + var alphabet_size uint32 = s.num_block_types[s.loop_counter] + 2 + var tree_offset int = s.loop_counter * huffmanMaxSize258 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_type_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode2 + } + fallthrough + + case stateHuffmanCode2: + { + var alphabet_size uint32 = numBlockLenSymbols + var tree_offset int = s.loop_counter * huffmanMaxSize26 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_len_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode3 + } + fallthrough + + case stateHuffmanCode3: + var tree_offset int = s.loop_counter * huffmanMaxSize26 + if !safeReadBlockLength(s, &s.block_length[s.loop_counter], s.block_len_trees[tree_offset:], br) { + result = decoderNeedsMoreInput + break + } + + s.loop_counter++ + s.state = stateHuffmanCode0 + + case stateMetablockHeader2: + { + var bits uint32 + if !safeReadBits(br, 6, &bits) { + result = decoderNeedsMoreInput + break + } + + s.distance_postfix_bits = bits & bitMask(2) + bits >>= 2 + s.num_direct_distance_codes = numDistanceShortCodes + (bits << s.distance_postfix_bits) + s.distance_postfix_mask = int(bitMask(s.distance_postfix_bits)) + s.context_modes = make([]byte, uint(s.num_block_types[0])) + if s.context_modes == nil { + result = decoderErrorAllocContextModes + break + } + + s.loop_counter = 0 + s.state = stateContextModes + } + fallthrough + + case stateContextModes: + result = readContextModes(s) + + if result != decoderSuccess { + break + } + + s.state = stateContextMap1 + fallthrough + + case stateContextMap1: + result = decodeContextMap(s.num_block_types[0]<= 3 { + prepareLiteralDecoding(s) + s.dist_context_map_slice = s.dist_context_map + s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[0]) + if !ensureRingBuffer(s) { + result = 
decoderErrorAllocRingBuffer2 + break + } + + s.state = stateCommandBegin + } + + case stateCommandBegin, stateCommandInner, stateCommandPostDecodeLiterals, stateCommandPostWrapCopy: + result = processCommands(s) + + if result == decoderNeedsMoreInput { + result = safeProcessCommands(s) + } + + case stateCommandInnerWrite, stateCommandPostWrite1, stateCommandPostWrite2: + result = writeRingBuffer(s, available_out, next_out, nil, false) + + if result != decoderSuccess { + break + } + + wrapRingBuffer(s) + if s.ringbuffer_size == 1<= uint64(block_size) { + return 0 + } + return block_size - uint(delta) +} + +/* Wraps 64-bit input position to 32-bit ring-buffer position preserving + "not-a-first-lap" feature. */ +func wrapPosition(position uint64) uint32 { + var result uint32 = uint32(position) + var gb uint64 = position >> 30 + if gb > 2 { + /* Wrap every 2GiB; The first 3GB are continuous. */ + result = result&((1<<30)-1) | (uint32((gb-1)&1)+1)<<30 + } + + return result +} + +func (s *Writer) getStorage(size int) []byte { + if len(s.storage) < size { + s.storage = make([]byte, size) + } + + return s.storage +} + +func hashTableSize(max_table_size uint, input_size uint) uint { + var htsize uint = 256 + for htsize < max_table_size && htsize < input_size { + htsize <<= 1 + } + + return htsize +} + +func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []int { + var max_table_size uint = maxHashTableSize(quality) + var htsize uint = hashTableSize(max_table_size, input_size) + /* Use smaller hash table when input.size() is smaller, since we + fill the table, incurring O(hash table size) overhead for + compression, and if the input is short, we won't need that + many hash table entries anyway. */ + + var table []int + assert(max_table_size >= 256) + if quality == fastOnePassCompressionQuality { + /* Only odd shifts are supported by fast-one-pass. */ + if htsize&0xAAAAA == 0 { + htsize <<= 1 + } + } + + if htsize <= uint(len(s.small_table_)) { + table = s.small_table_[:] + } else { + if htsize > s.large_table_size_ { + s.large_table_size_ = htsize + s.large_table_ = nil + s.large_table_ = make([]int, htsize) + } + + table = s.large_table_ + } + + *table_size = htsize + for i := 0; i < int(htsize); i++ { + table[i] = 0 + } + return table +} + +func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) { + if large_window { + *last_bytes = uint16((lgwin&0x3F)<<8 | 0x11) + *last_bytes_bits = 14 + } else { + if lgwin == 16 { + *last_bytes = 0 + *last_bytes_bits = 1 + } else if lgwin == 17 { + *last_bytes = 1 + *last_bytes_bits = 7 + } else if lgwin > 17 { + *last_bytes = uint16((lgwin-17)<<1 | 0x01) + *last_bytes_bits = 4 + } else { + *last_bytes = uint16((lgwin-8)<<4 | 0x01) + *last_bytes_bits = 7 + } + } +} + +/* Decide about the context map based on the ability of the prediction + ability of the previous byte UTF8-prefix on the next byte. The + prediction ability is calculated as Shannon entropy. Here we need + Shannon entropy instead of 'BitsEntropy' since the prefix will be + encoded with the remaining 6 bits of the following byte, and + BitsEntropy will assume that symbol to be stored alone using Huffman + coding. 
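+   (Shannon entropy here is the usual -sum(p * log2(p)) over a histogram of
+   bigrams of byte-prefix classes, i.e. the top two bits of each literal mapped
+   to three classes.)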
*/ + +var kStaticContextMapContinuation = [64]uint32{ + 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} +var kStaticContextMapSimpleUTF8 = [64]uint32{ + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +func chooseContextMap(quality int, bigram_histo []uint32, num_literal_contexts *uint, literal_context_map *[]uint32) { + var monogram_histo = [3]uint32{0} + var two_prefix_histo = [6]uint32{0} + var total uint + var i uint + var dummy uint + var entropy [4]float64 + for i = 0; i < 9; i++ { + monogram_histo[i%3] += bigram_histo[i] + two_prefix_histo[i%6] += bigram_histo[i] + } + + entropy[1] = shannonEntropy(monogram_histo[:], 3, &dummy) + entropy[2] = (shannonEntropy(two_prefix_histo[:], 3, &dummy) + shannonEntropy(two_prefix_histo[3:], 3, &dummy)) + entropy[3] = 0 + for i = 0; i < 3; i++ { + entropy[3] += shannonEntropy(bigram_histo[3*i:], 3, &dummy) + } + + total = uint(monogram_histo[0] + monogram_histo[1] + monogram_histo[2]) + assert(total != 0) + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + entropy[3] *= entropy[0] + + if quality < minQualityForHqContextModeling { + /* 3 context models is a bit slower, don't use it at lower qualities. */ + entropy[3] = entropy[1] * 10 + } + + /* If expected savings by symbol are less than 0.2 bits, skip the + context modeling -- in exchange for faster decoding speed. */ + if entropy[1]-entropy[2] < 0.2 && entropy[1]-entropy[3] < 0.2 { + *num_literal_contexts = 1 + } else if entropy[2]-entropy[3] < 0.02 { + *num_literal_contexts = 2 + *literal_context_map = kStaticContextMapSimpleUTF8[:] + } else { + *num_literal_contexts = 3 + *literal_context_map = kStaticContextMapContinuation[:] + } +} + +/* Decide if we want to use a more complex static context map containing 13 + context values, based on the entropy reduction of histograms over the + first 5 bits of literals. */ + +var kStaticContextMapComplexUTF8 = [64]uint32{ + 11, 11, 12, 12, /* 0 special */ + 0, 0, 0, 0, /* 4 lf */ + 1, 1, 9, 9, /* 8 space */ + 2, 2, 2, 2, /* !, first after space/lf and after something else. */ + 1, 1, 1, 1, /* " */ + 8, 3, 3, 3, /* % */ + 1, 1, 1, 1, /* ({[ */ + 2, 2, 2, 2, /* }]) */ + 8, 4, 4, 4, /* :; */ + 8, 7, 4, 4, /* . */ + 8, 0, 0, 0, /* > */ + 3, 3, 3, 3, /* [0..9] */ + 5, 5, 10, 5, /* [A-Z] */ + 5, 5, 10, 5, + 6, 6, 6, 6, /* [a-z] */ + 6, 6, 6, 6, +} + +func shouldUseComplexStaticContextMap(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) bool { + /* Try the more complex static context map only for long data. */ + if size_hint < 1<<20 { + return false + } else { + var end_pos uint = start_pos + length + var combined_histo = [32]uint32{0} + var context_histo = [13][32]uint32{[32]uint32{0}} + var total uint32 = 0 + var entropy [3]float64 + var dummy uint + var i uint + var utf8_lut contextLUT = getContextLUT(contextUTF8) + /* To make entropy calculations faster and to fit on the stack, we collect + histograms over the 5 most significant bits of literals. One histogram + without context and 13 additional histograms for each context value. 
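+   The complex map is accepted only when the contextual entropy stays below
+   3.0 bits per symbol and beats the context-free histogram by at least 0.2
+   bits per symbol (see the thresholds below).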
*/ + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var stride_end_pos uint = start_pos + 64 + var prev2 byte = input[start_pos&mask] + var prev1 byte = input[(start_pos+1)&mask] + var pos uint + + /* To make the analysis of the data faster we only examine 64 byte long + strides at every 4kB intervals. */ + for pos = start_pos + 2; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + var context byte = byte(kStaticContextMapComplexUTF8[getContext(prev1, prev2, utf8_lut)]) + total++ + combined_histo[literal>>3]++ + context_histo[context][literal>>3]++ + prev2 = prev1 + prev1 = literal + } + } + + entropy[1] = shannonEntropy(combined_histo[:], 32, &dummy) + entropy[2] = 0 + for i = 0; i < 13; i++ { + entropy[2] += shannonEntropy(context_histo[i][0:], 32, &dummy) + } + + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + + /* The triggering heuristics below were tuned by compressing the individual + files of the silesia corpus. If we skip this kind of context modeling + for not very well compressible input (i.e. entropy using context modeling + is 60% of maximal entropy) or if expected savings by symbol are less + than 0.2 bits, then in every case when it triggers, the final compression + ratio is improved. Note however that this heuristics might be too strict + for some cases and could be tuned further. */ + if entropy[2] > 3.0 || entropy[1]-entropy[2] < 0.2 { + return false + } else { + *num_literal_contexts = 13 + *literal_context_map = kStaticContextMapComplexUTF8[:] + return true + } + } +} + +func decideOverLiteralContextModeling(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) { + if quality < minQualityForContextModeling || length < 64 { + return + } else if shouldUseComplexStaticContextMap(input, start_pos, length, mask, quality, size_hint, num_literal_contexts, literal_context_map) { + } else /* Context map was already set, nothing else to do. */ + { + var end_pos uint = start_pos + length + /* Gather bi-gram data of the UTF8 byte prefixes. To make the analysis of + UTF8 data faster we only examine 64 byte long strides at every 4kB + intervals. */ + + var bigram_prefix_histo = [9]uint32{0} + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var lut = [4]int{0, 0, 1, 2} + var stride_end_pos uint = start_pos + 64 + var prev int = lut[input[start_pos&mask]>>6] * 3 + var pos uint + for pos = start_pos + 1; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + bigram_prefix_histo[prev+lut[literal>>6]]++ + prev = lut[literal>>6] * 3 + } + } + + chooseContextMap(quality, bigram_prefix_histo[0:], num_literal_contexts, literal_context_map) + } +} + +func shouldCompress_encode(data []byte, mask uint, last_flush_pos uint64, bytes uint, num_literals uint, num_commands uint) bool { + /* TODO: find more precise minimal block overhead. 
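+   For now a sampling heuristic is used: when almost no backward references
+   were found, every 13th literal is histogrammed and the block is stored
+   uncompressed if those samples need more than about 7.92 bits each.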
*/ + if bytes <= 2 { + return false + } + if num_commands < (bytes>>8)+2 { + if float64(num_literals) > 0.99*float64(bytes) { + var literal_histo = [256]uint32{0} + const kSampleRate uint32 = 13 + const kMinEntropy float64 = 7.92 + var bit_cost_threshold float64 = float64(bytes) * kMinEntropy / float64(kSampleRate) + var t uint = uint((uint32(bytes) + kSampleRate - 1) / kSampleRate) + var pos uint32 = uint32(last_flush_pos) + var i uint + for i = 0; i < t; i++ { + literal_histo[data[pos&uint32(mask)]]++ + pos += kSampleRate + } + + if bitsEntropy(literal_histo[:], 256) > bit_cost_threshold { + return false + } + } + } + + return true +} + +/* Chooses the literal context mode for a metablock */ +func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint, length uint) int { + /* We only do the computation for the option of something else than + CONTEXT_UTF8 for the highest qualities */ + if params.quality >= minQualityForHqBlockSplitting && !isMostlyUTF8(data, pos, mask, length, kMinUTF8Ratio) { + return contextSigned + } + + return contextUTF8 +} + +func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) { + var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos) + var last_bytes uint16 + var last_bytes_bits byte + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var block_params encoderParams = *params + + if bytes == 0 { + /* Write the ISLAST and ISEMPTY bits. */ + writeBits(2, 3, storage_ix, storage) + + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + if !shouldCompress_encode(data, mask, last_flush_pos, bytes, num_literals, uint(len(commands))) { + /* Restore the distance cache, as its last update by + CreateBackwardReferences is now unused. */ + copy(dist_cache, saved_dist_cache[:4]) + + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + return + } + + assert(*storage_ix <= 14) + last_bytes = uint16(storage[1])<<8 | uint16(storage[0]) + last_bytes_bits = byte(*storage_ix) + if params.quality <= maxQualityForStaticEntropyCodes { + storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else if params.quality < minQualityForBlockSplit { + storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else { + mb := getMetaBlockSplit() + if params.quality < minQualityForHqBlockSplitting { + var num_literal_contexts uint = 1 + var literal_context_map []uint32 = nil + if !params.disable_literal_context_modeling { + decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map) + } + + buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb) + } else { + buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb) + } + + if params.quality >= minQualityForOptimizeHistograms { + /* The number of distance symbols effectively used for distance + histograms. It might be less than distance alphabet size + for "Large Window Brotli" (32-bit). 
*/ + var num_effective_dist_codes uint32 = block_params.dist.alphabet_size + if num_effective_dist_codes > numHistogramDistanceSymbols { + num_effective_dist_codes = numHistogramDistanceSymbols + } + + optimizeHistograms(num_effective_dist_codes, mb) + } + + storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage) + freeMetaBlockSplit(mb) + } + + if bytes+4 < *storage_ix>>3 { + /* Restore the distance cache and last byte. */ + copy(dist_cache, saved_dist_cache[:4]) + + storage[0] = byte(last_bytes) + storage[1] = byte(last_bytes >> 8) + *storage_ix = uint(last_bytes_bits) + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + } +} + +func chooseDistanceParams(params *encoderParams) { + var distance_postfix_bits uint32 = 0 + var num_direct_distance_codes uint32 = 0 + + if params.quality >= minQualityForNonzeroDistanceParams { + var ndirect_msb uint32 + if params.mode == modeFont { + distance_postfix_bits = 1 + num_direct_distance_codes = 12 + } else { + distance_postfix_bits = params.dist.distance_postfix_bits + num_direct_distance_codes = params.dist.num_direct_distance_codes + } + + ndirect_msb = (num_direct_distance_codes >> distance_postfix_bits) & 0x0F + if distance_postfix_bits > maxNpostfix || num_direct_distance_codes > maxNdirect || ndirect_msb<>25)), (last_command.dist_prefix_&0x3FF == 0), &last_command.cmd_prefix_) + } +} + +/* + Processes the accumulated input data and writes + the new output meta-block to s.dest, if one has been + created (otherwise the processed input data is buffered internally). + If |is_last| or |force_flush| is true, an output meta-block is + always created. However, until |is_last| is true encoder may retain up + to 7 bits of the last byte of output. To force encoder to dump the remaining + bits use WriteMetadata() to append an empty meta-data block. + Returns false if the size of the input data is larger than + input_block_size(). +*/ +func encodeData(s *Writer, is_last bool, force_flush bool) bool { + var delta uint64 = unprocessedInputSize(s) + var bytes uint32 = uint32(delta) + var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_) + var data []byte + var mask uint32 + var literal_context_mode int + + data = s.ringbuffer_.buffer_ + mask = s.ringbuffer_.mask_ + + /* Adding more blocks after "last" block is forbidden. */ + if s.is_last_block_emitted_ { + return false + } + if is_last { + s.is_last_block_emitted_ = true + } + + if delta > uint64(inputBlockSize(s)) { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) { + s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize) + s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize) + } else { + s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize] + s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize] + } + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + var storage []byte + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if delta == 0 && !is_last { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. 
*/ + return true + } + + storage = s.getStorage(int(2*bytes + 503)) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, uint(bytes), &table_size) + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage) + } + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + updateLastProcessedPos(s) + s.writeOutput(storage[:storage_ix>>3]) + return true + } + { + /* Theoretical max number of commands is 1 per 2 bytes. */ + newsize := len(s.commands) + int(bytes)/2 + 1 + if newsize > cap(s.commands) { + /* Reserve a bit more memory to allow merging with a next block + without reallocation: that would impact speed. */ + newsize += int(bytes/4) + 16 + + new_commands := make([]command, len(s.commands), newsize) + if s.commands != nil { + copy(new_commands, s.commands) + } + + s.commands = new_commands + } + } + + initOrStitchToPreviousBlock(&s.hasher_, data, uint(mask), &s.params, uint(wrapped_last_processed_pos), uint(bytes), is_last) + + literal_context_mode = chooseContextMode(&s.params, data, uint(wrapPosition(s.last_flush_pos_)), uint(mask), uint(s.input_pos_-s.last_flush_pos_)) + + if len(s.commands) != 0 && s.last_insert_len_ == 0 { + extendLastCommand(s, &bytes, &wrapped_last_processed_pos) + } + + if s.params.quality == zopflificationQuality { + assert(s.params.hasher.type_ == 10) + createZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_.(*h10), s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else if s.params.quality == hqZopflificationQuality { + assert(s.params.hasher.type_ == 10) + createHqZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else { + createBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } + { + var max_length uint = maxMetablockSize(&s.params) + var max_literals uint = max_length / 8 + max_commands := int(max_length / 8) + var processed_bytes uint = uint(s.input_pos_ - s.last_flush_pos_) + var next_input_fits_metablock bool = (processed_bytes+inputBlockSize(s) <= max_length) + var should_flush bool = (s.params.quality < minQualityForBlockSplit && s.num_literals_+uint(len(s.commands)) >= maxNumDelayedSymbols) + /* If maximal possible additional block doesn't fit metablock, flush now. */ + /* TODO: Postpone decision until next block arrives? */ + + /* If block splitting is not used, then flush as soon as there is some + amount of commands / literals produced. */ + if !is_last && !force_flush && !should_flush && next_input_fits_metablock && s.num_literals_ < max_literals && len(s.commands) < max_commands { + /* Merge with next input block. Everything will happen later. */ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + return true + } + } + + /* Create the last insert-only command. 
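+   Literals left over after the last copy are flushed as a final insert-only
+   command so that the meta-block accounts for every input byte.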
*/ + if s.last_insert_len_ > 0 { + s.commands = append(s.commands, makeInsertCommand(s.last_insert_len_)) + s.num_literals_ += s.last_insert_len_ + s.last_insert_len_ = 0 + } + + if !is_last && s.input_pos_ == s.last_flush_pos_ { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. */ + return true + } + + assert(s.input_pos_ >= s.last_flush_pos_) + assert(s.input_pos_ > s.last_flush_pos_ || is_last) + assert(s.input_pos_-s.last_flush_pos_ <= 1<<24) + { + var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_) + var storage []byte = s.getStorage(int(2*metablock_size + 503)) + var storage_ix uint = uint(s.last_bytes_bits_) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage) + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + s.last_flush_pos_ = s.input_pos_ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + if s.last_flush_pos_ > 0 { + s.prev_byte_ = data[(uint32(s.last_flush_pos_)-1)&mask] + } + + if s.last_flush_pos_ > 1 { + s.prev_byte2_ = data[uint32(s.last_flush_pos_-2)&mask] + } + + s.commands = s.commands[:0] + s.num_literals_ = 0 + + /* Save the state of the distance cache in case we need to restore it for + emitting an uncompressed block. */ + copy(s.saved_dist_cache_[:], s.dist_cache_[:]) + + s.writeOutput(storage[:storage_ix>>3]) + return true + } +} + +/* Dumps remaining output bits and metadata header to |header|. + Returns number of produced bytes. + REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long. + REQUIRED: |block_size| <= (1 << 24). 
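+   The produced header is an empty metadata meta-block: ISLAST = 0, the two-bit
+   value 3 (MNIBBLES = 0), a reserved zero bit, then MSKIPBYTES and the value
+   block_size - 1 itself. For example, block_size = 300 needs nbits = 9, so
+   MSKIPBYTES = 2 and the following 16 bits hold 299.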
*/ +func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint { + storage_ix := uint(s.last_bytes_bits_) + header[0] = byte(s.last_bytes_) + header[1] = byte(s.last_bytes_ >> 8) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + writeBits(1, 0, &storage_ix, header) + writeBits(2, 3, &storage_ix, header) + writeBits(1, 0, &storage_ix, header) + if block_size == 0 { + writeBits(2, 0, &storage_ix, header) + } else { + var nbits uint32 + if block_size == 1 { + nbits = 0 + } else { + nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1 + } + var nbytes uint32 = (nbits + 7) / 8 + writeBits(2, uint64(nbytes), &storage_ix, header) + writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header) + } + + return (storage_ix + 7) >> 3 +} + +func injectBytePaddingBlock(s *Writer) { + var seal uint32 = uint32(s.last_bytes_) + var seal_bits uint = uint(s.last_bytes_bits_) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + /* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */ + seal |= 0x6 << seal_bits + + seal_bits += 6 + + destination := s.tiny_buf_.u8[:] + + destination[0] = byte(seal) + if seal_bits > 8 { + destination[1] = byte(seal >> 8) + } + if seal_bits > 16 { + destination[2] = byte(seal >> 16) + } + s.writeOutput(destination[:(seal_bits+7)>>3]) +} + +func checkFlushComplete(s *Writer) { + if s.stream_state_ == streamFlushRequested && s.err == nil { + s.stream_state_ = streamProcessing + } +} + +func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + var block_size_limit uint = uint(1) << s.params.lgwin + var buf_size uint = brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit)) + var command_buf []uint32 = nil + var literal_buf []byte = nil + if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) { + s.command_buf_ = make([]uint32, buf_size) + s.literal_buf_ = make([]byte, buf_size) + } else { + s.command_buf_ = s.command_buf_[:buf_size] + s.literal_buf_ = s.literal_buf_[:buf_size] + } + + command_buf = s.command_buf_ + literal_buf = s.literal_buf_ + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress block only when stream is not + finished, there is no pending flush request, and there is either + additional input or pending operation. 
*/ + if s.stream_state_ == streamProcessing && (*available_in != 0 || op != int(operationProcess)) { + var block_size uint = brotli_min_size_t(block_size_limit, *available_in) + var is_last bool = (*available_in == block_size) && (op == int(operationFinish)) + var force_flush bool = (*available_in == block_size) && (op == int(operationFlush)) + var max_out_size uint = 2*block_size + 503 + var storage []byte = nil + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if force_flush && block_size == 0 { + s.stream_state_ = streamFlushRequested + continue + } + + storage = s.getStorage(int(max_out_size)) + + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, block_size, &table_size) + + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage) + } + + *next_in = (*next_in)[block_size:] + *available_in -= block_size + var out_bytes uint = storage_ix >> 3 + s.writeOutput(storage[:out_bytes]) + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + + break + } + + checkFlushComplete(s) + return true +} + +func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool { + if *available_in > 1<<24 { + return false + } + + /* Switch to metadata block workflow, if required. */ + if s.stream_state_ == streamProcessing { + s.remaining_metadata_bytes_ = uint32(*available_in) + s.stream_state_ = streamMetadataHead + } + + if s.stream_state_ != streamMetadataHead && s.stream_state_ != streamMetadataBody { + return false + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + if s.input_pos_ != s.last_flush_pos_ { + var result bool = encodeData(s, false, true) + if !result { + return false + } + continue + } + + if s.stream_state_ == streamMetadataHead { + n := writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.tiny_buf_.u8[:]) + s.writeOutput(s.tiny_buf_.u8[:n]) + s.stream_state_ = streamMetadataBody + continue + } else { + /* Exit workflow only when there is no more input and no more output. + Otherwise client may continue producing empty metadata blocks. */ + if s.remaining_metadata_bytes_ == 0 { + s.remaining_metadata_bytes_ = math.MaxUint32 + s.stream_state_ = streamProcessing + break + } + + /* This guarantees progress in "TakeOutput" workflow. 
*/ + var c uint32 = brotli_min_uint32_t(s.remaining_metadata_bytes_, 16) + copy(s.tiny_buf_.u8[:], (*next_in)[:c]) + *next_in = (*next_in)[c:] + *available_in -= uint(c) + s.remaining_metadata_bytes_ -= c + s.writeOutput(s.tiny_buf_.u8[:c]) + + continue + } + } + + return true +} + +func updateSizeHint(s *Writer, available_in uint) { + if s.params.size_hint == 0 { + var delta uint64 = unprocessedInputSize(s) + var tail uint64 = uint64(available_in) + var limit uint32 = 1 << 30 + var total uint32 + if (delta >= uint64(limit)) || (tail >= uint64(limit)) || ((delta + tail) >= uint64(limit)) { + total = limit + } else { + total = uint32(delta + tail) + } + + s.params.size_hint = uint(total) + } +} + +func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + if !ensureInitialized(s) { + return false + } + + /* Unfinished metadata block; check requirements. */ + if s.remaining_metadata_bytes_ != math.MaxUint32 { + if uint32(*available_in) != s.remaining_metadata_bytes_ { + return false + } + if op != int(operationEmitMetadata) { + return false + } + } + + if op == int(operationEmitMetadata) { + updateSizeHint(s, 0) /* First data metablock might be emitted here. */ + return processMetadata(s, available_in, next_in) + } + + if s.stream_state_ == streamMetadataHead || s.stream_state_ == streamMetadataBody { + return false + } + + if s.stream_state_ != streamProcessing && *available_in != 0 { + return false + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + return encoderCompressStreamFast(s, op, available_in, next_in) + } + + for { + var remaining_block_size uint = remainingInputBlockSize(s) + + if remaining_block_size != 0 && *available_in != 0 { + var copy_input_size uint = brotli_min_size_t(remaining_block_size, *available_in) + copyInputToRingBuffer(s, copy_input_size, *next_in) + *next_in = (*next_in)[copy_input_size:] + *available_in -= copy_input_size + continue + } + + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress data only when stream is not + finished and there is no pending flush request. */ + if s.stream_state_ == streamProcessing { + if remaining_block_size == 0 || op != int(operationProcess) { + var is_last bool = ((*available_in == 0) && op == int(operationFinish)) + var force_flush bool = ((*available_in == 0) && op == int(operationFlush)) + var result bool + updateSizeHint(s, *available_in) + result = encodeData(s, is_last, force_flush) + if !result { + return false + } + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + } + + break + } + + checkFlushComplete(s) + return true +} + +func (w *Writer) writeOutput(data []byte) { + if w.err != nil { + return + } + + _, w.err = w.dst.Write(data) + if w.err == nil { + checkFlushComplete(w) + } +} diff --git a/vendor/github.com/andybalholm/brotli/encoder.go b/vendor/github.com/andybalholm/brotli/encoder.go new file mode 100644 index 0000000..650d1e4 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder.go @@ -0,0 +1,168 @@ +package brotli + +import "github.com/andybalholm/brotli/matchfinder" + +// An Encoder implements the matchfinder.Encoder interface, writing in Brotli format. 
+type Encoder struct { + wroteHeader bool + bw bitWriter + distCache []distanceCode +} + +func (e *Encoder) Reset() { + e.wroteHeader = false + e.bw = bitWriter{} +} + +func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, lastBlock bool) []byte { + e.bw.dst = dst + if !e.wroteHeader { + e.bw.writeBits(4, 15) + e.wroteHeader = true + } + + var literalHisto [256]uint32 + var commandHisto [704]uint32 + var distanceHisto [64]uint32 + literalCount := 0 + commandCount := 0 + distanceCount := 0 + + if len(e.distCache) < len(matches) { + e.distCache = make([]distanceCode, len(matches)) + } + + // first pass: build the histograms + pos := 0 + + // d is the ring buffer of the last 4 distances. + d := [4]int{-10, -10, -10, -10} + for i, m := range matches { + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + literalHisto[c]++ + } + literalCount += m.Unmatched + } + + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + commandHisto[command]++ + commandCount++ + + if command >= 128 && m.Length != 0 { + var distCode distanceCode + switch m.Distance { + case d[3]: + distCode.code = 0 + case d[2]: + distCode.code = 1 + case d[1]: + distCode.code = 2 + case d[0]: + distCode.code = 3 + case d[3] - 1: + distCode.code = 4 + case d[3] + 1: + distCode.code = 5 + case d[3] - 2: + distCode.code = 6 + case d[3] + 2: + distCode.code = 7 + case d[3] - 3: + distCode.code = 8 + case d[3] + 3: + distCode.code = 9 + + // In my testing, codes 10–15 actually reduced the compression ratio. + + default: + distCode = getDistanceCode(m.Distance) + } + e.distCache[i] = distCode + distanceHisto[distCode.code]++ + distanceCount++ + if distCode.code != 0 { + d[0], d[1], d[2], d[3] = d[1], d[2], d[3], m.Distance + } + } + + pos += m.Unmatched + m.Length + } + + storeMetaBlockHeaderBW(uint(len(src)), false, &e.bw) + e.bw.writeBits(13, 0) + + var literalDepths [256]byte + var literalBits [256]uint16 + buildAndStoreHuffmanTreeFastBW(literalHisto[:], uint(literalCount), 8, literalDepths[:], literalBits[:], &e.bw) + + var commandDepths [704]byte + var commandBits [704]uint16 + buildAndStoreHuffmanTreeFastBW(commandHisto[:], uint(commandCount), 10, commandDepths[:], commandBits[:], &e.bw) + + var distanceDepths [64]byte + var distanceBits [64]uint16 + buildAndStoreHuffmanTreeFastBW(distanceHisto[:], uint(distanceCount), 6, distanceDepths[:], distanceBits[:], &e.bw) + + pos = 0 + for i, m := range matches { + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. 
+ copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + e.bw.writeBits(uint(commandDepths[command]), uint64(commandBits[command])) + if kInsExtra[insertCode] > 0 { + e.bw.writeBits(uint(kInsExtra[insertCode]), uint64(m.Unmatched)-uint64(kInsBase[insertCode])) + } + if kCopyExtra[copyCode] > 0 { + e.bw.writeBits(uint(kCopyExtra[copyCode]), uint64(m.Length)-uint64(kCopyBase[copyCode])) + } + + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + e.bw.writeBits(uint(literalDepths[c]), uint64(literalBits[c])) + } + } + + if command >= 128 && m.Length != 0 { + distCode := e.distCache[i] + e.bw.writeBits(uint(distanceDepths[distCode.code]), uint64(distanceBits[distCode.code])) + if distCode.nExtra > 0 { + e.bw.writeBits(distCode.nExtra, distCode.extraBits) + } + } + + pos += m.Unmatched + m.Length + } + + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + } + return e.bw.dst +} + +type distanceCode struct { + code int + nExtra uint + extraBits uint64 +} + +func getDistanceCode(distance int) distanceCode { + d := distance + 3 + nbits := log2FloorNonZero(uint(d)) - 1 + prefix := (d >> nbits) & 1 + offset := (2 + prefix) << nbits + distcode := int(2*(nbits-1)) + prefix + 16 + extra := d - offset + return distanceCode{distcode, uint(nbits), uint64(extra)} +} diff --git a/vendor/github.com/andybalholm/brotli/encoder_dict.go b/vendor/github.com/andybalholm/brotli/encoder_dict.go new file mode 100644 index 0000000..55c051c --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder_dict.go @@ -0,0 +1,22 @@ +package brotli + +/* Dictionary data (words and transforms) for 1 possible context */ +type encoderDictionary struct { + words *dictionary + cutoffTransformsCount uint32 + cutoffTransforms uint64 + hash_table []uint16 + buckets []uint16 + dict_words []dictWord +} + +func initEncoderDictionary(dict *encoderDictionary) { + dict.words = getDictionary() + + dict.hash_table = kStaticDictionaryHash[:] + dict.buckets = kStaticDictionaryBuckets[:] + dict.dict_words = kStaticDictionaryWords[:] + + dict.cutoffTransformsCount = kCutoffTransformsCount + dict.cutoffTransforms = kCutoffTransforms +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode.go b/vendor/github.com/andybalholm/brotli/entropy_encode.go new file mode 100644 index 0000000..3f469a3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode.go @@ -0,0 +1,592 @@ +package brotli + +import "math" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Entropy encoding (Huffman) utilities. */ + +/* A node of a Huffman tree. */ +type huffmanTree struct { + total_count_ uint32 + index_left_ int16 + index_right_or_value_ int16 +} + +func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) { + self.total_count_ = count + self.index_left_ = left + self.index_right_or_value_ = right +} + +/* Input size optimized Shell sort. */ +type huffmanTreeComparator func(huffmanTree, huffmanTree) bool + +var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1} + +func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeComparator) { + if n < 13 { + /* Insertion sort. 
*/ + var i uint + for i = 1; i < n; i++ { + var tmp huffmanTree = items[i] + var k uint = i + var j uint = i - 1 + for comparator(tmp, items[j]) { + items[k] = items[j] + k = j + if j == 0 { + break + } + j-- + } + + items[k] = tmp + } + + return + } else { + var g int + if n < 57 { + g = 2 + } else { + g = 0 + } + for ; g < 6; g++ { + var gap uint = sortHuffmanTreeItems_gaps[g] + var i uint + for i = gap; i < n; i++ { + var j uint = i + var tmp huffmanTree = items[i] + for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap { + items[j] = items[j-gap] + } + + items[j] = tmp + } + } + } +} + +/* Returns 1 if assignment of depths succeeded, otherwise 0. */ +func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool { + var stack [16]int + var level int = 0 + var p int = p0 + assert(max_depth <= 15) + stack[0] = -1 + for { + if pool[p].index_left_ >= 0 { + level++ + if level > max_depth { + return false + } + stack[level] = int(pool[p].index_right_or_value_) + p = int(pool[p].index_left_) + continue + } else { + depth[pool[p].index_right_or_value_] = byte(level) + } + + for level >= 0 && stack[level] == -1 { + level-- + } + if level < 0 { + return true + } + p = stack[level] + stack[level] = -1 + } +} + +/* Sort the root nodes, least popular first. */ +func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool { + if v0.total_count_ != v1.total_count_ { + return v0.total_count_ < v1.total_count_ + } + + return v0.index_right_or_value_ > v1.index_right_or_value_ +} + +/* This function will create a Huffman tree. + + The catch here is that the tree cannot be arbitrarily deep. + Brotli specifies a maximum depth of 15 bits for "code trees" + and 7 bits for "code length code trees." + + count_limit is the value that is to be faked as the minimum value + and this minimum value is raised until the tree matches the + maximum length requirement. + + This algorithm is not of excellent performance for very long data blocks, + especially when population counts are longer than 2**tree_limit, but + we are not planning to use this with extremely long blocks. + + See http://en.wikipedia.org/wiki/Huffman_coding */ +func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) { + var count_limit uint32 + var sentinel huffmanTree + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + /* For block sizes below 64 kB, we never need to do a second iteration + of this loop. Probably all of our block sizes will be smaller than + that, so this loop is mostly of academic interest. If we actually + would need this, we would be better off with the Katajainen algorithm. */ + for count_limit = 1; ; count_limit *= 2 { + var n uint = 0 + var i uint + var j uint + var k uint + for i = length; i != 0; { + i-- + if data[i] != 0 { + var count uint32 = brotli_max_uint32_t(data[i], count_limit) + initHuffmanTree(&tree[n], count, -1, int16(i)) + n++ + } + } + + if n == 1 { + depth[tree[0].index_right_or_value_] = 1 /* Only one element. */ + break + } + + sortHuffmanTreeItems(tree, n, huffmanTreeComparator(sortHuffmanTree)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + tree[n] = sentinel + + tree[n+1] = sentinel + + i = 0 /* Points to the next leaf node. */ + j = n + 1 /* Points to the next non-leaf node. 
*/ + for k = n - 1; k != 0; k-- { + var left uint + var right uint + if tree[i].total_count_ <= tree[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if tree[i].total_count_ <= tree[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + { + /* The sentinel node becomes the parent node. */ + var j_end uint = 2*n - k + tree[j_end].total_count_ = tree[left].total_count_ + tree[right].total_count_ + tree[j_end].index_left_ = int16(left) + tree[j_end].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + tree[j_end+1] = sentinel + } + } + + if setDepth(int(2*n-1), tree[0:], depth, tree_limit) { + /* We need to pack the Huffman tree in tree_limit bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } +} + +func reverse(v []byte, start uint, end uint) { + end-- + for start < end { + var tmp byte = v[start] + v[start] = v[end] + v[end] = tmp + start++ + end-- + } +} + +func writeHuffmanTreeRepetitions(previous_value byte, value byte, repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + assert(repetitions > 0) + if previous_value != value { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions == 7 { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions < 3 { + var i uint + for i = 0; i < repetitions; i++ { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + } + } else { + var start uint = *tree_size + repetitions -= 3 + for { + tree[*tree_size] = repeatPreviousCodeLength + extra_bits_data[*tree_size] = byte(repetitions & 0x3) + (*tree_size)++ + repetitions >>= 2 + if repetitions == 0 { + break + } + + repetitions-- + } + + reverse(tree, start, *tree_size) + reverse(extra_bits_data, start, *tree_size) + } +} + +func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + if repetitions == 11 { + tree[*tree_size] = 0 + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions < 3 { + var i uint + for i = 0; i < repetitions; i++ { + tree[*tree_size] = 0 + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + } + } else { + var start uint = *tree_size + repetitions -= 3 + for { + tree[*tree_size] = repeatZeroCodeLength + extra_bits_data[*tree_size] = byte(repetitions & 0x7) + (*tree_size)++ + repetitions >>= 3 + if repetitions == 0 { + break + } + + repetitions-- + } + + reverse(tree, start, *tree_size) + reverse(extra_bits_data, start, *tree_size) + } +} + +/* Change the population counts in a way that the consequent + Huffman tree compression, especially its RLE-part will be more + likely to compress this data more efficiently. + + length contains the size of the histogram. + counts contains the population counts. + good_for_rle is a buffer of at least length size */ +func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) { + var nonzero_count uint = 0 + var stride uint + var limit uint + var sum uint + var streak_limit uint = 1240 + var i uint + /* Let's make the Huffman code more compatible with RLE encoding. */ + for i = 0; i < length; i++ { + if counts[i] != 0 { + nonzero_count++ + } + } + + if nonzero_count < 16 { + return + } + + for length != 0 && counts[length-1] == 0 { + length-- + } + + if length == 0 { + return /* All zeros. */ + } + + /* Now counts[0..length - 1] does not have trailing zeros. 
*/ + { + var nonzeros uint = 0 + var smallest_nonzero uint32 = 1 << 30 + for i = 0; i < length; i++ { + if counts[i] != 0 { + nonzeros++ + if smallest_nonzero > counts[i] { + smallest_nonzero = counts[i] + } + } + } + + if nonzeros < 5 { + /* Small histogram will model it well. */ + return + } + + if smallest_nonzero < 4 { + var zeros uint = length - nonzeros + if zeros < 6 { + for i = 1; i < length-1; i++ { + if counts[i-1] != 0 && counts[i] == 0 && counts[i+1] != 0 { + counts[i] = 1 + } + } + } + } + + if nonzeros < 28 { + return + } + } + + /* 2) Let's mark all population counts that already can be encoded + with an RLE code. */ + for i := 0; i < int(length); i++ { + good_for_rle[i] = 0 + } + { + var symbol uint32 = counts[0] + /* Let's not spoil any of the existing good RLE codes. + Mark any seq of 0's that is longer as 5 as a good_for_rle. + Mark any seq of non-0's that is longer as 7 as a good_for_rle. */ + + var step uint = 0 + for i = 0; i <= length; i++ { + if i == length || counts[i] != symbol { + if (symbol == 0 && step >= 5) || (symbol != 0 && step >= 7) { + var k uint + for k = 0; k < step; k++ { + good_for_rle[i-k-1] = 1 + } + } + + step = 1 + if i != length { + symbol = counts[i] + } + } else { + step++ + } + } + } + + /* 3) Let's replace those population counts that lead to more RLE codes. + Math here is in 24.8 fixed point representation. */ + stride = 0 + + limit = uint(256*(counts[0]+counts[1]+counts[2])/3 + 420) + sum = 0 + for i = 0; i <= length; i++ { + if i == length || good_for_rle[i] != 0 || (i != 0 && good_for_rle[i-1] != 0) || (256*counts[i]-uint32(limit)+uint32(streak_limit)) >= uint32(2*streak_limit) { + if stride >= 4 || (stride >= 3 && sum == 0) { + var k uint + var count uint = (sum + stride/2) / stride + /* The stride must end, collapse what we have, if we have enough (4). */ + if count == 0 { + count = 1 + } + + if sum == 0 { + /* Don't make an all zeros stride to be upgraded to ones. */ + count = 0 + } + + for k = 0; k < stride; k++ { + /* We don't want to change value at counts[i], + that is already belonging to the next stride. Thus - 1. */ + counts[i-k-1] = uint32(count) + } + } + + stride = 0 + sum = 0 + if i < length-2 { + /* All interesting strides have a count of at least 4, */ + /* at least when non-zeros. */ + limit = uint(256*(counts[i]+counts[i+1]+counts[i+2])/3 + 420) + } else if i < length { + limit = uint(256 * counts[i]) + } else { + limit = 0 + } + } + + stride++ + if i != length { + sum += uint(counts[i]) + if stride >= 4 { + limit = (256*sum + stride/2) / stride + } + + if stride == 4 { + limit += 120 + } + } + } +} + +func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use_rle_for_zero *bool) { + var total_reps_zero uint = 0 + var total_reps_non_zero uint = 0 + var count_reps_zero uint = 1 + var count_reps_non_zero uint = 1 + var i uint + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + if reps >= 3 && value == 0 { + total_reps_zero += reps + count_reps_zero++ + } + + if reps >= 4 && value != 0 { + total_reps_non_zero += reps + count_reps_non_zero++ + } + + i += reps + } + + *use_rle_for_non_zero = total_reps_non_zero > count_reps_non_zero*2 + *use_rle_for_zero = total_reps_zero > count_reps_zero*2 +} + +/* Write a Huffman tree from bit depths into the bit-stream representation + of a Huffman tree. 
The generated Huffman tree is to be compressed once + more using a Huffman tree */ +func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + var previous_value byte = initialRepeatedCodeLength + var i uint + var use_rle_for_non_zero bool = false + var use_rle_for_zero bool = false + var new_length uint = length + /* Throw away trailing zeros. */ + for i = 0; i < length; i++ { + if depth[length-i-1] == 0 { + new_length-- + } else { + break + } + } + + /* First gather statistics on if it is a good idea to do RLE. */ + if length > 50 { + /* Find RLE coding for longer codes. + Shorter codes seem not to benefit from RLE. */ + decideOverRLEUse(depth, new_length, &use_rle_for_non_zero, &use_rle_for_zero) + } + + /* Actual RLE coding. */ + for i = 0; i < new_length; { + var value byte = depth[i] + var reps uint = 1 + if (value != 0 && use_rle_for_non_zero) || (value == 0 && use_rle_for_zero) { + var k uint + for k = i + 1; k < new_length && depth[k] == value; k++ { + reps++ + } + } + + if value == 0 { + writeHuffmanTreeRepetitionsZeros(reps, tree_size, tree, extra_bits_data) + } else { + writeHuffmanTreeRepetitions(previous_value, value, reps, tree_size, tree, extra_bits_data) + previous_value = value + } + + i += reps + } +} + +var reverseBits_kLut = [16]uint{ + 0x00, + 0x08, + 0x04, + 0x0C, + 0x02, + 0x0A, + 0x06, + 0x0E, + 0x01, + 0x09, + 0x05, + 0x0D, + 0x03, + 0x0B, + 0x07, + 0x0F, +} + +func reverseBits(num_bits uint, bits uint16) uint16 { + var retval uint = reverseBits_kLut[bits&0x0F] + var i uint + for i = 4; i < num_bits; i += 4 { + retval <<= 4 + bits = uint16(bits >> 4) + retval |= reverseBits_kLut[bits&0x0F] + } + + retval >>= ((0 - num_bits) & 0x03) + return uint16(retval) +} + +/* 0..15 are values for bits */ +const maxHuffmanBits = 16 + +/* Get the actual bit values for a tree of bit depths. */ +func convertBitDepthsToSymbols(depth []byte, len uint, bits []uint16) { + var bl_count = [maxHuffmanBits]uint16{0} + var next_code [maxHuffmanBits]uint16 + var i uint + /* In Brotli, all bit depths are [1..15] + 0 bit depth means that the symbol does not exist. 
*/ + + var code int = 0 + for i = 0; i < len; i++ { + bl_count[depth[i]]++ + } + + bl_count[0] = 0 + next_code[0] = 0 + for i = 1; i < maxHuffmanBits; i++ { + code = (code + int(bl_count[i-1])) << 1 + next_code[i] = uint16(code) + } + + for i = 0; i < len; i++ { + if depth[i] != 0 { + bits[i] = reverseBits(uint(depth[i]), next_code[depth[i]]) + next_code[depth[i]]++ + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go new file mode 100644 index 0000000..294aff4 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go @@ -0,0 +1,4399 @@ +package brotli + +var kCodeLengthDepth = [18]byte{4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 0, 4, 4} + +var kStaticCommandCodeDepth = [numCommandSymbols]byte{ + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 
11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, +} + +var kStaticDistanceCodeDepth = [64]byte{ + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, +} + +var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7} + +func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) { + writeBits(40, 0x0000FF55555554, storage_ix, storage) +} + +func storeStaticCodeLengthCodeBW(bw *bitWriter) { + bw.writeBits(32, 0x55555554) + bw.writeBits(8, 0xFF) +} + +var kZeroRepsBits = [numCommandSymbols]uint64{ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000007, + 0x00000017, + 0x00000027, + 0x00000037, + 0x00000047, + 0x00000057, + 0x00000067, + 0x00000077, + 0x00000770, + 0x00000b87, + 0x00001387, + 0x00001b87, + 0x00002387, + 0x00002b87, + 0x00003387, + 0x00003b87, + 0x00000397, + 0x00000b97, + 0x00001397, + 0x00001b97, + 0x00002397, + 0x00002b97, + 0x00003397, + 0x00003b97, + 0x000003a7, + 0x00000ba7, + 0x000013a7, + 0x00001ba7, + 0x000023a7, + 0x00002ba7, + 0x000033a7, + 0x00003ba7, + 0x000003b7, + 0x00000bb7, + 0x000013b7, + 0x00001bb7, + 0x000023b7, + 0x00002bb7, + 0x000033b7, + 0x00003bb7, + 0x000003c7, + 0x00000bc7, + 0x000013c7, + 0x00001bc7, + 0x000023c7, + 0x00002bc7, + 0x000033c7, + 0x00003bc7, + 0x000003d7, + 0x00000bd7, + 0x000013d7, + 0x00001bd7, + 0x000023d7, + 0x00002bd7, + 0x000033d7, + 0x00003bd7, + 0x000003e7, + 0x00000be7, + 0x000013e7, + 0x00001be7, + 0x000023e7, + 0x00002be7, + 0x000033e7, + 0x00003be7, + 0x000003f7, + 0x00000bf7, + 0x000013f7, + 0x00001bf7, + 0x000023f7, + 0x00002bf7, + 0x000033f7, + 0x00003bf7, + 0x0001c387, + 0x0005c387, + 0x0009c387, + 0x000dc387, + 0x0011c387, + 0x0015c387, + 0x0019c387, + 0x001dc387, + 0x0001cb87, + 0x0005cb87, + 0x0009cb87, + 0x000dcb87, + 0x0011cb87, + 0x0015cb87, + 0x0019cb87, + 0x001dcb87, + 0x0001d387, + 0x0005d387, + 0x0009d387, + 0x000dd387, + 0x0011d387, + 0x0015d387, + 0x0019d387, + 0x001dd387, + 0x0001db87, + 0x0005db87, + 0x0009db87, + 0x000ddb87, + 0x0011db87, + 0x0015db87, + 0x0019db87, + 0x001ddb87, + 0x0001e387, + 0x0005e387, + 0x0009e387, + 0x000de387, + 0x0011e387, + 0x0015e387, + 0x0019e387, + 0x001de387, + 0x0001eb87, + 0x0005eb87, + 0x0009eb87, + 0x000deb87, + 0x0011eb87, + 0x0015eb87, + 0x0019eb87, + 0x001deb87, + 0x0001f387, + 0x0005f387, 
+ 0x0009f387, + 0x000df387, + 0x0011f387, + 0x0015f387, + 0x0019f387, + 0x001df387, + 0x0001fb87, + 0x0005fb87, + 0x0009fb87, + 0x000dfb87, + 0x0011fb87, + 0x0015fb87, + 0x0019fb87, + 0x001dfb87, + 0x0001c397, + 0x0005c397, + 0x0009c397, + 0x000dc397, + 0x0011c397, + 0x0015c397, + 0x0019c397, + 0x001dc397, + 0x0001cb97, + 0x0005cb97, + 0x0009cb97, + 0x000dcb97, + 0x0011cb97, + 0x0015cb97, + 0x0019cb97, + 0x001dcb97, + 0x0001d397, + 0x0005d397, + 0x0009d397, + 0x000dd397, + 0x0011d397, + 0x0015d397, + 0x0019d397, + 0x001dd397, + 0x0001db97, + 0x0005db97, + 0x0009db97, + 0x000ddb97, + 0x0011db97, + 0x0015db97, + 0x0019db97, + 0x001ddb97, + 0x0001e397, + 0x0005e397, + 0x0009e397, + 0x000de397, + 0x0011e397, + 0x0015e397, + 0x0019e397, + 0x001de397, + 0x0001eb97, + 0x0005eb97, + 0x0009eb97, + 0x000deb97, + 0x0011eb97, + 0x0015eb97, + 0x0019eb97, + 0x001deb97, + 0x0001f397, + 0x0005f397, + 0x0009f397, + 0x000df397, + 0x0011f397, + 0x0015f397, + 0x0019f397, + 0x001df397, + 0x0001fb97, + 0x0005fb97, + 0x0009fb97, + 0x000dfb97, + 0x0011fb97, + 0x0015fb97, + 0x0019fb97, + 0x001dfb97, + 0x0001c3a7, + 0x0005c3a7, + 0x0009c3a7, + 0x000dc3a7, + 0x0011c3a7, + 0x0015c3a7, + 0x0019c3a7, + 0x001dc3a7, + 0x0001cba7, + 0x0005cba7, + 0x0009cba7, + 0x000dcba7, + 0x0011cba7, + 0x0015cba7, + 0x0019cba7, + 0x001dcba7, + 0x0001d3a7, + 0x0005d3a7, + 0x0009d3a7, + 0x000dd3a7, + 0x0011d3a7, + 0x0015d3a7, + 0x0019d3a7, + 0x001dd3a7, + 0x0001dba7, + 0x0005dba7, + 0x0009dba7, + 0x000ddba7, + 0x0011dba7, + 0x0015dba7, + 0x0019dba7, + 0x001ddba7, + 0x0001e3a7, + 0x0005e3a7, + 0x0009e3a7, + 0x000de3a7, + 0x0011e3a7, + 0x0015e3a7, + 0x0019e3a7, + 0x001de3a7, + 0x0001eba7, + 0x0005eba7, + 0x0009eba7, + 0x000deba7, + 0x0011eba7, + 0x0015eba7, + 0x0019eba7, + 0x001deba7, + 0x0001f3a7, + 0x0005f3a7, + 0x0009f3a7, + 0x000df3a7, + 0x0011f3a7, + 0x0015f3a7, + 0x0019f3a7, + 0x001df3a7, + 0x0001fba7, + 0x0005fba7, + 0x0009fba7, + 0x000dfba7, + 0x0011fba7, + 0x0015fba7, + 0x0019fba7, + 0x001dfba7, + 0x0001c3b7, + 0x0005c3b7, + 0x0009c3b7, + 0x000dc3b7, + 0x0011c3b7, + 0x0015c3b7, + 0x0019c3b7, + 0x001dc3b7, + 0x0001cbb7, + 0x0005cbb7, + 0x0009cbb7, + 0x000dcbb7, + 0x0011cbb7, + 0x0015cbb7, + 0x0019cbb7, + 0x001dcbb7, + 0x0001d3b7, + 0x0005d3b7, + 0x0009d3b7, + 0x000dd3b7, + 0x0011d3b7, + 0x0015d3b7, + 0x0019d3b7, + 0x001dd3b7, + 0x0001dbb7, + 0x0005dbb7, + 0x0009dbb7, + 0x000ddbb7, + 0x0011dbb7, + 0x0015dbb7, + 0x0019dbb7, + 0x001ddbb7, + 0x0001e3b7, + 0x0005e3b7, + 0x0009e3b7, + 0x000de3b7, + 0x0011e3b7, + 0x0015e3b7, + 0x0019e3b7, + 0x001de3b7, + 0x0001ebb7, + 0x0005ebb7, + 0x0009ebb7, + 0x000debb7, + 0x0011ebb7, + 0x0015ebb7, + 0x0019ebb7, + 0x001debb7, + 0x0001f3b7, + 0x0005f3b7, + 0x0009f3b7, + 0x000df3b7, + 0x0011f3b7, + 0x0015f3b7, + 0x0019f3b7, + 0x001df3b7, + 0x0001fbb7, + 0x0005fbb7, + 0x0009fbb7, + 0x000dfbb7, + 0x0011fbb7, + 0x0015fbb7, + 0x0019fbb7, + 0x001dfbb7, + 0x0001c3c7, + 0x0005c3c7, + 0x0009c3c7, + 0x000dc3c7, + 0x0011c3c7, + 0x0015c3c7, + 0x0019c3c7, + 0x001dc3c7, + 0x0001cbc7, + 0x0005cbc7, + 0x0009cbc7, + 0x000dcbc7, + 0x0011cbc7, + 0x0015cbc7, + 0x0019cbc7, + 0x001dcbc7, + 0x0001d3c7, + 0x0005d3c7, + 0x0009d3c7, + 0x000dd3c7, + 0x0011d3c7, + 0x0015d3c7, + 0x0019d3c7, + 0x001dd3c7, + 0x0001dbc7, + 0x0005dbc7, + 0x0009dbc7, + 0x000ddbc7, + 0x0011dbc7, + 0x0015dbc7, + 0x0019dbc7, + 0x001ddbc7, + 0x0001e3c7, + 0x0005e3c7, + 0x0009e3c7, + 0x000de3c7, + 0x0011e3c7, + 0x0015e3c7, + 0x0019e3c7, + 0x001de3c7, + 0x0001ebc7, + 0x0005ebc7, + 0x0009ebc7, + 0x000debc7, + 0x0011ebc7, + 0x0015ebc7, + 0x0019ebc7, + 
0x001debc7, + 0x0001f3c7, + 0x0005f3c7, + 0x0009f3c7, + 0x000df3c7, + 0x0011f3c7, + 0x0015f3c7, + 0x0019f3c7, + 0x001df3c7, + 0x0001fbc7, + 0x0005fbc7, + 0x0009fbc7, + 0x000dfbc7, + 0x0011fbc7, + 0x0015fbc7, + 0x0019fbc7, + 0x001dfbc7, + 0x0001c3d7, + 0x0005c3d7, + 0x0009c3d7, + 0x000dc3d7, + 0x0011c3d7, + 0x0015c3d7, + 0x0019c3d7, + 0x001dc3d7, + 0x0001cbd7, + 0x0005cbd7, + 0x0009cbd7, + 0x000dcbd7, + 0x0011cbd7, + 0x0015cbd7, + 0x0019cbd7, + 0x001dcbd7, + 0x0001d3d7, + 0x0005d3d7, + 0x0009d3d7, + 0x000dd3d7, + 0x0011d3d7, + 0x0015d3d7, + 0x0019d3d7, + 0x001dd3d7, + 0x0001dbd7, + 0x0005dbd7, + 0x0009dbd7, + 0x000ddbd7, + 0x0011dbd7, + 0x0015dbd7, + 0x0019dbd7, + 0x001ddbd7, + 0x0001e3d7, + 0x0005e3d7, + 0x0009e3d7, + 0x000de3d7, + 0x0011e3d7, + 0x0015e3d7, + 0x0019e3d7, + 0x001de3d7, + 0x0001ebd7, + 0x0005ebd7, + 0x0009ebd7, + 0x000debd7, + 0x0011ebd7, + 0x0015ebd7, + 0x0019ebd7, + 0x001debd7, + 0x0001f3d7, + 0x0005f3d7, + 0x0009f3d7, + 0x000df3d7, + 0x0011f3d7, + 0x0015f3d7, + 0x0019f3d7, + 0x001df3d7, + 0x0001fbd7, + 0x0005fbd7, + 0x0009fbd7, + 0x000dfbd7, + 0x0011fbd7, + 0x0015fbd7, + 0x0019fbd7, + 0x001dfbd7, + 0x0001c3e7, + 0x0005c3e7, + 0x0009c3e7, + 0x000dc3e7, + 0x0011c3e7, + 0x0015c3e7, + 0x0019c3e7, + 0x001dc3e7, + 0x0001cbe7, + 0x0005cbe7, + 0x0009cbe7, + 0x000dcbe7, + 0x0011cbe7, + 0x0015cbe7, + 0x0019cbe7, + 0x001dcbe7, + 0x0001d3e7, + 0x0005d3e7, + 0x0009d3e7, + 0x000dd3e7, + 0x0011d3e7, + 0x0015d3e7, + 0x0019d3e7, + 0x001dd3e7, + 0x0001dbe7, + 0x0005dbe7, + 0x0009dbe7, + 0x000ddbe7, + 0x0011dbe7, + 0x0015dbe7, + 0x0019dbe7, + 0x001ddbe7, + 0x0001e3e7, + 0x0005e3e7, + 0x0009e3e7, + 0x000de3e7, + 0x0011e3e7, + 0x0015e3e7, + 0x0019e3e7, + 0x001de3e7, + 0x0001ebe7, + 0x0005ebe7, + 0x0009ebe7, + 0x000debe7, + 0x0011ebe7, + 0x0015ebe7, + 0x0019ebe7, + 0x001debe7, + 0x0001f3e7, + 0x0005f3e7, + 0x0009f3e7, + 0x000df3e7, + 0x0011f3e7, + 0x0015f3e7, + 0x0019f3e7, + 0x001df3e7, + 0x0001fbe7, + 0x0005fbe7, + 0x0009fbe7, + 0x000dfbe7, + 0x0011fbe7, + 0x0015fbe7, + 0x0019fbe7, + 0x001dfbe7, + 0x0001c3f7, + 0x0005c3f7, + 0x0009c3f7, + 0x000dc3f7, + 0x0011c3f7, + 0x0015c3f7, + 0x0019c3f7, + 0x001dc3f7, + 0x0001cbf7, + 0x0005cbf7, + 0x0009cbf7, + 0x000dcbf7, + 0x0011cbf7, + 0x0015cbf7, + 0x0019cbf7, + 0x001dcbf7, + 0x0001d3f7, + 0x0005d3f7, + 0x0009d3f7, + 0x000dd3f7, + 0x0011d3f7, + 0x0015d3f7, + 0x0019d3f7, + 0x001dd3f7, + 0x0001dbf7, + 0x0005dbf7, + 0x0009dbf7, + 0x000ddbf7, + 0x0011dbf7, + 0x0015dbf7, + 0x0019dbf7, + 0x001ddbf7, + 0x0001e3f7, + 0x0005e3f7, + 0x0009e3f7, + 0x000de3f7, + 0x0011e3f7, + 0x0015e3f7, + 0x0019e3f7, + 0x001de3f7, + 0x0001ebf7, + 0x0005ebf7, + 0x0009ebf7, + 0x000debf7, + 0x0011ebf7, + 0x0015ebf7, + 0x0019ebf7, + 0x001debf7, + 0x0001f3f7, + 0x0005f3f7, + 0x0009f3f7, + 0x000df3f7, + 0x0011f3f7, + 0x0015f3f7, + 0x0019f3f7, + 0x001df3f7, + 0x0001fbf7, + 0x0005fbf7, + 0x0009fbf7, + 0x000dfbf7, + 0x0011fbf7, + 0x0015fbf7, + 0x0019fbf7, + 0x001dfbf7, + 0x00e1c387, + 0x02e1c387, + 0x04e1c387, + 0x06e1c387, + 0x08e1c387, + 0x0ae1c387, + 0x0ce1c387, + 0x0ee1c387, + 0x00e5c387, + 0x02e5c387, + 0x04e5c387, + 0x06e5c387, + 0x08e5c387, + 0x0ae5c387, + 0x0ce5c387, + 0x0ee5c387, + 0x00e9c387, + 0x02e9c387, + 0x04e9c387, + 0x06e9c387, + 0x08e9c387, + 0x0ae9c387, + 0x0ce9c387, + 0x0ee9c387, + 0x00edc387, + 0x02edc387, + 0x04edc387, + 0x06edc387, + 0x08edc387, + 0x0aedc387, + 0x0cedc387, + 0x0eedc387, + 0x00f1c387, + 0x02f1c387, + 0x04f1c387, + 0x06f1c387, + 0x08f1c387, + 0x0af1c387, + 0x0cf1c387, + 0x0ef1c387, + 0x00f5c387, + 0x02f5c387, + 0x04f5c387, + 0x06f5c387, + 0x08f5c387, 
+ 0x0af5c387, + 0x0cf5c387, + 0x0ef5c387, + 0x00f9c387, + 0x02f9c387, + 0x04f9c387, + 0x06f9c387, + 0x08f9c387, + 0x0af9c387, + 0x0cf9c387, + 0x0ef9c387, + 0x00fdc387, + 0x02fdc387, + 0x04fdc387, + 0x06fdc387, + 0x08fdc387, + 0x0afdc387, + 0x0cfdc387, + 0x0efdc387, + 0x00e1cb87, + 0x02e1cb87, + 0x04e1cb87, + 0x06e1cb87, + 0x08e1cb87, + 0x0ae1cb87, + 0x0ce1cb87, + 0x0ee1cb87, + 0x00e5cb87, + 0x02e5cb87, + 0x04e5cb87, + 0x06e5cb87, + 0x08e5cb87, + 0x0ae5cb87, + 0x0ce5cb87, + 0x0ee5cb87, + 0x00e9cb87, + 0x02e9cb87, + 0x04e9cb87, + 0x06e9cb87, + 0x08e9cb87, + 0x0ae9cb87, + 0x0ce9cb87, + 0x0ee9cb87, + 0x00edcb87, + 0x02edcb87, + 0x04edcb87, + 0x06edcb87, + 0x08edcb87, + 0x0aedcb87, + 0x0cedcb87, + 0x0eedcb87, + 0x00f1cb87, + 0x02f1cb87, + 0x04f1cb87, + 0x06f1cb87, + 0x08f1cb87, + 0x0af1cb87, + 0x0cf1cb87, + 0x0ef1cb87, + 0x00f5cb87, + 0x02f5cb87, + 0x04f5cb87, + 0x06f5cb87, + 0x08f5cb87, + 0x0af5cb87, + 0x0cf5cb87, + 0x0ef5cb87, + 0x00f9cb87, + 0x02f9cb87, + 0x04f9cb87, + 0x06f9cb87, + 0x08f9cb87, +} + +var kZeroRepsDepth = [numCommandSymbols]uint32{ + 0, + 4, + 8, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 11, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 
21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, +} + +var kNonZeroRepsBits = [numCommandSymbols]uint64{ + 0x0000000b, + 0x0000001b, + 0x0000002b, + 0x0000003b, + 0x000002cb, + 0x000006cb, + 0x00000acb, + 0x00000ecb, + 0x000002db, + 0x000006db, + 0x00000adb, + 0x00000edb, + 0x000002eb, + 0x000006eb, + 0x00000aeb, + 0x00000eeb, + 0x000002fb, + 0x000006fb, + 0x00000afb, + 0x00000efb, + 0x0000b2cb, + 0x0001b2cb, + 0x0002b2cb, + 0x0003b2cb, + 0x0000b6cb, + 0x0001b6cb, + 0x0002b6cb, + 0x0003b6cb, + 0x0000bacb, + 0x0001bacb, + 0x0002bacb, + 0x0003bacb, + 0x0000becb, + 0x0001becb, + 0x0002becb, + 0x0003becb, + 0x0000b2db, + 0x0001b2db, + 0x0002b2db, + 0x0003b2db, + 0x0000b6db, + 0x0001b6db, + 0x0002b6db, + 0x0003b6db, + 0x0000badb, + 0x0001badb, + 0x0002badb, + 0x0003badb, + 0x0000bedb, + 0x0001bedb, + 0x0002bedb, + 0x0003bedb, + 0x0000b2eb, + 0x0001b2eb, + 0x0002b2eb, + 0x0003b2eb, + 0x0000b6eb, + 0x0001b6eb, + 0x0002b6eb, + 0x0003b6eb, + 0x0000baeb, + 0x0001baeb, + 0x0002baeb, + 0x0003baeb, + 0x0000beeb, + 0x0001beeb, + 0x0002beeb, + 0x0003beeb, + 0x0000b2fb, + 0x0001b2fb, + 0x0002b2fb, + 0x0003b2fb, + 0x0000b6fb, + 0x0001b6fb, + 0x0002b6fb, + 0x0003b6fb, + 0x0000bafb, + 0x0001bafb, + 0x0002bafb, + 0x0003bafb, + 0x0000befb, + 0x0001befb, + 0x0002befb, + 0x0003befb, + 0x002cb2cb, + 0x006cb2cb, + 0x00acb2cb, + 0x00ecb2cb, + 0x002db2cb, + 0x006db2cb, + 0x00adb2cb, + 0x00edb2cb, + 0x002eb2cb, + 0x006eb2cb, + 0x00aeb2cb, + 0x00eeb2cb, + 0x002fb2cb, + 0x006fb2cb, + 0x00afb2cb, + 0x00efb2cb, + 0x002cb6cb, + 0x006cb6cb, + 0x00acb6cb, + 0x00ecb6cb, + 0x002db6cb, + 0x006db6cb, + 0x00adb6cb, + 0x00edb6cb, + 0x002eb6cb, + 0x006eb6cb, + 0x00aeb6cb, + 0x00eeb6cb, + 0x002fb6cb, + 0x006fb6cb, + 0x00afb6cb, + 0x00efb6cb, + 0x002cbacb, + 0x006cbacb, + 0x00acbacb, + 0x00ecbacb, + 0x002dbacb, + 0x006dbacb, + 0x00adbacb, + 0x00edbacb, + 0x002ebacb, + 0x006ebacb, + 0x00aebacb, 
+ 0x00eebacb, + 0x002fbacb, + 0x006fbacb, + 0x00afbacb, + 0x00efbacb, + 0x002cbecb, + 0x006cbecb, + 0x00acbecb, + 0x00ecbecb, + 0x002dbecb, + 0x006dbecb, + 0x00adbecb, + 0x00edbecb, + 0x002ebecb, + 0x006ebecb, + 0x00aebecb, + 0x00eebecb, + 0x002fbecb, + 0x006fbecb, + 0x00afbecb, + 0x00efbecb, + 0x002cb2db, + 0x006cb2db, + 0x00acb2db, + 0x00ecb2db, + 0x002db2db, + 0x006db2db, + 0x00adb2db, + 0x00edb2db, + 0x002eb2db, + 0x006eb2db, + 0x00aeb2db, + 0x00eeb2db, + 0x002fb2db, + 0x006fb2db, + 0x00afb2db, + 0x00efb2db, + 0x002cb6db, + 0x006cb6db, + 0x00acb6db, + 0x00ecb6db, + 0x002db6db, + 0x006db6db, + 0x00adb6db, + 0x00edb6db, + 0x002eb6db, + 0x006eb6db, + 0x00aeb6db, + 0x00eeb6db, + 0x002fb6db, + 0x006fb6db, + 0x00afb6db, + 0x00efb6db, + 0x002cbadb, + 0x006cbadb, + 0x00acbadb, + 0x00ecbadb, + 0x002dbadb, + 0x006dbadb, + 0x00adbadb, + 0x00edbadb, + 0x002ebadb, + 0x006ebadb, + 0x00aebadb, + 0x00eebadb, + 0x002fbadb, + 0x006fbadb, + 0x00afbadb, + 0x00efbadb, + 0x002cbedb, + 0x006cbedb, + 0x00acbedb, + 0x00ecbedb, + 0x002dbedb, + 0x006dbedb, + 0x00adbedb, + 0x00edbedb, + 0x002ebedb, + 0x006ebedb, + 0x00aebedb, + 0x00eebedb, + 0x002fbedb, + 0x006fbedb, + 0x00afbedb, + 0x00efbedb, + 0x002cb2eb, + 0x006cb2eb, + 0x00acb2eb, + 0x00ecb2eb, + 0x002db2eb, + 0x006db2eb, + 0x00adb2eb, + 0x00edb2eb, + 0x002eb2eb, + 0x006eb2eb, + 0x00aeb2eb, + 0x00eeb2eb, + 0x002fb2eb, + 0x006fb2eb, + 0x00afb2eb, + 0x00efb2eb, + 0x002cb6eb, + 0x006cb6eb, + 0x00acb6eb, + 0x00ecb6eb, + 0x002db6eb, + 0x006db6eb, + 0x00adb6eb, + 0x00edb6eb, + 0x002eb6eb, + 0x006eb6eb, + 0x00aeb6eb, + 0x00eeb6eb, + 0x002fb6eb, + 0x006fb6eb, + 0x00afb6eb, + 0x00efb6eb, + 0x002cbaeb, + 0x006cbaeb, + 0x00acbaeb, + 0x00ecbaeb, + 0x002dbaeb, + 0x006dbaeb, + 0x00adbaeb, + 0x00edbaeb, + 0x002ebaeb, + 0x006ebaeb, + 0x00aebaeb, + 0x00eebaeb, + 0x002fbaeb, + 0x006fbaeb, + 0x00afbaeb, + 0x00efbaeb, + 0x002cbeeb, + 0x006cbeeb, + 0x00acbeeb, + 0x00ecbeeb, + 0x002dbeeb, + 0x006dbeeb, + 0x00adbeeb, + 0x00edbeeb, + 0x002ebeeb, + 0x006ebeeb, + 0x00aebeeb, + 0x00eebeeb, + 0x002fbeeb, + 0x006fbeeb, + 0x00afbeeb, + 0x00efbeeb, + 0x002cb2fb, + 0x006cb2fb, + 0x00acb2fb, + 0x00ecb2fb, + 0x002db2fb, + 0x006db2fb, + 0x00adb2fb, + 0x00edb2fb, + 0x002eb2fb, + 0x006eb2fb, + 0x00aeb2fb, + 0x00eeb2fb, + 0x002fb2fb, + 0x006fb2fb, + 0x00afb2fb, + 0x00efb2fb, + 0x002cb6fb, + 0x006cb6fb, + 0x00acb6fb, + 0x00ecb6fb, + 0x002db6fb, + 0x006db6fb, + 0x00adb6fb, + 0x00edb6fb, + 0x002eb6fb, + 0x006eb6fb, + 0x00aeb6fb, + 0x00eeb6fb, + 0x002fb6fb, + 0x006fb6fb, + 0x00afb6fb, + 0x00efb6fb, + 0x002cbafb, + 0x006cbafb, + 0x00acbafb, + 0x00ecbafb, + 0x002dbafb, + 0x006dbafb, + 0x00adbafb, + 0x00edbafb, + 0x002ebafb, + 0x006ebafb, + 0x00aebafb, + 0x00eebafb, + 0x002fbafb, + 0x006fbafb, + 0x00afbafb, + 0x00efbafb, + 0x002cbefb, + 0x006cbefb, + 0x00acbefb, + 0x00ecbefb, + 0x002dbefb, + 0x006dbefb, + 0x00adbefb, + 0x00edbefb, + 0x002ebefb, + 0x006ebefb, + 0x00aebefb, + 0x00eebefb, + 0x002fbefb, + 0x006fbefb, + 0x00afbefb, + 0x00efbefb, + 0x0b2cb2cb, + 0x1b2cb2cb, + 0x2b2cb2cb, + 0x3b2cb2cb, + 0x0b6cb2cb, + 0x1b6cb2cb, + 0x2b6cb2cb, + 0x3b6cb2cb, + 0x0bacb2cb, + 0x1bacb2cb, + 0x2bacb2cb, + 0x3bacb2cb, + 0x0becb2cb, + 0x1becb2cb, + 0x2becb2cb, + 0x3becb2cb, + 0x0b2db2cb, + 0x1b2db2cb, + 0x2b2db2cb, + 0x3b2db2cb, + 0x0b6db2cb, + 0x1b6db2cb, + 0x2b6db2cb, + 0x3b6db2cb, + 0x0badb2cb, + 0x1badb2cb, + 0x2badb2cb, + 0x3badb2cb, + 0x0bedb2cb, + 0x1bedb2cb, + 0x2bedb2cb, + 0x3bedb2cb, + 0x0b2eb2cb, + 0x1b2eb2cb, + 0x2b2eb2cb, + 0x3b2eb2cb, + 0x0b6eb2cb, + 0x1b6eb2cb, + 0x2b6eb2cb, + 0x3b6eb2cb, + 
0x0baeb2cb, + 0x1baeb2cb, + 0x2baeb2cb, + 0x3baeb2cb, + 0x0beeb2cb, + 0x1beeb2cb, + 0x2beeb2cb, + 0x3beeb2cb, + 0x0b2fb2cb, + 0x1b2fb2cb, + 0x2b2fb2cb, + 0x3b2fb2cb, + 0x0b6fb2cb, + 0x1b6fb2cb, + 0x2b6fb2cb, + 0x3b6fb2cb, + 0x0bafb2cb, + 0x1bafb2cb, + 0x2bafb2cb, + 0x3bafb2cb, + 0x0befb2cb, + 0x1befb2cb, + 0x2befb2cb, + 0x3befb2cb, + 0x0b2cb6cb, + 0x1b2cb6cb, + 0x2b2cb6cb, + 0x3b2cb6cb, + 0x0b6cb6cb, + 0x1b6cb6cb, + 0x2b6cb6cb, + 0x3b6cb6cb, + 0x0bacb6cb, + 0x1bacb6cb, + 0x2bacb6cb, + 0x3bacb6cb, + 0x0becb6cb, + 0x1becb6cb, + 0x2becb6cb, + 0x3becb6cb, + 0x0b2db6cb, + 0x1b2db6cb, + 0x2b2db6cb, + 0x3b2db6cb, + 0x0b6db6cb, + 0x1b6db6cb, + 0x2b6db6cb, + 0x3b6db6cb, + 0x0badb6cb, + 0x1badb6cb, + 0x2badb6cb, + 0x3badb6cb, + 0x0bedb6cb, + 0x1bedb6cb, + 0x2bedb6cb, + 0x3bedb6cb, + 0x0b2eb6cb, + 0x1b2eb6cb, + 0x2b2eb6cb, + 0x3b2eb6cb, + 0x0b6eb6cb, + 0x1b6eb6cb, + 0x2b6eb6cb, + 0x3b6eb6cb, + 0x0baeb6cb, + 0x1baeb6cb, + 0x2baeb6cb, + 0x3baeb6cb, + 0x0beeb6cb, + 0x1beeb6cb, + 0x2beeb6cb, + 0x3beeb6cb, + 0x0b2fb6cb, + 0x1b2fb6cb, + 0x2b2fb6cb, + 0x3b2fb6cb, + 0x0b6fb6cb, + 0x1b6fb6cb, + 0x2b6fb6cb, + 0x3b6fb6cb, + 0x0bafb6cb, + 0x1bafb6cb, + 0x2bafb6cb, + 0x3bafb6cb, + 0x0befb6cb, + 0x1befb6cb, + 0x2befb6cb, + 0x3befb6cb, + 0x0b2cbacb, + 0x1b2cbacb, + 0x2b2cbacb, + 0x3b2cbacb, + 0x0b6cbacb, + 0x1b6cbacb, + 0x2b6cbacb, + 0x3b6cbacb, + 0x0bacbacb, + 0x1bacbacb, + 0x2bacbacb, + 0x3bacbacb, + 0x0becbacb, + 0x1becbacb, + 0x2becbacb, + 0x3becbacb, + 0x0b2dbacb, + 0x1b2dbacb, + 0x2b2dbacb, + 0x3b2dbacb, + 0x0b6dbacb, + 0x1b6dbacb, + 0x2b6dbacb, + 0x3b6dbacb, + 0x0badbacb, + 0x1badbacb, + 0x2badbacb, + 0x3badbacb, + 0x0bedbacb, + 0x1bedbacb, + 0x2bedbacb, + 0x3bedbacb, + 0x0b2ebacb, + 0x1b2ebacb, + 0x2b2ebacb, + 0x3b2ebacb, + 0x0b6ebacb, + 0x1b6ebacb, + 0x2b6ebacb, + 0x3b6ebacb, + 0x0baebacb, + 0x1baebacb, + 0x2baebacb, + 0x3baebacb, + 0x0beebacb, + 0x1beebacb, + 0x2beebacb, + 0x3beebacb, + 0x0b2fbacb, + 0x1b2fbacb, + 0x2b2fbacb, + 0x3b2fbacb, + 0x0b6fbacb, + 0x1b6fbacb, + 0x2b6fbacb, + 0x3b6fbacb, + 0x0bafbacb, + 0x1bafbacb, + 0x2bafbacb, + 0x3bafbacb, + 0x0befbacb, + 0x1befbacb, + 0x2befbacb, + 0x3befbacb, + 0x0b2cbecb, + 0x1b2cbecb, + 0x2b2cbecb, + 0x3b2cbecb, + 0x0b6cbecb, + 0x1b6cbecb, + 0x2b6cbecb, + 0x3b6cbecb, + 0x0bacbecb, + 0x1bacbecb, + 0x2bacbecb, + 0x3bacbecb, + 0x0becbecb, + 0x1becbecb, + 0x2becbecb, + 0x3becbecb, + 0x0b2dbecb, + 0x1b2dbecb, + 0x2b2dbecb, + 0x3b2dbecb, + 0x0b6dbecb, + 0x1b6dbecb, + 0x2b6dbecb, + 0x3b6dbecb, + 0x0badbecb, + 0x1badbecb, + 0x2badbecb, + 0x3badbecb, + 0x0bedbecb, + 0x1bedbecb, + 0x2bedbecb, + 0x3bedbecb, + 0x0b2ebecb, + 0x1b2ebecb, + 0x2b2ebecb, + 0x3b2ebecb, + 0x0b6ebecb, + 0x1b6ebecb, + 0x2b6ebecb, + 0x3b6ebecb, + 0x0baebecb, + 0x1baebecb, + 0x2baebecb, + 0x3baebecb, + 0x0beebecb, + 0x1beebecb, + 0x2beebecb, + 0x3beebecb, + 0x0b2fbecb, + 0x1b2fbecb, + 0x2b2fbecb, + 0x3b2fbecb, + 0x0b6fbecb, + 0x1b6fbecb, + 0x2b6fbecb, + 0x3b6fbecb, + 0x0bafbecb, + 0x1bafbecb, + 0x2bafbecb, + 0x3bafbecb, + 0x0befbecb, + 0x1befbecb, + 0x2befbecb, + 0x3befbecb, + 0x0b2cb2db, + 0x1b2cb2db, + 0x2b2cb2db, + 0x3b2cb2db, + 0x0b6cb2db, + 0x1b6cb2db, + 0x2b6cb2db, + 0x3b6cb2db, + 0x0bacb2db, + 0x1bacb2db, + 0x2bacb2db, + 0x3bacb2db, + 0x0becb2db, + 0x1becb2db, + 0x2becb2db, + 0x3becb2db, + 0x0b2db2db, + 0x1b2db2db, + 0x2b2db2db, + 0x3b2db2db, + 0x0b6db2db, + 0x1b6db2db, + 0x2b6db2db, + 0x3b6db2db, + 0x0badb2db, + 0x1badb2db, + 0x2badb2db, + 0x3badb2db, + 0x0bedb2db, + 0x1bedb2db, + 0x2bedb2db, + 0x3bedb2db, + 0x0b2eb2db, + 0x1b2eb2db, + 0x2b2eb2db, + 0x3b2eb2db, + 0x0b6eb2db, + 0x1b6eb2db, 
+ 0x2b6eb2db, + 0x3b6eb2db, + 0x0baeb2db, + 0x1baeb2db, + 0x2baeb2db, + 0x3baeb2db, + 0x0beeb2db, + 0x1beeb2db, + 0x2beeb2db, + 0x3beeb2db, + 0x0b2fb2db, + 0x1b2fb2db, + 0x2b2fb2db, + 0x3b2fb2db, + 0x0b6fb2db, + 0x1b6fb2db, + 0x2b6fb2db, + 0x3b6fb2db, + 0x0bafb2db, + 0x1bafb2db, + 0x2bafb2db, + 0x3bafb2db, + 0x0befb2db, + 0x1befb2db, + 0x2befb2db, + 0x3befb2db, + 0x0b2cb6db, + 0x1b2cb6db, + 0x2b2cb6db, + 0x3b2cb6db, + 0x0b6cb6db, + 0x1b6cb6db, + 0x2b6cb6db, + 0x3b6cb6db, + 0x0bacb6db, + 0x1bacb6db, + 0x2bacb6db, + 0x3bacb6db, + 0x0becb6db, + 0x1becb6db, + 0x2becb6db, + 0x3becb6db, + 0x0b2db6db, + 0x1b2db6db, + 0x2b2db6db, + 0x3b2db6db, + 0x0b6db6db, + 0x1b6db6db, + 0x2b6db6db, + 0x3b6db6db, + 0x0badb6db, + 0x1badb6db, + 0x2badb6db, + 0x3badb6db, + 0x0bedb6db, + 0x1bedb6db, + 0x2bedb6db, + 0x3bedb6db, + 0x0b2eb6db, + 0x1b2eb6db, + 0x2b2eb6db, + 0x3b2eb6db, + 0x0b6eb6db, + 0x1b6eb6db, + 0x2b6eb6db, + 0x3b6eb6db, + 0x0baeb6db, + 0x1baeb6db, + 0x2baeb6db, + 0x3baeb6db, +} + +var kNonZeroRepsDepth = [numCommandSymbols]uint32{ + 6, + 6, + 6, + 6, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 
30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, +} + +var kStaticCommandCodeBits = [numCommandSymbols]uint16{ + 0, + 256, + 128, + 384, + 64, + 320, + 192, + 448, + 32, + 288, + 160, + 416, + 96, + 352, + 224, + 480, + 16, + 272, + 144, + 400, + 80, + 336, + 208, + 464, + 48, + 304, + 176, + 432, + 112, + 368, + 240, + 496, + 8, + 264, + 136, + 392, + 72, + 328, + 200, + 456, + 40, + 296, + 168, + 424, + 104, + 360, + 232, + 488, + 24, + 280, + 152, + 408, + 88, + 344, + 216, + 472, + 56, + 312, + 184, + 440, + 120, + 376, + 248, + 504, + 4, + 260, + 132, + 388, + 68, + 324, + 196, + 452, + 36, + 292, + 164, + 420, + 100, + 356, + 228, + 484, + 20, + 276, + 148, + 404, + 84, + 340, + 212, + 468, + 52, + 308, + 180, + 436, + 116, + 372, + 244, + 500, + 12, + 268, + 140, + 396, + 76, + 332, + 204, + 460, + 44, + 300, + 172, + 428, + 108, + 364, + 236, + 492, + 28, + 284, + 156, + 412, + 92, + 348, + 220, + 476, + 60, + 316, + 188, + 444, + 124, + 380, + 252, + 508, + 2, + 258, + 130, + 386, + 66, + 322, + 194, + 450, + 34, + 290, + 162, + 418, + 98, + 354, + 226, + 482, + 18, + 274, + 146, + 402, + 82, + 338, + 210, + 466, + 50, + 306, + 178, + 434, + 114, + 370, + 242, + 498, + 10, + 266, + 138, + 394, + 74, + 330, + 202, + 458, + 42, + 298, + 170, + 426, + 106, + 362, + 234, + 490, + 26, + 282, + 154, + 410, + 90, + 346, + 218, + 474, + 58, + 314, + 186, + 442, + 122, + 378, + 250, + 506, + 6, + 262, + 134, + 390, + 70, + 326, + 198, + 454, + 38, + 294, + 166, + 422, + 102, + 358, + 230, + 486, + 22, + 278, + 150, + 406, + 86, + 342, + 214, + 470, + 54, + 310, + 182, + 438, + 118, + 374, + 246, + 502, + 14, + 270, + 142, + 398, + 78, + 334, + 206, + 462, + 46, + 302, + 174, + 430, + 110, + 366, + 238, + 494, + 30, + 286, + 158, + 414, + 94, + 350, + 222, + 478, + 62, + 318, + 190, + 446, + 126, + 382, + 254, + 510, + 1, + 257, + 129, + 385, + 65, + 321, + 193, + 449, 
+ 33, + 289, + 161, + 417, + 97, + 353, + 225, + 481, + 17, + 273, + 145, + 401, + 81, + 337, + 209, + 465, + 49, + 305, + 177, + 433, + 113, + 369, + 241, + 497, + 9, + 265, + 137, + 393, + 73, + 329, + 201, + 457, + 41, + 297, + 169, + 425, + 105, + 361, + 233, + 489, + 25, + 281, + 153, + 409, + 89, + 345, + 217, + 473, + 57, + 313, + 185, + 441, + 121, + 377, + 249, + 505, + 5, + 261, + 133, + 389, + 69, + 325, + 197, + 453, + 37, + 293, + 165, + 421, + 101, + 357, + 229, + 485, + 21, + 277, + 149, + 405, + 85, + 341, + 213, + 469, + 53, + 309, + 181, + 437, + 117, + 373, + 245, + 501, + 13, + 269, + 141, + 397, + 77, + 333, + 205, + 461, + 45, + 301, + 173, + 429, + 109, + 365, + 237, + 493, + 29, + 285, + 157, + 413, + 93, + 349, + 221, + 477, + 61, + 317, + 189, + 445, + 125, + 381, + 253, + 509, + 3, + 259, + 131, + 387, + 67, + 323, + 195, + 451, + 35, + 291, + 163, + 419, + 99, + 355, + 227, + 483, + 19, + 275, + 147, + 403, + 83, + 339, + 211, + 467, + 51, + 307, + 179, + 435, + 115, + 371, + 243, + 499, + 11, + 267, + 139, + 395, + 75, + 331, + 203, + 459, + 43, + 299, + 171, + 427, + 107, + 363, + 235, + 491, + 27, + 283, + 155, + 411, + 91, + 347, + 219, + 475, + 59, + 315, + 187, + 443, + 123, + 379, + 251, + 507, + 7, + 1031, + 519, + 1543, + 263, + 1287, + 775, + 1799, + 135, + 1159, + 647, + 1671, + 391, + 1415, + 903, + 1927, + 71, + 1095, + 583, + 1607, + 327, + 1351, + 839, + 1863, + 199, + 1223, + 711, + 1735, + 455, + 1479, + 967, + 1991, + 39, + 1063, + 551, + 1575, + 295, + 1319, + 807, + 1831, + 167, + 1191, + 679, + 1703, + 423, + 1447, + 935, + 1959, + 103, + 1127, + 615, + 1639, + 359, + 1383, + 871, + 1895, + 231, + 1255, + 743, + 1767, + 487, + 1511, + 999, + 2023, + 23, + 1047, + 535, + 1559, + 279, + 1303, + 791, + 1815, + 151, + 1175, + 663, + 1687, + 407, + 1431, + 919, + 1943, + 87, + 1111, + 599, + 1623, + 343, + 1367, + 855, + 1879, + 215, + 1239, + 727, + 1751, + 471, + 1495, + 983, + 2007, + 55, + 1079, + 567, + 1591, + 311, + 1335, + 823, + 1847, + 183, + 1207, + 695, + 1719, + 439, + 1463, + 951, + 1975, + 119, + 1143, + 631, + 1655, + 375, + 1399, + 887, + 1911, + 247, + 1271, + 759, + 1783, + 503, + 1527, + 1015, + 2039, + 15, + 1039, + 527, + 1551, + 271, + 1295, + 783, + 1807, + 143, + 1167, + 655, + 1679, + 399, + 1423, + 911, + 1935, + 79, + 1103, + 591, + 1615, + 335, + 1359, + 847, + 1871, + 207, + 1231, + 719, + 1743, + 463, + 1487, + 975, + 1999, + 47, + 1071, + 559, + 1583, + 303, + 1327, + 815, + 1839, + 175, + 1199, + 687, + 1711, + 431, + 1455, + 943, + 1967, + 111, + 1135, + 623, + 1647, + 367, + 1391, + 879, + 1903, + 239, + 1263, + 751, + 1775, + 495, + 1519, + 1007, + 2031, + 31, + 1055, + 543, + 1567, + 287, + 1311, + 799, + 1823, + 159, + 1183, + 671, + 1695, + 415, + 1439, + 927, + 1951, + 95, + 1119, + 607, + 1631, + 351, + 1375, + 863, + 1887, + 223, + 1247, + 735, + 1759, + 479, + 1503, + 991, + 2015, + 63, + 1087, + 575, + 1599, + 319, + 1343, + 831, + 1855, + 191, + 1215, + 703, + 1727, + 447, + 1471, + 959, + 1983, + 127, + 1151, + 639, + 1663, + 383, + 1407, + 895, + 1919, + 255, + 1279, + 767, + 1791, + 511, + 1535, + 1023, + 2047, +} + +func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(56, 0x92624416307003, storage_ix, storage) + writeBits(3, 0x00000000, storage_ix, storage) +} + +var kStaticDistanceCodeBits = [64]uint16{ + 0, + 32, + 16, + 48, + 8, + 40, + 24, + 56, + 4, + 36, + 20, + 52, + 12, + 44, + 28, + 60, + 2, + 34, + 18, + 50, + 10, + 42, + 26, + 58, + 6, + 38, + 22, + 54, + 
14, + 46, + 30, + 62, + 1, + 33, + 17, + 49, + 9, + 41, + 25, + 57, + 5, + 37, + 21, + 53, + 13, + 45, + 29, + 61, + 3, + 35, + 19, + 51, + 11, + 43, + 27, + 59, + 7, + 39, + 23, + 55, + 15, + 47, + 31, + 63, +} + +func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(28, 0x0369DC03, storage_ix, storage) +} diff --git a/vendor/github.com/andybalholm/brotli/fast_log.go b/vendor/github.com/andybalholm/brotli/fast_log.go new file mode 100644 index 0000000..9d6607f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/fast_log.go @@ -0,0 +1,290 @@ +package brotli + +import ( + "math" + "math/bits" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for fast computation of logarithms. */ + +func log2FloorNonZero(n uint) uint32 { + return uint32(bits.Len(n)) - 1 +} + +/* A lookup table for small values of log2(int) to be used in entropy + computation. + + ", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */ +var kLog2Table = []float32{ + 0.0000000000000000, + 0.0000000000000000, + 1.0000000000000000, + 1.5849625007211563, + 2.0000000000000000, + 2.3219280948873622, + 2.5849625007211561, + 2.8073549220576042, + 3.0000000000000000, + 3.1699250014423126, + 3.3219280948873626, + 3.4594316186372978, + 3.5849625007211565, + 3.7004397181410922, + 3.8073549220576037, + 3.9068905956085187, + 4.0000000000000000, + 4.0874628412503400, + 4.1699250014423122, + 4.2479275134435852, + 4.3219280948873626, + 4.3923174227787607, + 4.4594316186372973, + 4.5235619560570131, + 4.5849625007211570, + 4.6438561897747244, + 4.7004397181410926, + 4.7548875021634691, + 4.8073549220576037, + 4.8579809951275728, + 4.9068905956085187, + 4.9541963103868758, + 5.0000000000000000, + 5.0443941193584534, + 5.0874628412503400, + 5.1292830169449664, + 5.1699250014423122, + 5.2094533656289501, + 5.2479275134435852, + 5.2854022188622487, + 5.3219280948873626, + 5.3575520046180838, + 5.3923174227787607, + 5.4262647547020979, + 5.4594316186372973, + 5.4918530963296748, + 5.5235619560570131, + 5.5545888516776376, + 5.5849625007211570, + 5.6147098441152083, + 5.6438561897747244, + 5.6724253419714961, + 5.7004397181410926, + 5.7279204545631996, + 5.7548875021634691, + 5.7813597135246599, + 5.8073549220576046, + 5.8328900141647422, + 5.8579809951275719, + 5.8826430493618416, + 5.9068905956085187, + 5.9307373375628867, + 5.9541963103868758, + 5.9772799234999168, + 6.0000000000000000, + 6.0223678130284544, + 6.0443941193584534, + 6.0660891904577721, + 6.0874628412503400, + 6.1085244567781700, + 6.1292830169449672, + 6.1497471195046822, + 6.1699250014423122, + 6.1898245588800176, + 6.2094533656289510, + 6.2288186904958804, + 6.2479275134435861, + 6.2667865406949019, + 6.2854022188622487, + 6.3037807481771031, + 6.3219280948873617, + 6.3398500028846252, + 6.3575520046180847, + 6.3750394313469254, + 6.3923174227787598, + 6.4093909361377026, + 6.4262647547020979, + 6.4429434958487288, + 6.4594316186372982, + 6.4757334309663976, + 6.4918530963296748, + 6.5077946401986964, + 6.5235619560570131, + 6.5391588111080319, + 6.5545888516776376, + 6.5698556083309478, + 6.5849625007211561, + 6.5999128421871278, + 6.6147098441152092, + 6.6293566200796095, + 6.6438561897747253, + 6.6582114827517955, + 6.6724253419714952, + 6.6865005271832185, + 6.7004397181410917, + 6.7142455176661224, + 6.7279204545631988, + 6.7414669864011465, + 6.7548875021634691, + 
6.7681843247769260, + 6.7813597135246599, + 6.7944158663501062, + 6.8073549220576037, + 6.8201789624151887, + 6.8328900141647422, + 6.8454900509443757, + 6.8579809951275719, + 6.8703647195834048, + 6.8826430493618416, + 6.8948177633079437, + 6.9068905956085187, + 6.9188632372745955, + 6.9307373375628867, + 6.9425145053392399, + 6.9541963103868758, + 6.9657842846620879, + 6.9772799234999168, + 6.9886846867721664, + 7.0000000000000000, + 7.0112272554232540, + 7.0223678130284544, + 7.0334230015374501, + 7.0443941193584534, + 7.0552824355011898, + 7.0660891904577721, + 7.0768155970508317, + 7.0874628412503400, + 7.0980320829605272, + 7.1085244567781700, + 7.1189410727235076, + 7.1292830169449664, + 7.1395513523987937, + 7.1497471195046822, + 7.1598713367783891, + 7.1699250014423130, + 7.1799090900149345, + 7.1898245588800176, + 7.1996723448363644, + 7.2094533656289492, + 7.2191685204621621, + 7.2288186904958804, + 7.2384047393250794, + 7.2479275134435861, + 7.2573878426926521, + 7.2667865406949019, + 7.2761244052742384, + 7.2854022188622487, + 7.2946207488916270, + 7.3037807481771031, + 7.3128829552843557, + 7.3219280948873617, + 7.3309168781146177, + 7.3398500028846243, + 7.3487281542310781, + 7.3575520046180847, + 7.3663222142458151, + 7.3750394313469254, + 7.3837042924740528, + 7.3923174227787607, + 7.4008794362821844, + 7.4093909361377026, + 7.4178525148858991, + 7.4262647547020979, + 7.4346282276367255, + 7.4429434958487288, + 7.4512111118323299, + 7.4594316186372973, + 7.4676055500829976, + 7.4757334309663976, + 7.4838157772642564, + 7.4918530963296748, + 7.4998458870832057, + 7.5077946401986964, + 7.5156998382840436, + 7.5235619560570131, + 7.5313814605163119, + 7.5391588111080319, + 7.5468944598876373, + 7.5545888516776376, + 7.5622424242210728, + 7.5698556083309478, + 7.5774288280357487, + 7.5849625007211561, + 7.5924570372680806, + 7.5999128421871278, + 7.6073303137496113, + 7.6147098441152075, + 7.6220518194563764, + 7.6293566200796095, + 7.6366246205436488, + 7.6438561897747244, + 7.6510516911789290, + 7.6582114827517955, + 7.6653359171851765, + 7.6724253419714952, + 7.6794800995054464, + 7.6865005271832185, + 7.6934869574993252, + 7.7004397181410926, + 7.7073591320808825, + 7.7142455176661224, + 7.7210991887071856, + 7.7279204545631996, + 7.7347096202258392, + 7.7414669864011465, + 7.7481928495894596, + 7.7548875021634691, + 7.7615512324444795, + 7.7681843247769260, + 7.7747870596011737, + 7.7813597135246608, + 7.7879025593914317, + 7.7944158663501062, + 7.8008998999203047, + 7.8073549220576037, + 7.8137811912170374, + 7.8201789624151887, + 7.8265484872909159, + 7.8328900141647422, + 7.8392037880969445, + 7.8454900509443757, + 7.8517490414160571, + 7.8579809951275719, + 7.8641861446542798, + 7.8703647195834048, + 7.8765169465650002, + 7.8826430493618425, + 7.8887432488982601, + 7.8948177633079446, + 7.9008668079807496, + 7.9068905956085187, + 7.9128893362299619, + 7.9188632372745955, + 7.9248125036057813, + 7.9307373375628867, + 7.9366379390025719, + 7.9425145053392399, + 7.9483672315846778, + 7.9541963103868758, + 7.9600019320680806, + 7.9657842846620870, + 7.9715435539507720, + 7.9772799234999168, + 7.9829935746943104, + 7.9886846867721664, + 7.9943534368588578, +} + +/* Faster logarithm for small integers, with the property of log2(0) == 0. 
*/ +func fastLog2(v uint) float64 { + if v < uint(len(kLog2Table)) { + return float64(kLog2Table[v]) + } + + return math.Log2(float64(v)) +} diff --git a/vendor/github.com/andybalholm/brotli/find_match_length.go b/vendor/github.com/andybalholm/brotli/find_match_length.go new file mode 100644 index 0000000..09d2ae6 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/find_match_length.go @@ -0,0 +1,45 @@ +package brotli + +import ( + "encoding/binary" + "math/bits" + "runtime" +) + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function to find maximal matching prefixes of strings. */ +func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint { + var matched uint = 0 + _, _ = s1[limit-1], s2[limit-1] // bounds check + switch runtime.GOARCH { + case "amd64": + // Compare 8 bytes at at time. + for matched+8 <= limit { + w1 := binary.LittleEndian.Uint64(s1[matched:]) + w2 := binary.LittleEndian.Uint64(s2[matched:]) + if w1 != w2 { + return matched + uint(bits.TrailingZeros64(w1^w2)>>3) + } + matched += 8 + } + case "386": + // Compare 4 bytes at at time. + for matched+4 <= limit { + w1 := binary.LittleEndian.Uint32(s1[matched:]) + w2 := binary.LittleEndian.Uint32(s2[matched:]) + if w1 != w2 { + return matched + uint(bits.TrailingZeros32(w1^w2)>>3) + } + matched += 4 + } + } + for matched < limit && s1[matched] == s2[matched] { + matched++ + } + return matched +} diff --git a/vendor/github.com/andybalholm/brotli/h10.go b/vendor/github.com/andybalholm/brotli/h10.go new file mode 100644 index 0000000..5662fbb --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h10.go @@ -0,0 +1,287 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (*h10) HashTypeLength() uint { + return 4 +} + +func (*h10) StoreLookahead() uint { + return 128 +} + +func hashBytesH10(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> (32 - 17) +} + +/* A (forgetful) hash table where each hash bucket contains a binary tree of + sequences whose first 4 bytes share the same hash code. + Each sequence is 128 long and is identified by its starting + position in the input data. The binary tree is sorted by the lexicographic + order of the sequences, and it is also a max-heap with respect to the + starting positions. 
*/ +type h10 struct { + hasherCommon + window_mask_ uint + buckets_ [1 << 17]uint32 + invalid_pos_ uint32 + forest []uint32 +} + +func (h *h10) Initialize(params *encoderParams) { + h.window_mask_ = (1 << params.lgwin) - 1 + h.invalid_pos_ = uint32(0 - h.window_mask_) + var num_nodes uint = uint(1) << params.lgwin + h.forest = make([]uint32, 2*num_nodes) +} + +func (h *h10) Prepare(one_shot bool, input_size uint, data []byte) { + var invalid_pos uint32 = h.invalid_pos_ + var i uint32 + for i = 0; i < 1<<17; i++ { + h.buckets_[i] = invalid_pos + } +} + +func leftChildIndexH10(self *h10, pos uint) uint { + return 2 * (pos & self.window_mask_) +} + +func rightChildIndexH10(self *h10, pos uint) uint { + return 2*(pos&self.window_mask_) + 1 +} + +/* Stores the hash of the next 4 bytes and in a single tree-traversal, the + hash bucket's binary tree is searched for matches and is re-rooted at the + current position. + + If less than 128 data is available, the hash bucket of the + current position is searched for matches, but the state of the hash table + is not changed, since we can not know the final sorting order of the + current (incomplete) sequence. + + This function must be called with increasing cur_ix positions. */ +func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var max_comp_len uint = brotli_min_size_t(max_length, 128) + var should_reroot_tree bool = (max_length >= 128) + var key uint32 = hashBytesH10(data[cur_ix_masked:]) + var forest []uint32 = self.forest + var prev_ix uint = uint(self.buckets_[key]) + var node_left uint = leftChildIndexH10(self, cur_ix) + var node_right uint = rightChildIndexH10(self, cur_ix) + var best_len_left uint = 0 + var best_len_right uint = 0 + var depth_remaining uint + /* The forest index of the rightmost node of the left subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The forest index of the leftmost node of the right subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The match length of the rightmost node of the left subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The match length of the leftmost node of the right subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. 
*/ + if should_reroot_tree { + self.buckets_[key] = uint32(cur_ix) + } + + for depth_remaining = 64; ; depth_remaining-- { + var backward uint = cur_ix - prev_ix + var prev_ix_masked uint = prev_ix & ring_buffer_mask + if backward == 0 || backward > max_backward || depth_remaining == 0 { + if should_reroot_tree { + forest[node_left] = self.invalid_pos_ + forest[node_right] = self.invalid_pos_ + } + + break + } + { + var cur_len uint = brotli_min_size_t(best_len_left, best_len_right) + var len uint + assert(cur_len <= 128) + len = cur_len + findMatchLengthWithLimit(data[cur_ix_masked+cur_len:], data[prev_ix_masked+cur_len:], max_length-cur_len) + if matches != nil && len > *best_len { + *best_len = uint(len) + initBackwardMatch(&matches[0], backward, uint(len)) + matches = matches[1:] + } + + if len >= max_comp_len { + if should_reroot_tree { + forest[node_left] = forest[leftChildIndexH10(self, prev_ix)] + forest[node_right] = forest[rightChildIndexH10(self, prev_ix)] + } + + break + } + + if data[cur_ix_masked+len] > data[prev_ix_masked+len] { + best_len_left = uint(len) + if should_reroot_tree { + forest[node_left] = uint32(prev_ix) + } + + node_left = rightChildIndexH10(self, prev_ix) + prev_ix = uint(forest[node_left]) + } else { + best_len_right = uint(len) + if should_reroot_tree { + forest[node_right] = uint32(prev_ix) + } + + node_right = leftChildIndexH10(self, prev_ix) + prev_ix = uint(forest[node_right]) + } + } + } + + return matches +} + +/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the + length of max_length and stores the position cur_ix in the hash table. + + Sets *num_matches to the number of matches found, and stores the found + matches in matches[0] to matches[*num_matches - 1]. The matches will be + sorted by strictly increasing length and (non-strictly) increasing + distance. 
*/ +func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint { + var orig_matches []backwardMatch = matches + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var best_len uint = 1 + var short_match_max_backward uint + if params.quality != hqZopflificationQuality { + short_match_max_backward = 16 + } else { + short_match_max_backward = 64 + } + var stop uint = cur_ix - short_match_max_backward + var dict_matches [maxStaticDictionaryMatchLen + 1]uint32 + var i uint + if cur_ix < short_match_max_backward { + stop = 0 + } + for i = cur_ix - 1; i > stop && best_len <= 2; i-- { + var prev_ix uint = i + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if data[cur_ix_masked] != data[prev_ix] || data[cur_ix_masked+1] != data[prev_ix+1] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len > best_len { + best_len = uint(len) + initBackwardMatch(&matches[0], backward, uint(len)) + matches = matches[1:] + } + } + } + + if best_len < max_length { + matches = storeAndFindMatchesH10(handle, data, cur_ix, ring_buffer_mask, max_length, max_backward, &best_len, matches) + } + + for i = 0; i <= maxStaticDictionaryMatchLen; i++ { + dict_matches[i] = kInvalidMatch + } + { + var minlen uint = brotli_max_size_t(4, best_len+1) + if findAllStaticDictionaryMatches(dictionary, data[cur_ix_masked:], minlen, max_length, dict_matches[0:]) { + var maxlen uint = brotli_min_size_t(maxStaticDictionaryMatchLen, max_length) + var l uint + for l = minlen; l <= maxlen; l++ { + var dict_id uint32 = dict_matches[l] + if dict_id < kInvalidMatch { + var distance uint = max_backward + gap + uint(dict_id>>5) + 1 + if distance <= params.dist.max_distance { + initDictionaryBackwardMatch(&matches[0], distance, l, uint(dict_id&31)) + matches = matches[1:] + } + } + } + } + } + + return uint(-cap(matches) + cap(orig_matches)) +} + +/* Stores the hash of the next 4 bytes and re-roots the binary tree at the + current sequence, without returning any matches. + REQUIRES: ix + 128 <= end-of-current-block */ +func (h *h10) Store(data []byte, mask uint, ix uint) { + var max_backward uint = h.window_mask_ - windowGap + 1 + /* Maximum distance is window size - 16, see section 9.1. of the spec. */ + storeAndFindMatchesH10(h, data, ix, mask, 128, max_backward, nil, nil) +} + +func (h *h10) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint = ix_start + var j uint = ix_start + if ix_start+63 <= ix_end { + i = ix_end - 63 + } + + if ix_start+512 <= i { + for ; j < i; j += 8 { + h.Store(data, mask, j) + } + } + + for ; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *h10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 128 { + var i_start uint = position - 128 + 1 + var i_end uint = brotli_min_size_t(position, i_start+num_bytes) + /* Store the last `128 - 1` positions in the hasher. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + + var i uint + for i = i_start; i < i_end; i++ { + /* Maximum distance is window size - 16, see section 9.1. of the spec. 
+ Furthermore, we have to make sure that we don't look further back + from the start of the next block than the window size, otherwise we + could access already overwritten areas of the ring-buffer. */ + var max_backward uint = h.window_mask_ - brotli_max_size_t(windowGap-1, position-i) + + /* We know that i + 128 <= position + num_bytes, i.e. the + end of the current block and that we have at least + 128 tail in the ring-buffer. */ + storeAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil) + } + } +} + +/* MAX_NUM_MATCHES == 64 + MAX_TREE_SEARCH_DEPTH */ +const maxNumMatchesH10 = 128 + +func (*h10) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + panic("unimplemented") +} + +func (*h10) PrepareDistanceCache(distance_cache []int) { + panic("unimplemented") +} diff --git a/vendor/github.com/andybalholm/brotli/h5.go b/vendor/github.com/andybalholm/brotli/h5.go new file mode 100644 index 0000000..f391b73 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h5.go @@ -0,0 +1,214 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (bucket_size_) to a ring buffer of + fixed size (block_size_). The ring buffer contains the last block_size_ + index positions of the given hash key in the compressed data. */ +func (*h5) HashTypeLength() uint { + return 4 +} + +func (*h5) StoreLookahead() uint { + return 4 +} + +/* HashBytes is the function that chooses the bucket to place the address in. */ +func hashBytesH5(data []byte, shift int) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint32(h >> uint(shift)) +} + +type h5 struct { + hasherCommon + bucket_size_ uint + block_size_ uint + hash_shift_ int + block_mask_ uint32 + num []uint16 + buckets []uint32 +} + +func (h *h5) Initialize(params *encoderParams) { + h.hash_shift_ = 32 - h.params.bucket_bits + h.bucket_size_ = uint(1) << uint(h.params.bucket_bits) + h.block_size_ = uint(1) << uint(h.params.block_bits) + h.block_mask_ = uint32(h.block_size_ - 1) + h.num = make([]uint16, h.bucket_size_) + h.buckets = make([]uint32, h.block_size_*h.bucket_size_) +} + +func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) { + var num []uint16 = h.num + var partial_prepare_threshold uint = h.bucket_size_ >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = hashBytesH5(data[i:], h.hash_shift_) + num[key] = 0 + } + } else { + for i := 0; i < int(h.bucket_size_); i++ { + num[i] = 0 + } + } +} + +/* Look at 4 bytes at &data[ix & mask]. + Compute a hash from these, and store the value of ix at that position. 
*/ +func (h *h5) Store(data []byte, mask uint, ix uint) { + var num []uint16 = h.num + var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_) + var minor_ix uint = uint(num[key]) & uint(h.block_mask_) + var offset uint = minor_ix + uint(key<= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (h *h5) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCacheH5 once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var num []uint16 = h.num + var buckets []uint32 = h.buckets + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var i uint + var bucket []uint32 + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. */ + for i = 0; i < uint(h.params.num_last_distances_to_check); i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = uint(cur_ix - backward) + if prev_ix >= cur_ix { + continue + } + + if backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 3 || (len == 2 && i < 2) { + /* Comparing for >= 2 does not change the semantics, but just saves for + a few unnecessary binary logarithms in backward reference score, + since we are not interested in such short matches. 
*/ + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(i) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_) + bucket = buckets[key< h.block_size_ { + down = uint(num[key]) - h.block_size_ + } else { + down = 0 + } + for i = uint(num[key]); i > down; { + var prev_ix uint + i-- + prev_ix = uint(bucket[uint32(i)&h.block_mask_]) + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. */ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix) + num[key]++ + } + + if min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/h6.go b/vendor/github.com/andybalholm/brotli/h6.go new file mode 100644 index 0000000..80bb224 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h6.go @@ -0,0 +1,216 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (bucket_size_) to a ring buffer of + fixed size (block_size_). The ring buffer contains the last block_size_ + index positions of the given hash key in the compressed data. */ +func (*h6) HashTypeLength() uint { + return 8 +} + +func (*h6) StoreLookahead() uint { + return 8 +} + +/* HashBytes is the function that chooses the bucket to place the address in. */ +func hashBytesH6(data []byte, mask uint64, shift int) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(data) & mask) * kHashMul64Long + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. 
*/ + return uint32(h >> uint(shift)) +} + +type h6 struct { + hasherCommon + bucket_size_ uint + block_size_ uint + hash_shift_ int + hash_mask_ uint64 + block_mask_ uint32 + num []uint16 + buckets []uint32 +} + +func (h *h6) Initialize(params *encoderParams) { + h.hash_shift_ = 64 - h.params.bucket_bits + h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len) + h.bucket_size_ = uint(1) << uint(h.params.bucket_bits) + h.block_size_ = uint(1) << uint(h.params.block_bits) + h.block_mask_ = uint32(h.block_size_ - 1) + h.num = make([]uint16, h.bucket_size_) + h.buckets = make([]uint32, h.block_size_*h.bucket_size_) +} + +func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) { + var num []uint16 = h.num + var partial_prepare_threshold uint = h.bucket_size_ >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = hashBytesH6(data[i:], h.hash_mask_, h.hash_shift_) + num[key] = 0 + } + } else { + for i := 0; i < int(h.bucket_size_); i++ { + num[i] = 0 + } + } +} + +/* Look at 4 bytes at &data[ix & mask]. + Compute a hash from these, and store the value of ix at that position. */ +func (h *h6) Store(data []byte, mask uint, ix uint) { + var num []uint16 = h.num + var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_) + var minor_ix uint = uint(num[key]) & uint(h.block_mask_) + var offset uint = minor_ix + uint(key<= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (h *h6) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCacheH6 once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var num []uint16 = h.num + var buckets []uint32 = h.buckets + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var i uint + var bucket []uint32 + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. 
*/ + for i = 0; i < uint(h.params.num_last_distances_to_check); i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = uint(cur_ix - backward) + if prev_ix >= cur_ix { + continue + } + + if backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 3 || (len == 2 && i < 2) { + /* Comparing for >= 2 does not change the semantics, but just saves for + a few unnecessary binary logarithms in backward reference score, + since we are not interested in such short matches. */ + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(i) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var key uint32 = hashBytesH6(data[cur_ix_masked:], h.hash_mask_, h.hash_shift_) + bucket = buckets[key< h.block_size_ { + down = uint(num[key]) - h.block_size_ + } else { + down = 0 + } + for i = uint(num[key]); i > down; { + var prev_ix uint + i-- + prev_ix = uint(bucket[uint32(i)&h.block_mask_]) + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. 
*/ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix) + num[key]++ + } + + if min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash.go b/vendor/github.com/andybalholm/brotli/hash.go new file mode 100644 index 0000000..00f812e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash.go @@ -0,0 +1,342 @@ +package brotli + +import ( + "encoding/binary" + "fmt" +) + +type hasherCommon struct { + params hasherParams + is_prepared_ bool + dict_num_lookups uint + dict_num_matches uint +} + +func (h *hasherCommon) Common() *hasherCommon { + return h +} + +type hasherHandle interface { + Common() *hasherCommon + Initialize(params *encoderParams) + Prepare(one_shot bool, input_size uint, data []byte) + StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) + HashTypeLength() uint + StoreLookahead() uint + PrepareDistanceCache(distance_cache []int) + FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) + StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) + Store(data []byte, mask uint, ix uint) +} + +const kCutoffTransformsCount uint32 = 10 + +/* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */ +/* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */ +const kCutoffTransforms uint64 = 0x071B520ADA2D3200 + +type hasherSearchResult struct { + len uint + distance uint + score uint + len_code_delta int +} + +/* kHashMul32 multiplier has these properties: + * The multiplier must be odd. Otherwise we may lose the highest bit. + * No long streaks of ones or zeros. + * There is no effort to ensure that it is a prime, the oddity is enough + for this use. + * The number has been tuned heuristically against compression benchmarks. */ +const kHashMul32 uint32 = 0x1E35A7BD + +const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD + +const kHashMul64Long uint64 = 0x1FE35A7BD3579BD3 + +func hash14(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> (32 - 14) +} + +func prepareDistanceCache(distance_cache []int, num_distances int) { + if num_distances > 4 { + var last_distance int = distance_cache[0] + distance_cache[4] = last_distance - 1 + distance_cache[5] = last_distance + 1 + distance_cache[6] = last_distance - 2 + distance_cache[7] = last_distance + 2 + distance_cache[8] = last_distance - 3 + distance_cache[9] = last_distance + 3 + if num_distances > 10 { + var next_last_distance int = distance_cache[1] + distance_cache[10] = next_last_distance - 1 + distance_cache[11] = next_last_distance + 1 + distance_cache[12] = next_last_distance - 2 + distance_cache[13] = next_last_distance + 2 + distance_cache[14] = next_last_distance - 3 + distance_cache[15] = next_last_distance + 3 + } + } +} + +const literalByteScore = 135 + +const distanceBitPenalty = 30 + +/* Score must be positive after applying maximal penalty. 
*/ +const scoreBase = (distanceBitPenalty * 8 * 8) + +/* Usually, we always choose the longest backward reference. This function + allows for the exception of that rule. + + If we choose a backward reference that is further away, it will + usually be coded with more bits. We approximate this by assuming + log2(distance). If the distance can be expressed in terms of the + last four distances, we use some heuristic constants to estimate + the bits cost. For the first up to four literals we use the bit + cost of the literals from the literal cost model, after that we + use the average bit cost of the cost model. + + This function is used to sometimes discard a longer backward reference + when it is not much longer and the bit cost for encoding it is more + than the saved literals. + + backward_reference_offset MUST be positive. */ +func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint { + return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset)) +} + +func backwardReferenceScoreUsingLastDistance(copy_length uint) uint { + return literalByteScore*uint(copy_length) + scoreBase + 15 +} + +func backwardReferencePenaltyUsingLastDistance(distance_short_code uint) uint { + return uint(39) + ((0x1CA10 >> (distance_short_code & 0xE)) & 0xE) +} + +func testStaticDictionaryItem(dictionary *encoderDictionary, item uint, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult) bool { + var len uint + var word_idx uint + var offset uint + var matchlen uint + var backward uint + var score uint + len = item & 0x1F + word_idx = item >> 5 + offset = uint(dictionary.words.offsets_by_length[len]) + len*word_idx + if len > max_length { + return false + } + + matchlen = findMatchLengthWithLimit(data, dictionary.words.data[offset:], uint(len)) + if matchlen+uint(dictionary.cutoffTransformsCount) <= len || matchlen == 0 { + return false + } + { + var cut uint = len - matchlen + var transform_id uint = (cut << 2) + uint((dictionary.cutoffTransforms>>(cut*6))&0x3F) + backward = max_backward + 1 + word_idx + (transform_id << dictionary.words.size_bits_by_length[len]) + } + + if backward > max_distance { + return false + } + + score = backwardReferenceScore(matchlen, backward) + if score < out.score { + return false + } + + out.len = matchlen + out.len_code_delta = int(len) - int(matchlen) + out.distance = backward + out.score = score + return true +} + +func searchInStaticDictionary(dictionary *encoderDictionary, handle hasherHandle, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult, shallow bool) { + var key uint + var i uint + var self *hasherCommon = handle.Common() + if self.dict_num_matches < self.dict_num_lookups>>7 { + return + } + + key = uint(hash14(data) << 1) + for i = 0; ; (func() { i++; key++ })() { + var tmp uint + if shallow { + tmp = 1 + } else { + tmp = 2 + } + if i >= tmp { + break + } + var item uint = uint(dictionary.hash_table[key]) + self.dict_num_lookups++ + if item != 0 { + var item_matches bool = testStaticDictionaryItem(dictionary, item, data, max_length, max_backward, max_distance, out) + if item_matches { + self.dict_num_matches++ + } + } + } +} + +type backwardMatch struct { + distance uint32 + length_and_code uint32 +} + +func initBackwardMatch(self *backwardMatch, dist uint, len uint) { + self.distance = uint32(dist) + self.length_and_code = uint32(len << 5) +} + +func initDictionaryBackwardMatch(self *backwardMatch, 
dist uint, len uint, len_code uint) { + self.distance = uint32(dist) + var tmp uint + if len == len_code { + tmp = 0 + } else { + tmp = len_code + } + self.length_and_code = uint32(len<<5 | tmp) +} + +func backwardMatchLength(self *backwardMatch) uint { + return uint(self.length_and_code >> 5) +} + +func backwardMatchLengthCode(self *backwardMatch) uint { + var code uint = uint(self.length_and_code) & 31 + if code != 0 { + return code + } else { + return backwardMatchLength(self) + } +} + +func hasherReset(handle hasherHandle) { + if handle == nil { + return + } + handle.Common().is_prepared_ = false +} + +func newHasher(typ int) hasherHandle { + switch typ { + case 2: + return &hashLongestMatchQuickly{ + bucketBits: 16, + bucketSweep: 1, + hashLen: 5, + useDictionary: true, + } + case 3: + return &hashLongestMatchQuickly{ + bucketBits: 16, + bucketSweep: 2, + hashLen: 5, + useDictionary: false, + } + case 4: + return &hashLongestMatchQuickly{ + bucketBits: 17, + bucketSweep: 4, + hashLen: 5, + useDictionary: true, + } + case 5: + return new(h5) + case 6: + return new(h6) + case 10: + return new(h10) + case 35: + return &hashComposite{ + ha: newHasher(3), + hb: &hashRolling{jump: 4}, + } + case 40: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 1, + bankBits: 16, + numLastDistancesToCheck: 4, + } + case 41: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 1, + bankBits: 16, + numLastDistancesToCheck: 10, + } + case 42: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 512, + bankBits: 9, + numLastDistancesToCheck: 16, + } + case 54: + return &hashLongestMatchQuickly{ + bucketBits: 20, + bucketSweep: 4, + hashLen: 7, + useDictionary: false, + } + case 55: + return &hashComposite{ + ha: newHasher(54), + hb: &hashRolling{jump: 4}, + } + case 65: + return &hashComposite{ + ha: newHasher(6), + hb: &hashRolling{jump: 1}, + } + } + + panic(fmt.Sprintf("unknown hasher type: %d", typ)) +} + +func hasherSetup(handle *hasherHandle, params *encoderParams, data []byte, position uint, input_size uint, is_last bool) { + var self hasherHandle = nil + var common *hasherCommon = nil + var one_shot bool = (position == 0 && is_last) + if *handle == nil { + chooseHasher(params, ¶ms.hasher) + self = newHasher(params.hasher.type_) + + *handle = self + common = self.Common() + common.params = params.hasher + self.Initialize(params) + } + + self = *handle + common = self.Common() + if !common.is_prepared_ { + self.Prepare(one_shot, input_size, data) + + if position == 0 { + common.dict_num_lookups = 0 + common.dict_num_matches = 0 + } + + common.is_prepared_ = true + } +} + +func initOrStitchToPreviousBlock(handle *hasherHandle, data []byte, mask uint, params *encoderParams, position uint, input_size uint, is_last bool) { + var self hasherHandle + hasherSetup(handle, params, data, position, input_size, is_last) + self = *handle + self.StitchToPreviousBlock(input_size, position, data, mask) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_composite.go b/vendor/github.com/andybalholm/brotli/hash_composite.go new file mode 100644 index 0000000..a65fe2e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_composite.go @@ -0,0 +1,93 @@ +package brotli + +/* Copyright 2018 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (h *hashComposite) HashTypeLength() uint { + var a uint = h.ha.HashTypeLength() + var b uint = h.hb.HashTypeLength() + if a > b { + return a + } else { + return b + } +} + +func (h *hashComposite) StoreLookahead() uint { + var a uint = h.ha.StoreLookahead() + var b uint = h.hb.StoreLookahead() + if a > b { + return a + } else { + return b + } +} + +/* Composite hasher: This hasher allows to combine two other hashers, HASHER_A + and HASHER_B. */ +type hashComposite struct { + hasherCommon + ha hasherHandle + hb hasherHandle + params *encoderParams +} + +func (h *hashComposite) Initialize(params *encoderParams) { + h.params = params +} + +/* TODO: Initialize of the hashers is defered to Prepare (and params + remembered here) because we don't get the one_shot and input_size params + here that are needed to know the memory size of them. Instead provide + those params to all hashers InitializehashComposite */ +func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) { + if h.ha == nil { + var common_a *hasherCommon + var common_b *hasherCommon + + common_a = h.ha.Common() + common_a.params = h.params.hasher + common_a.is_prepared_ = false + common_a.dict_num_lookups = 0 + common_a.dict_num_matches = 0 + h.ha.Initialize(h.params) + + common_b = h.hb.Common() + common_b.params = h.params.hasher + common_b.is_prepared_ = false + common_b.dict_num_lookups = 0 + common_b.dict_num_matches = 0 + h.hb.Initialize(h.params) + } + + h.ha.Prepare(one_shot, input_size, data) + h.hb.Prepare(one_shot, input_size, data) +} + +func (h *hashComposite) Store(data []byte, mask uint, ix uint) { + h.ha.Store(data, mask, ix) + h.hb.Store(data, mask, ix) +} + +func (h *hashComposite) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + h.ha.StoreRange(data, mask, ix_start, ix_end) + h.hb.StoreRange(data, mask, ix_start, ix_end) +} + +func (h *hashComposite) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask) + h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask) +} + +func (h *hashComposite) PrepareDistanceCache(distance_cache []int) { + h.ha.PrepareDistanceCache(distance_cache) + h.hb.PrepareDistanceCache(distance_cache) +} + +func (h *hashComposite) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + h.ha.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out) + h.hb.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go new file mode 100644 index 0000000..306e46d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go @@ -0,0 +1,252 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (*hashForgetfulChain) HashTypeLength() uint { + return 4 +} + +func (*hashForgetfulChain) StoreLookahead() uint { + return 4 +} + +/* HashBytes is the function that chooses the bucket to place the address in.*/ +func (h *hashForgetfulChain) HashBytes(data []byte) uint { + var hash uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint(hash >> (32 - h.bucketBits)) +} + +type slot struct { + delta uint16 + next uint16 +} + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + Hashes are stored in chains which are bucketed to groups. Group of chains + share a storage "bank". When more than "bank size" chain nodes are added, + oldest nodes are replaced; this way several chains may share a tail. */ +type hashForgetfulChain struct { + hasherCommon + + bucketBits uint + numBanks uint + bankBits uint + numLastDistancesToCheck int + + addr []uint32 + head []uint16 + tiny_hash [65536]byte + banks [][]slot + free_slot_idx []uint16 + max_hops uint +} + +func (h *hashForgetfulChain) Initialize(params *encoderParams) { + var q uint + if params.quality > 6 { + q = 7 + } else { + q = 8 + } + h.max_hops = q << uint(params.quality-4) + + bankSize := 1 << h.bankBits + bucketSize := 1 << h.bucketBits + + h.addr = make([]uint32, bucketSize) + h.head = make([]uint16, bucketSize) + h.banks = make([][]slot, h.numBanks) + for i := range h.banks { + h.banks[i] = make([]slot, bankSize) + } + h.free_slot_idx = make([]uint16, h.numBanks) +} + +func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte) { + var partial_prepare_threshold uint = (1 << h.bucketBits) >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var bucket uint = h.HashBytes(data[i:]) + + /* See InitEmpty comment. */ + h.addr[bucket] = 0xCCCCCCCC + + h.head[bucket] = 0xCCCC + } + } else { + /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position + processed by hasher never reaches 3GB + 64M; this makes all new chains + to be terminated after the first node. */ + for i := range h.addr { + h.addr[i] = 0xCCCCCCCC + } + + for i := range h.head { + h.head[i] = 0 + } + } + + h.tiny_hash = [65536]byte{} + for i := range h.free_slot_idx { + h.free_slot_idx[i] = 0 + } +} + +/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend + node to corresponding chain; also update tiny_hash for current position. 
*/ +func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) { + var key uint = h.HashBytes(data[ix&mask:]) + var bank uint = key & (h.numBanks - 1) + idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1) + h.free_slot_idx[bank]++ + var delta uint = ix - uint(h.addr[key]) + h.tiny_hash[uint16(ix)] = byte(key) + if delta > 0xFFFF { + delta = 0xFFFF + } + h.banks[bank][idx].delta = uint16(delta) + h.banks[bank][idx].next = h.head[key] + h.addr[key] = uint32(ix) + h.head[key] = uint16(idx) +} + +func (h *hashForgetfulChain) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint + for i = ix_start; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *hashForgetfulChain) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ring_buffer_mask, position-3) + h.Store(ringbuffer, ring_buffer_mask, position-2) + h.Store(ringbuffer, ring_buffer_mask, position-1) + } +} + +func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.numLastDistancesToCheck) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCachehashForgetfulChain once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var key uint = h.HashBytes(data[cur_ix_masked:]) + var tiny_hash byte = byte(key) + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. */ + for i := 0; i < h.numLastDistancesToCheck; i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = (cur_ix - backward) + + /* For distance code 0 we want to consider 2-byte matches. 
*/ + if i > 0 && h.tiny_hash[uint16(prev_ix)] != tiny_hash { + continue + } + if prev_ix >= cur_ix || backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 2 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(uint(i)) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var bank uint = key & (h.numBanks - 1) + var backward uint = 0 + var hops uint = h.max_hops + var delta uint = cur_ix - uint(h.addr[key]) + var slot uint = uint(h.head[key]) + for { + tmp6 := hops + hops-- + if tmp6 == 0 { + break + } + var prev_ix uint + var last uint = slot + backward += delta + if backward > max_backward { + break + } + prev_ix = (cur_ix - backward) & ring_buffer_mask + slot = uint(h.banks[bank][last].next) + delta = uint(h.banks[bank][last].delta) + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. */ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + h.Store(data, ring_buffer_mask, cur_ix) + } + + if out.score == min_score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go new file mode 100644 index 0000000..9375dc1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go @@ -0,0 +1,214 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression + a little faster (0.5% - 1%) and it compresses 0.15% better on small text + and HTML inputs. */ + +func (*hashLongestMatchQuickly) HashTypeLength() uint { + return 8 +} + +func (*hashLongestMatchQuickly) StoreLookahead() uint { + return 8 +} + +/* HashBytes is the function that chooses the bucket to place + the address in. The HashLongestMatch and hashLongestMatchQuickly + classes have separate, different implementations of hashing. */ +func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 { + var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64) + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint32(hash >> (64 - h.bucketBits)) +} + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (1 << 16). 
Starting from the + given index, 1 buckets are used to store values of a key. */ +type hashLongestMatchQuickly struct { + hasherCommon + + bucketBits uint + bucketSweep int + hashLen uint + useDictionary bool + + buckets []uint32 +} + +func (h *hashLongestMatchQuickly) Initialize(params *encoderParams) { + h.buckets = make([]uint32, 1<<h.bucketBits+h.bucketSweep) +} + +func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data []byte) { + var partial_prepare_threshold uint = (4 << h.bucketBits) >> 7 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = h.HashBytes(data[i:]) + for j := 0; j < h.bucketSweep; j++ { + h.buckets[key+uint32(j)] = 0 + } + } + } else { + /* It is not strictly necessary to fill this buffer here, but + not filling will make the results of the compression stochastic + (but correct). This is because random data would cause the + system to find accidentally good backward references here and there. */ + for i := range h.buckets { + h.buckets[i] = 0 + } + } +} + +/* Look at 5 bytes at &data[ix & mask]. + Compute a hash from these, and store the value somewhere within + [ix .. ix+3]. */ +func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) { + var key uint32 = h.HashBytes(data[ix&mask:]) + var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep) + /* Wiggle the value with the bucket sweep range. */ + h.buckets[key+off] = uint32(ix) +} + +func (h *hashLongestMatchQuickly) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint + for i = ix_start; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) { +} + +/* Find a longest backward match of &data[cur_ix & ring_buffer_mask] + up to the length of max_length and stores the position cur_ix in the + hash table. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found.
*/ +func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var best_len_in uint = out.len + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var key uint32 = h.HashBytes(data[cur_ix_masked:]) + var compare_char int = int(data[cur_ix_masked+best_len_in]) + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = best_len_in + var cached_backward uint = uint(distance_cache[0]) + var prev_ix uint = cur_ix - cached_backward + var bucket []uint32 + out.len_code_delta = 0 + if prev_ix < cur_ix { + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char == int(data[prev_ix+best_len]) { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = uint(len) + out.distance = cached_backward + out.score = best_score + compare_char = int(data[cur_ix_masked+best_len]) + if h.bucketSweep == 1 { + h.buckets[key] = uint32(cur_ix) + return + } + } + } + } + } + + if h.bucketSweep == 1 { + var backward uint + var len uint + + /* Only one to look for, don't bother to prepare for a loop. */ + prev_ix = uint(h.buckets[key]) + + h.buckets[key] = uint32(cur_ix) + backward = cur_ix - prev_ix + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len_in]) { + return + } + + if backward == 0 || backward > max_backward { + return + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + out.len = uint(len) + out.distance = backward + out.score = score + return + } + } + } else { + bucket = h.buckets[key:] + var i int + prev_ix = uint(bucket[0]) + bucket = bucket[1:] + for i = 0; i < h.bucketSweep; (func() { i++; tmp3 := bucket; bucket = bucket[1:]; prev_ix = uint(tmp3[0]) })() { + var backward uint = cur_ix - prev_ix + var len uint + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len]) { + continue + } + + if backward == 0 || backward > max_backward { + continue + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = score + compare_char = int(data[cur_ix_masked+best_len]) + } + } + } + } + + if h.useDictionary && min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, true) + } + + h.buckets[key+uint32((cur_ix>>3)%uint(h.bucketSweep))] = uint32(cur_ix) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_rolling.go b/vendor/github.com/andybalholm/brotli/hash_rolling.go new file mode 100644 index 0000000..6630fc0 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_rolling.go @@ -0,0 +1,168 @@ +package brotli + +/* Copyright 2018 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* NOTE: this hasher does not search in the dictionary. 
It is used as + backup-hasher, the main hasher already searches in it. */ + +const kRollingHashMul32 uint32 = 69069 + +const kInvalidPosHashRolling uint32 = 0xffffffff + +/* This hasher uses a longer forward length, but returning a higher value here + will hurt compression by the main hasher when combined with a composite + hasher. The hasher tests for forward itself instead. */ +func (*hashRolling) HashTypeLength() uint { + return 4 +} + +func (*hashRolling) StoreLookahead() uint { + return 4 +} + +/* Computes a code from a single byte. A lookup table of 256 values could be + used, but simply adding 1 works about as good. */ +func (*hashRolling) HashByte(b byte) uint32 { + return uint32(b) + 1 +} + +func (h *hashRolling) HashRollingFunctionInitial(state uint32, add byte, factor uint32) uint32 { + return uint32(factor*state + h.HashByte(add)) +} + +func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, factor uint32, factor_remove uint32) uint32 { + return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem)) +} + +/* Rolling hash for long distance long string matches. Stores one position + per bucket, bucket key is computed over a long region. */ +type hashRolling struct { + hasherCommon + + jump int + + state uint32 + table []uint32 + next_ix uint + factor uint32 + factor_remove uint32 +} + +func (h *hashRolling) Initialize(params *encoderParams) { + h.state = 0 + h.next_ix = 0 + + h.factor = kRollingHashMul32 + + /* Compute the factor of the oldest byte to remove: factor**steps modulo + 0xffffffff (the multiplications rely on 32-bit overflow) */ + h.factor_remove = 1 + + for i := 0; i < 32; i += h.jump { + h.factor_remove *= h.factor + } + + h.table = make([]uint32, 16777216) + for i := 0; i < 16777216; i++ { + h.table[i] = kInvalidPosHashRolling + } +} + +func (h *hashRolling) Prepare(one_shot bool, input_size uint, data []byte) { + /* Too small size, cannot use this hasher. */ + if input_size < 32 { + return + } + h.state = 0 + for i := 0; i < 32; i += h.jump { + h.state = h.HashRollingFunctionInitial(h.state, data[i], h.factor) + } +} + +func (*hashRolling) Store(data []byte, mask uint, ix uint) { +} + +func (*hashRolling) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { +} + +func (h *hashRolling) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + var position_masked uint + /* In this case we must re-initialize the hasher from scratch from the + current position. */ + + var available uint = num_bytes + if position&uint(h.jump-1) != 0 { + var diff uint = uint(h.jump) - (position & uint(h.jump-1)) + if diff > available { + available = 0 + } else { + available = available - diff + } + position += diff + } + + position_masked = position & ring_buffer_mask + + /* wrapping around ringbuffer not handled. 
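Illustrative check (not part of the vendored file) that the rolling update defined above is equivalent to rehashing the window from scratch; it uses the same 69069 multiplier and the byte+1 mapping, with a step of 1 instead of h.jump:

    func rollingEquivalent(data []byte, window int) bool {
            const factor = uint32(69069)
            hashByte := func(b byte) uint32 { return uint32(b) + 1 }
            factorRemove := uint32(1) // factor**window, relying on 32-bit wraparound
            for i := 0; i < window; i++ {
                    factorRemove *= factor
            }
            var state uint32 // hash of data[0:window]
            for i := 0; i < window; i++ {
                    state = factor*state + hashByte(data[i])
            }
            rolled := factor*state + hashByte(data[window]) - factorRemove*hashByte(data[0])
            var fresh uint32 // hash of data[1:window+1], computed directly
            for i := 1; i <= window; i++ {
                    fresh = factor*fresh + hashByte(data[i])
            }
            return rolled == fresh // true whenever len(data) > window
    }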
*/ + if available > ring_buffer_mask-position_masked { + available = ring_buffer_mask - position_masked + } + + h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:]) + h.next_ix = position +} + +func (*hashRolling) PrepareDistanceCache(distance_cache []int) { +} + +func (h *hashRolling) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var pos uint = h.next_ix + + if cur_ix&uint(h.jump-1) != 0 { + return + } + + /* Not enough lookahead */ + if max_length < 32 { + return + } + + for pos = h.next_ix; pos <= cur_ix; pos += uint(h.jump) { + var code uint32 = h.state & ((16777216 * 64) - 1) + var rem byte = data[pos&ring_buffer_mask] + var add byte = data[(pos+32)&ring_buffer_mask] + var found_ix uint = uint(kInvalidPosHashRolling) + + h.state = h.HashRollingFunction(h.state, add, rem, h.factor, h.factor_remove) + + if code < 16777216 { + found_ix = uint(h.table[code]) + h.table[code] = uint32(pos) + if pos == cur_ix && uint32(found_ix) != kInvalidPosHashRolling { + /* The cast to 32-bit makes backward distances up to 4GB work even + if cur_ix is above 4GB, despite using 32-bit values in the table. */ + var backward uint = uint(uint32(cur_ix - found_ix)) + if backward <= max_backward { + var found_ix_masked uint = found_ix & ring_buffer_mask + var len uint = findMatchLengthWithLimit(data[found_ix_masked:], data[cur_ix_masked:], max_length) + if len >= 4 && len > out.len { + var score uint = backwardReferenceScore(uint(len), backward) + if score > out.score { + out.len = uint(len) + out.distance = backward + out.score = score + out.len_code_delta = 0 + } + } + } + } + } + } + + h.next_ix = cur_ix + uint(h.jump) +} diff --git a/vendor/github.com/andybalholm/brotli/histogram.go b/vendor/github.com/andybalholm/brotli/histogram.go new file mode 100644 index 0000000..0346622 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/histogram.go @@ -0,0 +1,226 @@ +package brotli + +import "math" + +/* The distance symbols effectively used by "Large Window Brotli" (32-bit). 
*/ +const numHistogramDistanceSymbols = 544 + +type histogramLiteral struct { + data_ [numLiteralSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearLiteral(self *histogramLiteral) { + self.data_ = [numLiteralSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsLiteral(array []histogramLiteral, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearLiteral(&array[i:][0]) + } +} + +func histogramAddLiteral(self *histogramLiteral, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorLiteral(self *histogramLiteral, p []byte, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramLiteral(self *histogramLiteral, v *histogramLiteral) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numLiteralSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeLiteral() uint { + return numLiteralSymbols +} + +type histogramCommand struct { + data_ [numCommandSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearCommand(self *histogramCommand) { + self.data_ = [numCommandSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsCommand(array []histogramCommand, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearCommand(&array[i:][0]) + } +} + +func histogramAddCommand(self *histogramCommand, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorCommand(self *histogramCommand, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramCommand(self *histogramCommand, v *histogramCommand) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numCommandSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeCommand() uint { + return numCommandSymbols +} + +type histogramDistance struct { + data_ [numDistanceSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearDistance(self *histogramDistance) { + self.data_ = [numDistanceSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsDistance(array []histogramDistance, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearDistance(&array[i:][0]) + } +} + +func histogramAddDistance(self *histogramDistance, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorDistance(self *histogramDistance, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramDistance(self *histogramDistance, v *histogramDistance) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numDistanceSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeDistance() uint { + return numDistanceSymbols +} + +type blockSplitIterator struct { + split_ *blockSplit + idx_ uint + type_ uint + length_ uint +} + +func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) { + self.split_ = split + self.idx_ = 0 + self.type_ = 0 + if len(split.lengths) > 0 { + self.length_ = uint(split.lengths[0]) + } else { + self.length_ = 0 + } +} + +func blockSplitIteratorNext(self *blockSplitIterator) { + if self.length_ == 0 { + 
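// The current block has been consumed; step to the next (type, length) pair.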
self.idx_++ + self.type_ = uint(self.split_.types[self.idx_]) + self.length_ = uint(self.split_.lengths[self.idx_]) + } + + self.length_-- +} + +func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) { + var pos uint = start_pos + var literal_it blockSplitIterator + var insert_and_copy_it blockSplitIterator + var dist_it blockSplitIterator + + initBlockSplitIterator(&literal_it, literal_split) + initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split) + initBlockSplitIterator(&dist_it, dist_split) + for i := range cmds { + var cmd *command = &cmds[i] + var j uint + blockSplitIteratorNext(&insert_and_copy_it) + histogramAddCommand(&insert_and_copy_histograms[insert_and_copy_it.type_], uint(cmd.cmd_prefix_)) + + /* TODO: unwrap iterator blocks. */ + for j = uint(cmd.insert_len_); j != 0; j-- { + var context uint + blockSplitIteratorNext(&literal_it) + context = literal_it.type_ + if context_modes != nil { + var lut contextLUT = getContextLUT(context_modes[context]) + context = (context << literalContextBits) + uint(getContext(prev_byte, prev_byte2, lut)) + } + + histogramAddLiteral(&literal_histograms[context], uint(ringbuffer[pos&mask])) + prev_byte2 = prev_byte + prev_byte = ringbuffer[pos&mask] + pos++ + } + + pos += uint(commandCopyLen(cmd)) + if commandCopyLen(cmd) != 0 { + prev_byte2 = ringbuffer[(pos-2)&mask] + prev_byte = ringbuffer[(pos-1)&mask] + if cmd.cmd_prefix_ >= 128 { + var context uint + blockSplitIteratorNext(&dist_it) + context = uint(uint32(dist_it.type_< bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} + +// acceptSpec describes an Accept* header. +type acceptSpec struct { + Value string + Q float64 +} + +// parseAccept parses Accept* headers. +func parseAccept(header http.Header, key string) (specs []acceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec acceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +// Octet types from RFC 2616. 
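/* Illustrative use of the parser above from inside this package (not part of the vendored file); it needs "fmt" and "net/http":

   h := http.Header{"Accept-Encoding": {"gzip;q=0.8, br"}}
   for _, spec := range parseAccept(h, "Accept-Encoding") {
           fmt.Printf("%s q=%.1f\n", spec.Value, spec.Q) // "gzip q=0.8", then "br q=1.0"
   }
*/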
+var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} diff --git a/vendor/github.com/andybalholm/brotli/huffman.go b/vendor/github.com/andybalholm/brotli/huffman.go new file mode 100644 index 0000000..182f3d2 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/huffman.go @@ -0,0 +1,653 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for building Huffman decoding tables. */ + +const huffmanMaxCodeLength = 15 + +/* Maximum possible Huffman table size for an alphabet size of (index * 32), + max code length 15 and root table bits 8. */ +var kMaxHuffmanTableSize = []uint16{ + 256, + 402, + 436, + 468, + 500, + 534, + 566, + 598, + 630, + 662, + 694, + 726, + 758, + 790, + 822, + 854, + 886, + 920, + 952, + 984, + 1016, + 1048, + 1080, + 1112, + 1144, + 1176, + 1208, + 1240, + 1272, + 1304, + 1336, + 1368, + 1400, + 1432, + 1464, + 1496, + 1528, +} + +/* BROTLI_NUM_BLOCK_LEN_SYMBOLS == 26 */ +const huffmanMaxSize26 = 396 + +/* BROTLI_MAX_BLOCK_TYPE_SYMBOLS == 258 */ +const huffmanMaxSize258 = 632 + +/* BROTLI_MAX_CONTEXT_MAP_SYMBOLS == 272 */ +const huffmanMaxSize272 = 646 + +const huffmanMaxCodeLengthCodeLength = 5 + +/* Do not create this struct directly - use the ConstructHuffmanCode + * constructor below! */ +type huffmanCode struct { + bits byte + value uint16 +} + +func constructHuffmanCode(bits byte, value uint16) huffmanCode { + var h huffmanCode + h.bits = bits + h.value = value + return h +} + +/* Builds Huffman lookup table assuming code lengths are in symbol order. */ + +/* Builds Huffman lookup table assuming code lengths are in symbol order. + Returns size of resulting table. */ + +/* Builds a simple Huffman table. The |num_symbols| parameter is to be + interpreted as follows: 0 means 1 symbol, 1 means 2 symbols, + 2 means 3 symbols, 3 means 4 symbols with lengths [2, 2, 2, 2], + 4 means 4 symbols with lengths [1, 2, 3, 3]. */ + +/* Contains a collection of Huffman trees with the same alphabet size. */ +/* max_symbol is needed due to simple codes since log2(alphabet_size) could be + greater than log2(max_symbol). 
*/ +type huffmanTreeGroup struct { + htrees [][]huffmanCode + codes []huffmanCode + alphabet_size uint16 + max_symbol uint16 + num_htrees uint16 +} + +const reverseBitsMax = 8 + +const reverseBitsBase = 0 + +var kReverseBits = [1 << reverseBitsMax]byte{ + 0x00, + 0x80, + 0x40, + 0xC0, + 0x20, + 0xA0, + 0x60, + 0xE0, + 0x10, + 0x90, + 0x50, + 0xD0, + 0x30, + 0xB0, + 0x70, + 0xF0, + 0x08, + 0x88, + 0x48, + 0xC8, + 0x28, + 0xA8, + 0x68, + 0xE8, + 0x18, + 0x98, + 0x58, + 0xD8, + 0x38, + 0xB8, + 0x78, + 0xF8, + 0x04, + 0x84, + 0x44, + 0xC4, + 0x24, + 0xA4, + 0x64, + 0xE4, + 0x14, + 0x94, + 0x54, + 0xD4, + 0x34, + 0xB4, + 0x74, + 0xF4, + 0x0C, + 0x8C, + 0x4C, + 0xCC, + 0x2C, + 0xAC, + 0x6C, + 0xEC, + 0x1C, + 0x9C, + 0x5C, + 0xDC, + 0x3C, + 0xBC, + 0x7C, + 0xFC, + 0x02, + 0x82, + 0x42, + 0xC2, + 0x22, + 0xA2, + 0x62, + 0xE2, + 0x12, + 0x92, + 0x52, + 0xD2, + 0x32, + 0xB2, + 0x72, + 0xF2, + 0x0A, + 0x8A, + 0x4A, + 0xCA, + 0x2A, + 0xAA, + 0x6A, + 0xEA, + 0x1A, + 0x9A, + 0x5A, + 0xDA, + 0x3A, + 0xBA, + 0x7A, + 0xFA, + 0x06, + 0x86, + 0x46, + 0xC6, + 0x26, + 0xA6, + 0x66, + 0xE6, + 0x16, + 0x96, + 0x56, + 0xD6, + 0x36, + 0xB6, + 0x76, + 0xF6, + 0x0E, + 0x8E, + 0x4E, + 0xCE, + 0x2E, + 0xAE, + 0x6E, + 0xEE, + 0x1E, + 0x9E, + 0x5E, + 0xDE, + 0x3E, + 0xBE, + 0x7E, + 0xFE, + 0x01, + 0x81, + 0x41, + 0xC1, + 0x21, + 0xA1, + 0x61, + 0xE1, + 0x11, + 0x91, + 0x51, + 0xD1, + 0x31, + 0xB1, + 0x71, + 0xF1, + 0x09, + 0x89, + 0x49, + 0xC9, + 0x29, + 0xA9, + 0x69, + 0xE9, + 0x19, + 0x99, + 0x59, + 0xD9, + 0x39, + 0xB9, + 0x79, + 0xF9, + 0x05, + 0x85, + 0x45, + 0xC5, + 0x25, + 0xA5, + 0x65, + 0xE5, + 0x15, + 0x95, + 0x55, + 0xD5, + 0x35, + 0xB5, + 0x75, + 0xF5, + 0x0D, + 0x8D, + 0x4D, + 0xCD, + 0x2D, + 0xAD, + 0x6D, + 0xED, + 0x1D, + 0x9D, + 0x5D, + 0xDD, + 0x3D, + 0xBD, + 0x7D, + 0xFD, + 0x03, + 0x83, + 0x43, + 0xC3, + 0x23, + 0xA3, + 0x63, + 0xE3, + 0x13, + 0x93, + 0x53, + 0xD3, + 0x33, + 0xB3, + 0x73, + 0xF3, + 0x0B, + 0x8B, + 0x4B, + 0xCB, + 0x2B, + 0xAB, + 0x6B, + 0xEB, + 0x1B, + 0x9B, + 0x5B, + 0xDB, + 0x3B, + 0xBB, + 0x7B, + 0xFB, + 0x07, + 0x87, + 0x47, + 0xC7, + 0x27, + 0xA7, + 0x67, + 0xE7, + 0x17, + 0x97, + 0x57, + 0xD7, + 0x37, + 0xB7, + 0x77, + 0xF7, + 0x0F, + 0x8F, + 0x4F, + 0xCF, + 0x2F, + 0xAF, + 0x6F, + 0xEF, + 0x1F, + 0x9F, + 0x5F, + 0xDF, + 0x3F, + 0xBF, + 0x7F, + 0xFF, +} + +const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase)) + +/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX), + where reverse(value, len) is the bit-wise reversal of the len least + significant bits of value. */ +func reverseBits8(num uint64) uint64 { + return uint64(kReverseBits[num]) +} + +/* Stores code in table[0], table[step], table[2*step], ..., table[end] */ +/* Assumes that end is an integer multiple of step */ +func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) { + for { + end -= step + table[end] = code + if end <= 0 { + break + } + } +} + +/* Returns the table width of the next 2nd level table. |count| is the histogram + of bit lengths for the remaining symbols, |len| is the code length of the + next processed symbol. 
*/ +func nextTableBitSize(count []uint16, len int, root_bits int) int { + var left int = 1 << uint(len-root_bits) + for len < huffmanMaxCodeLength { + left -= int(count[len]) + if left <= 0 { + break + } + len++ + left <<= 1 + } + + return len - root_bits +} + +func buildCodeLengthsHuffmanTable(table []huffmanCode, code_lengths []byte, count []uint16) { + var code huffmanCode /* current table entry */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* step size to replicate values in current table */ /* size of current table */ /* symbols sorted by code length */ + var symbol int + var key uint64 + var key_step uint64 + var step int + var table_size int + var sorted [codeLengthCodes]int + var offset [huffmanMaxCodeLengthCodeLength + 1]int + var bits int + var bits_count int + /* offsets in sorted table for each length */ + assert(huffmanMaxCodeLengthCodeLength <= reverseBitsMax) + + /* Generate offsets into sorted symbol table by code length. */ + symbol = -1 + + bits = 1 + var i int + for i = 0; i < huffmanMaxCodeLengthCodeLength; i++ { + symbol += int(count[bits]) + offset[bits] = symbol + bits++ + } + + /* Symbols with code length 0 are placed after all other symbols. */ + offset[0] = codeLengthCodes - 1 + + /* Sort symbols by length, by symbol order within each length. */ + symbol = codeLengthCodes + + for { + var i int + for i = 0; i < 6; i++ { + symbol-- + sorted[offset[code_lengths[symbol]]] = symbol + offset[code_lengths[symbol]]-- + } + if symbol == 0 { + break + } + } + + table_size = 1 << huffmanMaxCodeLengthCodeLength + + /* Special case: all symbols but one have 0 code length. */ + if offset[0] == 0 { + code = constructHuffmanCode(0, uint16(sorted[0])) + for key = 0; key < uint64(table_size); key++ { + table[key] = code + } + + return + } + + /* Fill in table. */ + key = 0 + + key_step = reverseBitsLowest + symbol = 0 + bits = 1 + step = 2 + for { + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + code = constructHuffmanCode(byte(bits), uint16(sorted[symbol])) + symbol++ + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > huffmanMaxCodeLengthCodeLength { + break + } + } +} + +func buildHuffmanTable(root_table []huffmanCode, root_bits int, symbol_lists symbolList, count []uint16) uint32 { + var code huffmanCode /* current table entry */ /* next available space in table */ /* current code length */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* 2nd level table prefix code */ /* 2nd level table prefix code addend */ /* step size to replicate values in current table */ /* key length of current table */ /* size of current table */ /* sum of root table size and 2nd level table sizes */ + var table []huffmanCode + var len int + var symbol int + var key uint64 + var key_step uint64 + var sub_key uint64 + var sub_key_step uint64 + var step int + var table_bits int + var table_size int + var total_size int + var max_length int = -1 + var bits int + var bits_count int + + assert(root_bits <= reverseBitsMax) + assert(huffmanMaxCodeLength-root_bits <= reverseBitsMax) + + for symbolListGet(symbol_lists, max_length) == 0xFFFF { + max_length-- + } + max_length += huffmanMaxCodeLength + 1 + + table = root_table + table_bits = root_bits + table_size = 1 << uint(table_bits) + total_size = table_size + + /* Fill in the root table. 
Reduce the table size to if possible, + and create the repetitions by memcpy. */ + if table_bits > max_length { + table_bits = max_length + table_size = 1 << uint(table_bits) + } + + key = 0 + key_step = reverseBitsLowest + bits = 1 + step = 2 + for { + symbol = bits - (huffmanMaxCodeLength + 1) + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(bits), uint16(symbol)) + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > table_bits { + break + } + } + + /* If root_bits != table_bits then replicate to fill the remaining slots. */ + for total_size != table_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + /* Fill in 2nd level tables and add pointers to root table. */ + key_step = reverseBitsLowest >> uint(root_bits-1) + + sub_key = reverseBitsLowest << 1 + sub_key_step = reverseBitsLowest + len = root_bits + 1 + step = 2 + for ; len <= max_length; len++ { + symbol = len - (huffmanMaxCodeLength + 1) + for ; count[len] != 0; count[len]-- { + if sub_key == reverseBitsLowest<<1 { + table = table[table_size:] + table_bits = nextTableBitSize(count, int(len), root_bits) + table_size = 1 << uint(table_bits) + total_size += table_size + sub_key = reverseBits8(key) + key += key_step + root_table[sub_key] = constructHuffmanCode(byte(table_bits+root_bits), uint16(uint64(uint(-cap(table)+cap(root_table)))-sub_key)) + sub_key = 0 + } + + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(len-root_bits), uint16(symbol)) + replicateValue(table[reverseBits8(sub_key):], step, table_size, code) + sub_key += sub_key_step + } + + step <<= 1 + sub_key_step >>= 1 + } + + return uint32(total_size) +} + +func buildSimpleHuffmanTable(table []huffmanCode, root_bits int, val []uint16, num_symbols uint32) uint32 { + var table_size uint32 = 1 + var goal_size uint32 = 1 << uint(root_bits) + switch num_symbols { + case 0: + table[0] = constructHuffmanCode(0, val[0]) + + case 1: + if val[1] > val[0] { + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(1, val[1]) + } else { + table[0] = constructHuffmanCode(1, val[1]) + table[1] = constructHuffmanCode(1, val[0]) + } + + table_size = 2 + + case 2: + table[0] = constructHuffmanCode(1, val[0]) + table[2] = constructHuffmanCode(1, val[0]) + if val[2] > val[1] { + table[1] = constructHuffmanCode(2, val[1]) + table[3] = constructHuffmanCode(2, val[2]) + } else { + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[1]) + } + + table_size = 4 + + case 3: + var i int + var k int + for i = 0; i < 3; i++ { + for k = i + 1; k < 4; k++ { + if val[k] < val[i] { + var t uint16 = val[k] + val[k] = val[i] + val[i] = t + } + } + } + + table[0] = constructHuffmanCode(2, val[0]) + table[2] = constructHuffmanCode(2, val[1]) + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[3]) + table_size = 4 + + case 4: + if val[3] < val[2] { + var t uint16 = val[3] + val[3] = val[2] + val[2] = t + } + + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(2, val[1]) + table[2] = constructHuffmanCode(1, val[0]) + table[3] = constructHuffmanCode(3, val[2]) + table[4] = constructHuffmanCode(1, val[0]) + table[5] = constructHuffmanCode(2, val[1]) + table[6] = constructHuffmanCode(1, val[0]) + table[7] = constructHuffmanCode(3, val[3]) 
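// The eight entries above spell out the num_symbols == 4 case from the doc
// comment earlier (code lengths [1, 2, 3, 3]): the 1-bit code val[0] fills the
// even slots, the 2-bit code val[1] sits at offsets 1 and 5, and the 3-bit
// codes val[2] and val[3] occupy offsets 3 and 7.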
+ table_size = 8 + } + + for table_size != goal_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + return goal_size +} diff --git a/vendor/github.com/andybalholm/brotli/literal_cost.go b/vendor/github.com/andybalholm/brotli/literal_cost.go new file mode 100644 index 0000000..5a9ace9 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/literal_cost.go @@ -0,0 +1,182 @@ +package brotli + +func utf8Position(last uint, c uint, clamp uint) uint { + if c < 128 { + return 0 /* Next one is the 'Byte 1' again. */ + } else if c >= 192 { /* Next one is the 'Byte 2' of utf-8 encoding. */ + return brotli_min_size_t(1, clamp) + } else { + /* Let's decide over the last byte if this ends the sequence. */ + if last < 0xE0 { + return 0 /* Completed two or three byte coding. */ /* Next one is the 'Byte 3' of utf-8 encoding. */ + } else { + return brotli_min_size_t(2, clamp) + } + } +} + +func decideMultiByteStatsLevel(pos uint, len uint, mask uint, data []byte) uint { + var counts = [3]uint{0} /* should be 2, but 1 compresses better. */ + var max_utf8 uint = 1 + var last_c uint = 0 + var i uint + for i = 0; i < len; i++ { + var c uint = uint(data[(pos+i)&mask]) + counts[utf8Position(last_c, c, 2)]++ + last_c = c + } + + if counts[2] < 500 { + max_utf8 = 1 + } + + if counts[1]+counts[2] < 25 { + max_utf8 = 0 + } + + return max_utf8 +} + +func estimateBitCostsForLiteralsUTF8(pos uint, len uint, mask uint, data []byte, cost []float32) { + var max_utf8 uint = decideMultiByteStatsLevel(pos, uint(len), mask, data) + /* Bootstrap histograms. */ + var histogram = [3][256]uint{[256]uint{0}} + var window_half uint = 495 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var in_window_utf8 = [3]uint{0} + /* max_utf8 is 0 (normal ASCII single byte modeling), + 1 (for 2-byte UTF-8 modeling), or 2 (for 3-byte UTF-8 modeling). */ + + var i uint + { + var last_c uint = 0 + var utf8_pos uint = 0 + for i = 0; i < in_window; i++ { + var c uint = uint(data[(pos+i)&mask]) + histogram[utf8_pos][c]++ + in_window_utf8[utf8_pos]++ + utf8_pos = utf8Position(last_c, c, max_utf8) + last_c = c + } + } + + /* Compute bit costs with sliding window. */ + for i = 0; i < len; i++ { + if i >= window_half { + var c uint + var last_c uint + if i < window_half+1 { + c = 0 + } else { + c = uint(data[(pos+i-window_half-1)&mask]) + } + if i < window_half+2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-window_half-2)&mask]) + } + /* Remove a byte in the past. */ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i-window_half)&mask]]-- + in_window_utf8[utf8_pos2]-- + } + + if i+window_half < len { + var c uint = uint(data[(pos+i+window_half-1)&mask]) + var last_c uint = uint(data[(pos+i+window_half-2)&mask]) + /* Add a byte in the future. 
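(For scale: the cost computed below is fastLog2(total bytes in this context's window) - fastLog2(this byte's count), so a byte that accounts for 1/16 of its window costs log2(16) = 4 bits before the small constant correction.)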
*/ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i+window_half)&mask]]++ + in_window_utf8[utf8_pos2]++ + } + { + var c uint + var last_c uint + if i < 1 { + c = 0 + } else { + c = uint(data[(pos+i-1)&mask]) + } + if i < 2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-2)&mask]) + } + var utf8_pos uint = utf8Position(last_c, c, max_utf8) + var masked_pos uint = (pos + i) & mask + var histo uint = histogram[utf8_pos][data[masked_pos]] + var lit_cost float64 + if histo == 0 { + histo = 1 + } + + lit_cost = fastLog2(in_window_utf8[utf8_pos]) - fastLog2(histo) + lit_cost += 0.02905 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + /* Make the first bytes more expensive -- seems to help, not sure why. + Perhaps because the entropy source is changing its properties + rapidly in the beginning of the file, perhaps because the beginning + of the data is a statistical "anomaly". */ + if i < 2000 { + lit_cost += 0.7 - (float64(2000-i) / 2000.0 * 0.35) + } + + cost[i] = float32(lit_cost) + } + } +} + +func estimateBitCostsForLiterals(pos uint, len uint, mask uint, data []byte, cost []float32) { + if isMostlyUTF8(data, pos, mask, uint(len), kMinUTF8Ratio) { + estimateBitCostsForLiteralsUTF8(pos, uint(len), mask, data, cost) + return + } else { + var histogram = [256]uint{0} + var window_half uint = 2000 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var i uint + /* Bootstrap histogram. */ + for i = 0; i < in_window; i++ { + histogram[data[(pos+i)&mask]]++ + } + + /* Compute bit costs with sliding window. */ + for i = 0; i < len; i++ { + var histo uint + if i >= window_half { + /* Remove a byte in the past. */ + histogram[data[(pos+i-window_half)&mask]]-- + + in_window-- + } + + if i+window_half < len { + /* Add a byte in the future. */ + histogram[data[(pos+i+window_half)&mask]]++ + + in_window++ + } + + histo = histogram[data[(pos+i)&mask]] + if histo == 0 { + histo = 1 + } + { + var lit_cost float64 = fastLog2(in_window) - fastLog2(histo) + lit_cost += 0.029 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + cost[i] = float32(lit_cost) + } + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go new file mode 100644 index 0000000..37ed8e1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go @@ -0,0 +1,45 @@ +package matchfinder + +// An absoluteMatch is like a Match, but it stores indexes into the byte +// stream instead of lengths. +type absoluteMatch struct { + // Start is the index of the first byte. + Start int + + // End is the index of the byte after the last byte + // (so that End - Start = Length). + End int + + // Match is the index of the previous data that matches + // (Start - Match = Distance). + Match int +} + +// A matchEmitter manages the output of matches for a MatchFinder. +type matchEmitter struct { + // Dst is the destination slice that Matches are added to. + Dst []Match + + // NextEmit is the index of the next byte to emit. + NextEmit int +} + +func (e *matchEmitter) emit(m absoluteMatch) { + e.Dst = append(e.Dst, Match{ + Unmatched: m.Start - e.NextEmit, + Length: m.End - m.Start, + Distance: m.Start - m.Match, + }) + e.NextEmit = m.End +} + +// trim shortens m if it extends past maxEnd. Then if the length is at least +// minLength, the match is emitted. 
+func (e *matchEmitter) trim(m absoluteMatch, maxEnd int, minLength int) { + if m.End > maxEnd { + m.End = maxEnd + } + if m.End-m.Start >= minLength { + e.emit(m) + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m0.go b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go new file mode 100644 index 0000000..773b7c4 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go @@ -0,0 +1,169 @@ +package matchfinder + +import ( + "encoding/binary" +) + +// M0 is an implementation of the MatchFinder interface based +// on the algorithm used by snappy, but modified to be more like the algorithm +// used by compression level 0 of the brotli reference implementation. +// +// It has a maximum block size of 65536 bytes. +type M0 struct { + // Lazy turns on "lazy matching," for higher compression but less speed. + Lazy bool + + MaxDistance int + MaxLength int +} + +func (M0) Reset() {} + +const ( + m0HashLen = 5 + + m0TableBits = 14 + m0TableSize = 1 << m0TableBits + m0Shift = 32 - m0TableBits + // m0TableMask is redundant, but helps the compiler eliminate bounds + // checks. + m0TableMask = m0TableSize - 1 +) + +func (m M0) hash(data uint64) uint64 { + hash := (data << (64 - 8*m0HashLen)) * hashMul64 + return hash >> (64 - m0TableBits) +} + +// FindMatches looks for matches in src, appends them to dst, and returns dst. +// src must not be longer than 65536 bytes. +func (m M0) FindMatches(dst []Match, src []byte) []Match { + const inputMargin = 16 - 1 + const minNonLiteralBlockSize = 1 + 1 + inputMargin + + if len(src) < minNonLiteralBlockSize { + dst = append(dst, Match{ + Unmatched: len(src), + }) + return dst + } + if len(src) > 65536 { + panic("block too long") + } + + var table [m0TableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := m.hash(binary.LittleEndian.Uint64(src[s:])) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. 
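// skip starts at 32 so that skip>>5 is 1: the scan advances one byte per
// probe for the first 32 misses, then two bytes per probe for the next 16,
// and so on, as described in the comment above.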
+ skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&m0TableMask]) + table[nextHash&m0TableMask] = uint16(s) + nextHash = m.hash(binary.LittleEndian.Uint64(src[nextS:])) + if m.MaxDistance != 0 && s-candidate > m.MaxDistance { + continue + } + if binary.LittleEndian.Uint32(src[s:]) == binary.LittleEndian.Uint32(src[candidate:]) { + break + } + } + + // Invariant: we have a 4-byte match at s. + base := s + s = extendMatch(src, candidate+4, s+4) + + origBase := base + if m.Lazy && base+1 < sLimit { + newBase := base + 1 + h := m.hash(binary.LittleEndian.Uint64(src[newBase:])) + newCandidate := int(table[h&m0TableMask]) + table[h&m0TableMask] = uint16(newBase) + okDistance := true + if m.MaxDistance != 0 && newBase-newCandidate > m.MaxDistance { + okDistance = false + } + if okDistance && binary.LittleEndian.Uint32(src[newBase:]) == binary.LittleEndian.Uint32(src[newCandidate:]) { + newS := extendMatch(src, newCandidate+4, newBase+4) + if newS-newBase > s-base+1 { + s = newS + base = newBase + candidate = newCandidate + } + } + } + + if m.MaxLength != 0 && s-base > m.MaxLength { + s = base + m.MaxLength + } + dst = append(dst, Match{ + Unmatched: base - nextEmit, + Length: s - base, + Distance: base - candidate, + }) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if m.Lazy { + // If lazy matching is enabled, we update the hash table for + // every byte in the match. + for i := origBase + 2; i < s-1; i++ { + x := binary.LittleEndian.Uint64(src[i:]) + table[m.hash(x)&m0TableMask] = uint16(i) + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := binary.LittleEndian.Uint64(src[s-1:]) + prevHash := m.hash(x >> 0) + table[prevHash&m0TableMask] = uint16(s - 1) + nextHash = m.hash(x >> 8) + } + +emitRemainder: + if nextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - nextEmit, + }) + } + return dst +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go new file mode 100644 index 0000000..5b2acba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go @@ -0,0 +1,297 @@ +package matchfinder + +import ( + "encoding/binary" + "math/bits" + "runtime" +) + +// M4 is an implementation of the MatchFinder +// interface that uses a hash table to find matches, +// optional match chains, +// and the advanced parsing technique from +// https://fastcompression.blogspot.com/2011/12/advanced-parsing-strategies.html. +type M4 struct { + // MaxDistance is the maximum distance (in bytes) to look back for + // a match. The default is 65535. + MaxDistance int + + // MinLength is the length of the shortest match to return. + // The default is 4. + MinLength int + + // HashLen is the number of bytes to use to calculate the hashes. + // The maximum is 8 and the default is 6. + HashLen int + + // TableBits is the number of bits in the hash table indexes. + // The default is 17 (128K entries). + TableBits int + + // ChainLength is how many entries to search on the "match chain" of older + // locations with the same hash as the current location. + ChainLength int + + // DistanceBitCost is used when comparing two matches to see + // which is better. 
The comparison is primarily based on the length + // of the matches, but it can also take the distance into account, + // in terms of the number of bits needed to represent the distance. + // One byte of length is given a score of 256, so 32 (256/8) would + // be a reasonable first guess for the value of one bit. + // (The default is 0, which bases the comparison solely on length.) + DistanceBitCost int + + table []uint32 + chain []uint16 + + history []byte +} + +func (q *M4) Reset() { + for i := range q.table { + q.table[i] = 0 + } + q.history = q.history[:0] + q.chain = q.chain[:0] +} + +func (q *M4) score(m absoluteMatch) int { + return (m.End-m.Start)*256 + bits.LeadingZeros32(uint32(m.Start-m.Match))*q.DistanceBitCost +} + +func (q *M4) FindMatches(dst []Match, src []byte) []Match { + if q.MaxDistance == 0 { + q.MaxDistance = 65535 + } + if q.MinLength == 0 { + q.MinLength = 4 + } + if q.HashLen == 0 { + q.HashLen = 6 + } + if q.TableBits == 0 { + q.TableBits = 17 + } + if len(q.table) < 1< q.MaxDistance*2 { + // Trim down the history buffer. + delta := len(q.history) - q.MaxDistance + copy(q.history, q.history[delta:]) + q.history = q.history[:q.MaxDistance] + if q.ChainLength > 0 { + q.chain = q.chain[:q.MaxDistance] + } + + for i, v := range q.table { + newV := int(v) - delta + if newV < 0 { + newV = 0 + } + q.table[i] = uint32(newV) + } + } + + // Append src to the history buffer. + e.NextEmit = len(q.history) + q.history = append(q.history, src...) + if q.ChainLength > 0 { + q.chain = append(q.chain, make([]uint16, len(src))...) + } + src = q.history + + // matches stores the matches that have been found but not emitted, + // in reverse order. (matches[0] is the most recent one.) + var matches [3]absoluteMatch + for i := e.NextEmit; i < len(src)-7; i++ { + if matches[0] != (absoluteMatch{}) && i >= matches[0].End { + // We have found some matches, and we're far enough along that we probably + // won't find overlapping matches, so we might as well emit them. + if matches[1] != (absoluteMatch{}) { + e.trim(matches[1], matches[0].Start, q.MinLength) + } + e.emit(matches[0]) + matches = [3]absoluteMatch{} + } + + // Calculate and store the hash. + h := ((binary.LittleEndian.Uint64(src[i:]) & (1<<(8*q.HashLen) - 1)) * hashMul64) >> (64 - q.TableBits) + candidate := int(q.table[h]) + q.table[h] = uint32(i) + if q.ChainLength > 0 && candidate != 0 { + delta := i - candidate + if delta < 1<<16 { + q.chain[i] = uint16(delta) + } + } + + if i < matches[0].End && i != matches[0].End+2-q.HashLen { + continue + } + if candidate == 0 || i-candidate > q.MaxDistance { + continue + } + + // Look for a match. 
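// (Scoring note: score above awards 256 per matched byte plus
// LeadingZeros32(distance)*DistanceBitCost, so with DistanceBitCost = 32 one
// extra byte of length is worth the same as a distance that is 8 bits
// shorter.)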
+ var currentMatch absoluteMatch + + if i-candidate != matches[0].Start-matches[0].Match { + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength { + currentMatch = m + } + } + } + + for j := 0; j < q.ChainLength; j++ { + delta := q.chain[candidate] + if delta == 0 { + break + } + candidate -= int(delta) + if candidate <= 0 || i-candidate > q.MaxDistance { + break + } + if i-candidate != matches[0].Start-matches[0].Match { + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { + currentMatch = m + } + } + } + } + + if currentMatch.End-currentMatch.Start < q.MinLength { + continue + } + + overlapPenalty := 0 + if matches[0] != (absoluteMatch{}) { + overlapPenalty = 275 + if currentMatch.Start <= matches[1].End { + // This match would completely replace the previous match, + // so there is no penalty for overlap. + overlapPenalty = 0 + } + } + + if q.score(currentMatch) <= q.score(matches[0])+overlapPenalty { + continue + } + + matches = [3]absoluteMatch{ + currentMatch, + matches[0], + matches[1], + } + + if matches[2] == (absoluteMatch{}) { + continue + } + + // We have three matches, so it's time to emit one and/or eliminate one. + switch { + case matches[0].Start < matches[2].End: + // The first and third matches overlap; discard the one in between. + matches = [3]absoluteMatch{ + matches[0], + matches[2], + absoluteMatch{}, + } + + case matches[0].Start < matches[2].End+q.MinLength: + // The first and third matches don't overlap, but there's no room for + // another match between them. Emit the first match and discard the second. + e.emit(matches[2]) + matches = [3]absoluteMatch{ + matches[0], + absoluteMatch{}, + absoluteMatch{}, + } + + default: + // Emit the first match, shortening it if necessary to avoid overlap with the second. + e.trim(matches[2], matches[1].Start, q.MinLength) + matches[2] = absoluteMatch{} + } + } + + // We've found all the matches now; emit the remaining ones. + if matches[1] != (absoluteMatch{}) { + e.trim(matches[1], matches[0].Start, q.MinLength) + } + if matches[0] != (absoluteMatch{}) { + e.emit(matches[0]) + } + + dst = e.Dst + if e.NextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - e.NextEmit, + }) + } + + return dst +} + +const hashMul64 = 0x1E35A7BD1E35A7BD + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + switch runtime.GOARCH { + case "amd64": + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + for j+8 < len(src) { + iBytes := binary.LittleEndian.Uint64(src[i:]) + jBytes := binary.LittleEndian.Uint64(src[j:]) + if iBytes != jBytes { + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + return j + bits.TrailingZeros64(iBytes^jBytes)>>3 + } + i, j = i+8, j+8 + } + case "386": + // On a 32-bit CPU, we do it 4 bytes at a time. 
+ for j+4 < len(src) { + iBytes := binary.LittleEndian.Uint32(src[i:]) + jBytes := binary.LittleEndian.Uint32(src[j:]) + if iBytes != jBytes { + return j + bits.TrailingZeros32(iBytes^jBytes)>>3 + } + i, j = i+4, j+4 + } + } + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +// Given a 4-byte match at src[start] and src[candidate], extendMatch2 extends it +// upward as far as possible, and downward no farther than to min. +func extendMatch2(src []byte, start, candidate, min int) absoluteMatch { + end := extendMatch(src, candidate+4, start+4) + for start > min && candidate > 0 && src[start-1] == src[candidate-1] { + start-- + candidate-- + } + return absoluteMatch{ + Start: start, + End: end, + Match: candidate, + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go new file mode 100644 index 0000000..f6bcfdb --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go @@ -0,0 +1,103 @@ +// The matchfinder package defines reusable components for data compression. +// +// Many compression libraries have two main parts: +// - Something that looks for repeated sequences of bytes +// - An encoder for the compressed data format (often an entropy coder) +// +// Although these are logically two separate steps, the implementations are +// usually closely tied together. You can't use flate's matcher with snappy's +// encoder, for example. This package defines interfaces and an intermediate +// representation to allow mixing and matching compression components. +package matchfinder + +import "io" + +// A Match is the basic unit of LZ77 compression. +type Match struct { + Unmatched int // the number of unmatched bytes since the previous match + Length int // the number of bytes in the matched string; it may be 0 at the end of the input + Distance int // how far back in the stream to copy from +} + +// A MatchFinder performs the LZ77 stage of compression, looking for matches. +type MatchFinder interface { + // FindMatches looks for matches in src, appends them to dst, and returns dst. + FindMatches(dst []Match, src []byte) []Match + + // Reset clears any internal state, preparing the MatchFinder to be used with + // a new stream. + Reset() +} + +// An Encoder encodes the data in its final format. +type Encoder interface { + // Encode appends the encoded format of src to dst, using the match + // information from matches. + Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte + + // Reset clears any internal state, preparing the Encoder to be used with + // a new stream. + Reset() +} + +// A Writer uses MatchFinder and Encoder to write compressed data to Dest. +type Writer struct { + Dest io.Writer + MatchFinder MatchFinder + Encoder Encoder + + // BlockSize is the number of bytes to compress at a time. If it is zero, + // each Write operation will be treated as one block. + BlockSize int + + err error + inBuf []byte + outBuf []byte + matches []Match +} + +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + + if w.BlockSize == 0 { + return w.writeBlock(p, false) + } + + w.inBuf = append(w.inBuf, p...) 
+ var pos int + for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize { + w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false) + } + if pos > 0 { + n := copy(w.inBuf, w.inBuf[pos:]) + w.inBuf = w.inBuf[:n] + } + + return len(p), w.err +} + +func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) { + w.outBuf = w.outBuf[:0] + w.matches = w.MatchFinder.FindMatches(w.matches[:0], p) + w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock) + _, w.err = w.Dest.Write(w.outBuf) + return len(p), w.err +} + +func (w *Writer) Close() error { + w.writeBlock(w.inBuf, true) + w.inBuf = w.inBuf[:0] + return w.err +} + +func (w *Writer) Reset(newDest io.Writer) { + w.MatchFinder.Reset() + w.Encoder.Reset() + w.err = nil + w.inBuf = w.inBuf[:0] + w.outBuf = w.outBuf[:0] + w.matches = w.matches[:0] + w.Dest = newDest +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go new file mode 100644 index 0000000..75ecc59 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go @@ -0,0 +1,53 @@ +package matchfinder + +import "fmt" + +// A TextEncoder is an Encoder that produces a human-readable representation of +// the LZ77 compression. Matches are replaced with symbols. +type TextEncoder struct{} + +func (t TextEncoder) Reset() {} + +func (t TextEncoder) Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte { + pos := 0 + for _, m := range matches { + if m.Unmatched > 0 { + dst = append(dst, src[pos:pos+m.Unmatched]...) + pos += m.Unmatched + } + if m.Length > 0 { + dst = append(dst, []byte(fmt.Sprintf("<%d,%d>", m.Length, m.Distance))...) + pos += m.Length + } + } + if pos < len(src) { + dst = append(dst, src[pos:]...) + } + return dst +} + +// A NoMatchFinder implements MatchFinder, but doesn't find any matches. +// It can be used to implement the equivalent of the standard library flate package's +// HuffmanOnly setting. +type NoMatchFinder struct{} + +func (n NoMatchFinder) Reset() {} + +func (n NoMatchFinder) FindMatches(dst []Match, src []byte) []Match { + return append(dst, Match{ + Unmatched: len(src), + }) +} + +// AutoReset wraps a MatchFinder that can return references to data in previous +// blocks, and calls Reset before each block. It is useful for (e.g.) using a +// snappy Encoder with a MatchFinder designed for flate. (Snappy doesn't +// support references between blocks.) +type AutoReset struct { + MatchFinder +} + +func (a AutoReset) FindMatches(dst []Match, src []byte) []Match { + a.Reset() + return a.MatchFinder.FindMatches(dst, src) +} diff --git a/vendor/github.com/andybalholm/brotli/memory.go b/vendor/github.com/andybalholm/brotli/memory.go new file mode 100644 index 0000000..a07c705 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/memory.go @@ -0,0 +1,66 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* +Dynamically grows array capacity to at least the requested size +T: data type +A: array +C: capacity +R: requested size +*/ +func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) { + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + var new_array []byte = make([]byte, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + + *c = new_size + } +} + +func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) { + var new_array []uint32 + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + new_array = make([]uint32, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + *c = new_size + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock.go b/vendor/github.com/andybalholm/brotli/metablock.go new file mode 100644 index 0000000..3014df8 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock.go @@ -0,0 +1,574 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2014 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Algorithms for distributing the literals and commands of a metablock between + block types and contexts. */ + +type metaBlockSplit struct { + literal_split blockSplit + command_split blockSplit + distance_split blockSplit + literal_context_map []uint32 + literal_context_map_size uint + distance_context_map []uint32 + distance_context_map_size uint + literal_histograms []histogramLiteral + literal_histograms_size uint + command_histograms []histogramCommand + command_histograms_size uint + distance_histograms []histogramDistance + distance_histograms_size uint +} + +var metaBlockPool sync.Pool + +func getMetaBlockSplit() *metaBlockSplit { + mb, _ := metaBlockPool.Get().(*metaBlockSplit) + + if mb == nil { + mb = &metaBlockSplit{} + } else { + initBlockSplit(&mb.literal_split) + initBlockSplit(&mb.command_split) + initBlockSplit(&mb.distance_split) + mb.literal_context_map = mb.literal_context_map[:0] + mb.literal_context_map_size = 0 + mb.distance_context_map = mb.distance_context_map[:0] + mb.distance_context_map_size = 0 + mb.literal_histograms = mb.literal_histograms[:0] + mb.command_histograms = mb.command_histograms[:0] + mb.distance_histograms = mb.distance_histograms[:0] + } + return mb +} + +func freeMetaBlockSplit(mb *metaBlockSplit) { + metaBlockPool.Put(mb) +} + +func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) { + var dist_params *distanceParams = ¶ms.dist + var alphabet_size uint32 + var max_distance uint32 + + dist_params.distance_postfix_bits = npostfix + dist_params.num_direct_distance_codes = ndirect + + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), maxDistanceBits)) + max_distance = ndirect + (1 << (maxDistanceBits + npostfix + 2)) - (1 << (npostfix + 2)) + + if params.large_window { + var bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} + var postfix uint32 = 1 << npostfix + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), largeMaxDistanceBits)) + + /* The maximum distance is set so that no distance symbol used can encode + a 
distance larger than BROTLI_MAX_ALLOWED_DISTANCE with all + its extra bits set. */ + if ndirect < bound[npostfix] { + max_distance = maxAllowedDistance - (bound[npostfix] - ndirect) + } else if ndirect >= bound[npostfix]+postfix { + max_distance = (3 << 29) - 4 + (ndirect - bound[npostfix]) + } else { + max_distance = maxAllowedDistance + } + } + + dist_params.alphabet_size = alphabet_size + dist_params.max_distance = uint(max_distance) +} + +func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) { + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + return + } + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + } + } +} + +func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool { + var equal_params bool = false + var dist_prefix uint16 + var dist_extra uint32 + var extra_bits float64 = 0.0 + var histo histogramDistance + histogramClearDistance(&histo) + + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + equal_params = true + } + + for i := range cmds { + cmd := &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + if equal_params { + dist_prefix = cmd.dist_prefix_ + } else { + var distance uint32 = commandRestoreDistanceCode(cmd, orig_params) + if distance > uint32(new_params.max_distance) { + return false + } + + prefixEncodeCopyDistance(uint(distance), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &dist_prefix, &dist_extra) + } + + histogramAddDistance(&histo, uint(dist_prefix)&0x3FF) + extra_bits += float64(dist_prefix >> 10) + } + } + + *cost = populationCostDistance(&histo) + extra_bits + return true +} + +var buildMetaBlock_kMaxNumberOfHistograms uint = 256 + +func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) { + var distance_histograms []histogramDistance + var literal_histograms []histogramLiteral + var literal_context_modes []int = nil + var literal_histograms_size uint + var distance_histograms_size uint + var i uint + var literal_context_multiplier uint = 1 + var npostfix uint32 + var ndirect_msb uint32 = 0 + var check_orig bool = true + var best_dist_cost float64 = 1e99 + var orig_params encoderParams = *params + /* Histogram ids need to fit in one byte. 
*/ + + var new_params encoderParams = *params + + for npostfix = 0; npostfix <= maxNpostfix; npostfix++ { + for ; ndirect_msb < 16; ndirect_msb++ { + var ndirect uint32 = ndirect_msb << npostfix + var skip bool + var dist_cost float64 + initDistanceParams(&new_params, npostfix, ndirect) + if npostfix == orig_params.dist.distance_postfix_bits && ndirect == orig_params.dist.num_direct_distance_codes { + check_orig = false + } + + skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost) + if skip || (dist_cost > best_dist_cost) { + break + } + + best_dist_cost = dist_cost + params.dist = new_params.dist + } + + if ndirect_msb > 0 { + ndirect_msb-- + } + ndirect_msb /= 2 + } + + if check_orig { + var dist_cost float64 + computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost) + if dist_cost < best_dist_cost { + /* NB: currently unused; uncomment when more param tuning is added. */ + /* best_dist_cost = dist_cost; */ + params.dist = orig_params.dist + } + } + + recomputeDistancePrefixes(cmds, &orig_params.dist, ¶ms.dist) + + splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split) + + if !params.disable_literal_context_modeling { + literal_context_multiplier = 1 << literalContextBits + literal_context_modes = make([]int, (mb.literal_split.num_types)) + for i = 0; i < mb.literal_split.num_types; i++ { + literal_context_modes[i] = literal_context_mode + } + } + + literal_histograms_size = mb.literal_split.num_types * literal_context_multiplier + literal_histograms = make([]histogramLiteral, literal_histograms_size) + clearHistogramsLiteral(literal_histograms, literal_histograms_size) + + distance_histograms_size = mb.distance_split.num_types << distanceContextBits + distance_histograms = make([]histogramDistance, distance_histograms_size) + clearHistogramsDistance(distance_histograms, distance_histograms_size) + + mb.command_histograms_size = mb.command_split.num_types + if cap(mb.command_histograms) < int(mb.command_histograms_size) { + mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size)) + } else { + mb.command_histograms = mb.command_histograms[:mb.command_histograms_size] + } + clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size) + + buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms) + literal_context_modes = nil + + mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits + if cap(mb.literal_context_map) < int(mb.literal_context_map_size) { + mb.literal_context_map = make([]uint32, (mb.literal_context_map_size)) + } else { + mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size] + } + + mb.literal_histograms_size = mb.literal_context_map_size + if cap(mb.literal_histograms) < int(mb.literal_histograms_size) { + mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size)) + } else { + mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size] + } + + clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map) + literal_histograms = nil + + if params.disable_literal_context_modeling { + /* Distribute assignment to all contexts. 
*/ + for i = mb.literal_split.num_types; i != 0; { + var j uint = 0 + i-- + for ; j < 1< 0 { + var entropy [maxStaticContexts]float64 + var combined_histo []histogramLiteral = make([]histogramLiteral, (2 * num_contexts)) + var combined_entropy [2 * maxStaticContexts]float64 + var diff = [2]float64{0.0} + /* Try merging the set of histograms for the current block type with the + respective set of histograms for the last and second last block types. + Decide over the split based on the total reduction of entropy across + all contexts. */ + + var i uint + for i = 0; i < num_contexts; i++ { + var curr_histo_ix uint = self.curr_histogram_ix_ + i + var j uint + entropy[i] = bitsEntropy(histograms[curr_histo_ix].data_[:], self.alphabet_size_) + for j = 0; j < 2; j++ { + var jx uint = j*num_contexts + i + var last_histogram_ix uint = self.last_histogram_ix_[j] + i + combined_histo[jx] = histograms[curr_histo_ix] + histogramAddHistogramLiteral(&combined_histo[jx], &histograms[last_histogram_ix]) + combined_entropy[jx] = bitsEntropy(combined_histo[jx].data_[0:], self.alphabet_size_) + diff[j] += combined_entropy[jx] - entropy[i] - last_entropy[jx] + } + } + + if split.num_types < self.max_block_types_ && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = split.num_types * num_contexts + for i = 0; i < num_contexts; i++ { + last_entropy[num_contexts+i] = last_entropy[i] + last_entropy[i] = entropy[i] + } + + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_ += num_contexts + if self.curr_histogram_ix_ < *self.histograms_size_ { + clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_) + } + + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + for i = 0; i < num_contexts; i++ { + histograms[self.last_histogram_ix_[0]+i] = combined_histo[num_contexts+i] + last_entropy[num_contexts+i] = last_entropy[i] + last_entropy[i] = combined_entropy[num_contexts+i] + histogramClearLiteral(&histograms[self.curr_histogram_ix_+i]) + } + + self.num_blocks_++ + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + for i = 0; i < num_contexts; i++ { + histograms[self.last_histogram_ix_[0]+i] = combined_histo[i] + last_entropy[i] = combined_entropy[i] + if split.num_types == 1 { + last_entropy[num_contexts+i] = last_entropy[i] + } + + histogramClearLiteral(&histograms[self.curr_histogram_ix_+i]) + } + + self.block_size_ = 0 + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + + combined_histo = nil + } + + if is_final { + *self.histograms_size_ = split.num_types * num_contexts + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current block type and context. 
When the + current block reaches the target size, decides on merging the block. */ +func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) { + histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + contextBlockSplitterFinishBlock(self, false) /* is_final = */ + } +} + +func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) { + var i uint + mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits + if cap(mb.literal_context_map) < int(mb.literal_context_map_size) { + mb.literal_context_map = make([]uint32, (mb.literal_context_map_size)) + } else { + mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size] + } + + for i = 0; i < mb.literal_split.num_types; i++ { + var offset uint32 = uint32(i * num_contexts) + var j uint + for j = 0; j < 1<= 128 { + blockSplitterAddSymbolDistance(&dist_blocks, uint(cmd.dist_prefix_)&0x3FF) + } + } + } + + if num_contexts == 1 { + blockSplitterFinishBlockLiteral(&lit_blocks.plain, true) /* is_final = */ + } else { + contextBlockSplitterFinishBlock(&lit_blocks.ctx, true) /* is_final = */ + } + + blockSplitterFinishBlockCommand(&cmd_blocks, true) /* is_final = */ + blockSplitterFinishBlockDistance(&dist_blocks, true) /* is_final = */ + + if num_contexts > 1 { + mapStaticContexts(num_contexts, static_context_map, mb) + } +} + +func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) { + if num_contexts == 1 { + buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb) + } else { + buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb) + } +} + +func optimizeHistograms(num_distance_codes uint32, mb *metaBlockSplit) { + var good_for_rle [numCommandSymbols]byte + var i uint + for i = 0; i < mb.literal_histograms_size; i++ { + optimizeHuffmanCountsForRLE(256, mb.literal_histograms[i].data_[:], good_for_rle[:]) + } + + for i = 0; i < mb.command_histograms_size; i++ { + optimizeHuffmanCountsForRLE(numCommandSymbols, mb.command_histograms[i].data_[:], good_for_rle[:]) + } + + for i = 0; i < mb.distance_histograms_size; i++ { + optimizeHuffmanCountsForRLE(uint(num_distance_codes), mb.distance_histograms[i].data_[:], good_for_rle[:]) + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_command.go b/vendor/github.com/andybalholm/brotli/metablock_command.go new file mode 100644 index 0000000..14c7b77 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_command.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). 
+ */ +type blockSplitterCommand struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramCommand + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramCommand, (*histograms_size)) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearCommand(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramCommand = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramCommand + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
*/ +func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) { + histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockCommand(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_distance.go b/vendor/github.com/andybalholm/brotli/metablock_distance.go new file mode 100644 index 0000000..5110a81 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_distance.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterDistance struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramDistance + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramDistance, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramDistance, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearDistance(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramDistance = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramDistance + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramDistance(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
*/ +func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) { + histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockDistance(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_literal.go b/vendor/github.com/andybalholm/brotli/metablock_literal.go new file mode 100644 index 0000000..307f8da --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_literal.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterLiteral struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramLiteral + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramLiteral, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearLiteral(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramLiteral = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramLiteral + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramLiteral(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. 
*/ +func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) { + histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockLiteral(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/params.go b/vendor/github.com/andybalholm/brotli/params.go new file mode 100644 index 0000000..0a4c687 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/params.go @@ -0,0 +1,37 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Parameters for the Brotli encoder with chosen quality levels. */ +type hasherParams struct { + type_ int + bucket_bits int + block_bits int + hash_len int + num_last_distances_to_check int +} + +type distanceParams struct { + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + alphabet_size uint32 + max_distance uint +} + +/* Encoding parameters */ +type encoderParams struct { + mode int + quality int + lgwin uint + lgblock int + size_hint uint + disable_literal_context_modeling bool + large_window bool + hasher hasherParams + dist distanceParams + dictionary encoderDictionary +} diff --git a/vendor/github.com/andybalholm/brotli/platform.go b/vendor/github.com/andybalholm/brotli/platform.go new file mode 100644 index 0000000..4ebfb15 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/platform.go @@ -0,0 +1,103 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func brotli_min_double(a float64, b float64) float64 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_double(a float64, b float64) float64 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_float(a float32, b float32) float32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_float(a float32, b float32) float32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_int(a int, b int) int { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_int(a int, b int) int { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_size_t(a uint, b uint) uint { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_size_t(a uint, b uint) uint { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint32_t(a uint32, b uint32) uint32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint32_t(a uint32, b uint32) uint32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint8_t(a byte, b byte) byte { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint8_t(a byte, b byte) byte { + if a > b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix.go b/vendor/github.com/andybalholm/brotli/prefix.go new file mode 100644 index 0000000..484df0d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for encoding of integers into prefix codes the amount of extra + bits, and the actual values of the extra bits. */ + +/* Here distance_code is an intermediate code, i.e. one of the special codes or + the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */ +func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) { + if distance_code < numDistanceShortCodes+num_direct_codes { + *code = uint16(distance_code) + *extra_bits = 0 + return + } else { + var dist uint = (uint(1) << (postfix_bits + 2)) + (distance_code - numDistanceShortCodes - num_direct_codes) + var bucket uint = uint(log2FloorNonZero(dist) - 1) + var postfix_mask uint = (1 << postfix_bits) - 1 + var postfix uint = dist & postfix_mask + var prefix uint = (dist >> bucket) & 1 + var offset uint = (2 + prefix) << bucket + var nbits uint = bucket - postfix_bits + *code = uint16(nbits<<10 | (numDistanceShortCodes + num_direct_codes + ((2*(nbits-1) + prefix) << postfix_bits) + postfix)) + *extra_bits = uint32((dist - offset) >> postfix_bits) + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix_dec.go b/vendor/github.com/andybalholm/brotli/prefix_dec.go new file mode 100644 index 0000000..183f0d5 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix_dec.go @@ -0,0 +1,723 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +type cmdLutElement struct { + insert_len_extra_bits byte + copy_len_extra_bits byte + distance_code int8 + context byte + insert_len_offset uint16 + copy_len_offset uint16 +} + +var kCmdLut = [numCommandSymbols]cmdLutElement{ + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0008}, + 
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0004, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0006, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 
0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0004, 0x0003}, + 
cmdLutElement{0x00, 0x00, -1, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0006, 0x0003}, + cmdLutElement{0x01, 0x00, -1, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, -1, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 
0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000a, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000a, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000a, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0009}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000e, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000e, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000e, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x0012, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x0012, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x0012, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0008}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x001a, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x001a, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x001a, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0008}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0022, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0022, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0022, 0x0004}, + 
cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0032, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0032, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0032, 0x0004}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0042, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0042, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0042, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0062, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0062, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0062, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0009}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x000e}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0036}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x000e}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 
0x03, 0x0022, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0036}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0000, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0000, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0000, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0000, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0000, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0000, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0001, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0001, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0001, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0001, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0001, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0001, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0002, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0002, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0002, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0002, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0002, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0002, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0003, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0003, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0003, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0003, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0003, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0003, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0004, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0004, 0x00c6}, + 
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0004, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0004, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0004, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0004, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0005, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0005, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0005, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0005, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0005, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0005, 0x0846}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0006, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0006, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0006, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0006, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0006, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0006, 0x0846}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0008, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0008, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0008, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0008, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0008, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0008, 0x0846}, + cmdLutElement{0x06, 0x00, -1, 0x00, 0x0082, 0x0002}, + cmdLutElement{0x06, 0x00, -1, 0x01, 0x0082, 0x0003}, + cmdLutElement{0x06, 0x00, -1, 0x02, 0x0082, 0x0004}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0005}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0006}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0007}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0008}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0009}, + cmdLutElement{0x07, 0x00, -1, 0x00, 0x00c2, 0x0002}, + cmdLutElement{0x07, 0x00, -1, 0x01, 0x00c2, 0x0003}, + cmdLutElement{0x07, 0x00, -1, 0x02, 0x00c2, 0x0004}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0005}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0006}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0007}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0008}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0009}, + cmdLutElement{0x08, 0x00, -1, 0x00, 0x0142, 0x0002}, + cmdLutElement{0x08, 0x00, -1, 0x01, 0x0142, 0x0003}, + cmdLutElement{0x08, 0x00, -1, 0x02, 0x0142, 0x0004}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0005}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0006}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0007}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0008}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0009}, + cmdLutElement{0x09, 0x00, -1, 0x00, 0x0242, 0x0002}, + cmdLutElement{0x09, 0x00, -1, 0x01, 0x0242, 0x0003}, + cmdLutElement{0x09, 0x00, -1, 0x02, 0x0242, 0x0004}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0005}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0006}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0007}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0008}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0009}, + cmdLutElement{0x0a, 0x00, -1, 0x00, 0x0442, 0x0002}, + cmdLutElement{0x0a, 0x00, -1, 0x01, 0x0442, 0x0003}, + cmdLutElement{0x0a, 0x00, -1, 0x02, 0x0442, 0x0004}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0005}, + cmdLutElement{0x0a, 0x00, -1, 
0x03, 0x0442, 0x0006}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0007}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0008}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0009}, + cmdLutElement{0x0c, 0x00, -1, 0x00, 0x0842, 0x0002}, + cmdLutElement{0x0c, 0x00, -1, 0x01, 0x0842, 0x0003}, + cmdLutElement{0x0c, 0x00, -1, 0x02, 0x0842, 0x0004}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0005}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0006}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0007}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0008}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0009}, + cmdLutElement{0x0e, 0x00, -1, 0x00, 0x1842, 0x0002}, + cmdLutElement{0x0e, 0x00, -1, 0x01, 0x1842, 0x0003}, + cmdLutElement{0x0e, 0x00, -1, 0x02, 0x1842, 0x0004}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0005}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0006}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0007}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0008}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0009}, + cmdLutElement{0x18, 0x00, -1, 0x00, 0x5842, 0x0002}, + cmdLutElement{0x18, 0x00, -1, 0x01, 0x5842, 0x0003}, + cmdLutElement{0x18, 0x00, -1, 0x02, 0x5842, 0x0004}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0005}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0006}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0007}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0008}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0009}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000a, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000a, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000a, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000a, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000a, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000a, 0x0846}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000e, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000e, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000e, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000e, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000e, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000e, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x0012, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x0012, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x0012, 0x0146}, + cmdLutElement{0x03, 0x09, -1, 0x03, 0x0012, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x0012, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x0012, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x001a, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x001a, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x001a, 0x0146}, + cmdLutElement{0x03, 0x09, -1, 0x03, 0x001a, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x001a, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x001a, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0022, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0022, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0022, 0x0146}, + 
cmdLutElement{0x04, 0x09, -1, 0x03, 0x0022, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0022, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0022, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0032, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0032, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0032, 0x0146}, + cmdLutElement{0x04, 0x09, -1, 0x03, 0x0032, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0032, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0032, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0042, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0042, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0042, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0042, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0042, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0042, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0062, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0062, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0062, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0062, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0062, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0062, 0x0846}, + cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000a}, + cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000c}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x000e}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x0012}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x0016}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x001e}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0026}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0036}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000a}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000c}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x000e}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x0012}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x0016}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x001e}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0026}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0036}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000a}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000c}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x000e}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x0012}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x0016}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x001e}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0026}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0036}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000a}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000c}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x000e}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x0012}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x0016}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x001e}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0026}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0036}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000a}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000c}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x000e}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x0012}, + cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x0016}, + cmdLutElement{0x0a, 0x03, -1, 
0x03, 0x0442, 0x001e}, + cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0026}, + cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0036}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000a}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000c}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x000e}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x0012}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x0016}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x001e}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0026}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0036}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000a}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000c}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x000e}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x0012}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x0016}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x001e}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0026}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0036}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000a}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000c}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x000e}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x0012}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x0016}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x001e}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0026}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0036}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0046}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0066}, + cmdLutElement{0x06, 0x06, -1, 0x03, 0x0082, 0x0086}, + cmdLutElement{0x06, 0x07, -1, 0x03, 0x0082, 0x00c6}, + cmdLutElement{0x06, 0x08, -1, 0x03, 0x0082, 0x0146}, + cmdLutElement{0x06, 0x09, -1, 0x03, 0x0082, 0x0246}, + cmdLutElement{0x06, 0x0a, -1, 0x03, 0x0082, 0x0446}, + cmdLutElement{0x06, 0x18, -1, 0x03, 0x0082, 0x0846}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0046}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0066}, + cmdLutElement{0x07, 0x06, -1, 0x03, 0x00c2, 0x0086}, + cmdLutElement{0x07, 0x07, -1, 0x03, 0x00c2, 0x00c6}, + cmdLutElement{0x07, 0x08, -1, 0x03, 0x00c2, 0x0146}, + cmdLutElement{0x07, 0x09, -1, 0x03, 0x00c2, 0x0246}, + cmdLutElement{0x07, 0x0a, -1, 0x03, 0x00c2, 0x0446}, + cmdLutElement{0x07, 0x18, -1, 0x03, 0x00c2, 0x0846}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0046}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0066}, + cmdLutElement{0x08, 0x06, -1, 0x03, 0x0142, 0x0086}, + cmdLutElement{0x08, 0x07, -1, 0x03, 0x0142, 0x00c6}, + cmdLutElement{0x08, 0x08, -1, 0x03, 0x0142, 0x0146}, + cmdLutElement{0x08, 0x09, -1, 0x03, 0x0142, 0x0246}, + cmdLutElement{0x08, 0x0a, -1, 0x03, 0x0142, 0x0446}, + cmdLutElement{0x08, 0x18, -1, 0x03, 0x0142, 0x0846}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0046}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0066}, + cmdLutElement{0x09, 0x06, -1, 0x03, 0x0242, 0x0086}, + cmdLutElement{0x09, 0x07, -1, 0x03, 0x0242, 0x00c6}, + cmdLutElement{0x09, 0x08, -1, 0x03, 0x0242, 0x0146}, + cmdLutElement{0x09, 0x09, -1, 0x03, 0x0242, 0x0246}, + cmdLutElement{0x09, 0x0a, -1, 0x03, 0x0242, 0x0446}, + cmdLutElement{0x09, 0x18, -1, 0x03, 0x0242, 0x0846}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0046}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0066}, + cmdLutElement{0x0a, 0x06, -1, 0x03, 0x0442, 0x0086}, + cmdLutElement{0x0a, 0x07, -1, 0x03, 0x0442, 0x00c6}, + cmdLutElement{0x0a, 0x08, -1, 0x03, 0x0442, 0x0146}, + cmdLutElement{0x0a, 0x09, -1, 0x03, 0x0442, 0x0246}, + 
cmdLutElement{0x0a, 0x0a, -1, 0x03, 0x0442, 0x0446}, + cmdLutElement{0x0a, 0x18, -1, 0x03, 0x0442, 0x0846}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0046}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0066}, + cmdLutElement{0x0c, 0x06, -1, 0x03, 0x0842, 0x0086}, + cmdLutElement{0x0c, 0x07, -1, 0x03, 0x0842, 0x00c6}, + cmdLutElement{0x0c, 0x08, -1, 0x03, 0x0842, 0x0146}, + cmdLutElement{0x0c, 0x09, -1, 0x03, 0x0842, 0x0246}, + cmdLutElement{0x0c, 0x0a, -1, 0x03, 0x0842, 0x0446}, + cmdLutElement{0x0c, 0x18, -1, 0x03, 0x0842, 0x0846}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0046}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0066}, + cmdLutElement{0x0e, 0x06, -1, 0x03, 0x1842, 0x0086}, + cmdLutElement{0x0e, 0x07, -1, 0x03, 0x1842, 0x00c6}, + cmdLutElement{0x0e, 0x08, -1, 0x03, 0x1842, 0x0146}, + cmdLutElement{0x0e, 0x09, -1, 0x03, 0x1842, 0x0246}, + cmdLutElement{0x0e, 0x0a, -1, 0x03, 0x1842, 0x0446}, + cmdLutElement{0x0e, 0x18, -1, 0x03, 0x1842, 0x0846}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0046}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0066}, + cmdLutElement{0x18, 0x06, -1, 0x03, 0x5842, 0x0086}, + cmdLutElement{0x18, 0x07, -1, 0x03, 0x5842, 0x00c6}, + cmdLutElement{0x18, 0x08, -1, 0x03, 0x5842, 0x0146}, + cmdLutElement{0x18, 0x09, -1, 0x03, 0x5842, 0x0246}, + cmdLutElement{0x18, 0x0a, -1, 0x03, 0x5842, 0x0446}, + cmdLutElement{0x18, 0x18, -1, 0x03, 0x5842, 0x0846}, +} diff --git a/vendor/github.com/andybalholm/brotli/quality.go b/vendor/github.com/andybalholm/brotli/quality.go new file mode 100644 index 0000000..49709a3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/quality.go @@ -0,0 +1,196 @@ +package brotli + +const fastOnePassCompressionQuality = 0 + +const fastTwoPassCompressionQuality = 1 + +const zopflificationQuality = 10 + +const hqZopflificationQuality = 11 + +const maxQualityForStaticEntropyCodes = 2 + +const minQualityForBlockSplit = 4 + +const minQualityForNonzeroDistanceParams = 4 + +const minQualityForOptimizeHistograms = 4 + +const minQualityForExtensiveReferenceSearch = 5 + +const minQualityForContextModeling = 5 + +const minQualityForHqContextModeling = 7 + +const minQualityForHqBlockSplitting = 10 + +/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting, + so we buffer at most this much literals and commands. */ +const maxNumDelayedSymbols = 0x2FFF + +/* Returns hash-table size for quality levels 0 and 1. */ +func maxHashTableSize(quality int) uint { + if quality == fastOnePassCompressionQuality { + return 1 << 15 + } else { + return 1 << 17 + } +} + +/* The maximum length for which the zopflification uses distinct distances. */ +const maxZopfliLenQuality10 = 150 + +const maxZopfliLenQuality11 = 325 + +/* Do not thoroughly search when a long copy is found. */ +const longCopyQuickStep = 16384 + +func maxZopfliLen(params *encoderParams) uint { + if params.quality <= 10 { + return maxZopfliLenQuality10 + } else { + return maxZopfliLenQuality11 + } +} + +/* Number of best candidates to evaluate to expand Zopfli chain. 
*/ +func maxZopfliCandidates(params *encoderParams) uint { + if params.quality <= 10 { + return 1 + } else { + return 5 + } +} + +func sanitizeParams(params *encoderParams) { + params.quality = brotli_min_int(maxQuality, brotli_max_int(minQuality, params.quality)) + if params.quality <= maxQualityForStaticEntropyCodes { + params.large_window = false + } + + if params.lgwin < minWindowBits { + params.lgwin = minWindowBits + } else { + var max_lgwin int + if params.large_window { + max_lgwin = largeMaxWindowBits + } else { + max_lgwin = maxWindowBits + } + if params.lgwin > uint(max_lgwin) { + params.lgwin = uint(max_lgwin) + } + } +} + +/* Returns optimized lg_block value. */ +func computeLgBlock(params *encoderParams) int { + var lgblock int = params.lgblock + if params.quality == fastOnePassCompressionQuality || params.quality == fastTwoPassCompressionQuality { + lgblock = int(params.lgwin) + } else if params.quality < minQualityForBlockSplit { + lgblock = 14 + } else if lgblock == 0 { + lgblock = 16 + if params.quality >= 9 && params.lgwin > uint(lgblock) { + lgblock = brotli_min_int(18, int(params.lgwin)) + } + } else { + lgblock = brotli_min_int(maxInputBlockBits, brotli_max_int(minInputBlockBits, lgblock)) + } + + return lgblock +} + +/* Returns log2 of the size of main ring buffer area. + Allocate at least lgwin + 1 bits for the ring buffer so that the newly + added block fits there completely and we still get lgwin bits and at least + read_block_size_bits + 1 bits because the copy tail length needs to be + smaller than ring-buffer size. */ +func computeRbBits(params *encoderParams) int { + return 1 + brotli_max_int(int(params.lgwin), params.lgblock) +} + +func maxMetablockSize(params *encoderParams) uint { + var bits int = brotli_min_int(computeRbBits(params), maxInputBlockBits) + return uint(1) << uint(bits) +} + +/* When searching for backward references and have not seen matches for a long + time, we can skip some match lookups. Unsuccessful match lookups are very + expensive and this kind of a heuristic speeds up compression quite a lot. + At first 8 byte strides are taken and every second byte is put to hasher. + After 4x more literals stride by 16 bytes, every put 4-th byte to hasher. + Applied only to qualities 2 to 9. 
*/ +func literalSpreeLengthForSparseSearch(params *encoderParams) uint { + if params.quality < 9 { + return 64 + } else { + return 512 + } +} + +func chooseHasher(params *encoderParams, hparams *hasherParams) { + if params.quality > 9 { + hparams.type_ = 10 + } else if params.quality == 4 && params.size_hint >= 1<<20 { + hparams.type_ = 54 + } else if params.quality < 5 { + hparams.type_ = params.quality + } else if params.lgwin <= 16 { + if params.quality < 7 { + hparams.type_ = 40 + } else if params.quality < 9 { + hparams.type_ = 41 + } else { + hparams.type_ = 42 + } + } else if params.size_hint >= 1<<20 && params.lgwin >= 19 { + hparams.type_ = 6 + hparams.block_bits = params.quality - 1 + hparams.bucket_bits = 15 + hparams.hash_len = 5 + if params.quality < 7 { + hparams.num_last_distances_to_check = 4 + } else if params.quality < 9 { + hparams.num_last_distances_to_check = 10 + } else { + hparams.num_last_distances_to_check = 16 + } + } else { + hparams.type_ = 5 + hparams.block_bits = params.quality - 1 + if params.quality < 7 { + hparams.bucket_bits = 14 + } else { + hparams.bucket_bits = 15 + } + if params.quality < 7 { + hparams.num_last_distances_to_check = 4 + } else if params.quality < 9 { + hparams.num_last_distances_to_check = 10 + } else { + hparams.num_last_distances_to_check = 16 + } + } + + if params.lgwin > 24 { + /* Different hashers for large window brotli: not for qualities <= 2, + these are too fast for large window. Not for qualities >= 10: their + hasher already works well with large window. So the changes are: + H3 --> H35: for quality 3. + H54 --> H55: for quality 4 with size hint > 1MB + H6 --> H65: for qualities 5, 6, 7, 8, 9. */ + if hparams.type_ == 3 { + hparams.type_ = 35 + } + + if hparams.type_ == 54 { + hparams.type_ = 55 + } + + if hparams.type_ == 6 { + hparams.type_ = 65 + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/reader.go b/vendor/github.com/andybalholm/brotli/reader.go new file mode 100644 index 0000000..9419c79 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/reader.go @@ -0,0 +1,108 @@ +package brotli + +import ( + "errors" + "io" +) + +type decodeError int + +func (err decodeError) Error() string { + return "brotli: " + string(decoderErrorString(int(err))) +} + +var errExcessiveInput = errors.New("brotli: excessive input") +var errInvalidState = errors.New("brotli: invalid state") + +// readBufSize is a "good" buffer size that avoids excessive round-trips +// between C and Go but doesn't waste too much memory on buffering. +// It is arbitrarily chosen to be equal to the constant used in io.Copy. +const readBufSize = 32 * 1024 + +// NewReader creates a new Reader reading the given reader. +func NewReader(src io.Reader) *Reader { + r := new(Reader) + r.Reset(src) + return r +} + +// Reset discards the Reader's state and makes it equivalent to the result of +// its original state from NewReader, but reading from src instead. +// This permits reusing a Reader rather than allocating a new one. +// Error is always nil +func (r *Reader) Reset(src io.Reader) error { + if r.error_code < 0 { + // There was an unrecoverable error, leaving the Reader's state + // undefined. Clear out everything but the buffer. 
+ *r = Reader{buf: r.buf} + } + + decoderStateInit(r) + r.src = src + if r.buf == nil { + r.buf = make([]byte, readBufSize) + } + return nil +} + +func (r *Reader) Read(p []byte) (n int, err error) { + if !decoderHasMoreOutput(r) && len(r.in) == 0 { + m, readErr := r.src.Read(r.buf) + if m == 0 { + // If readErr is `nil`, we just proxy underlying stream behavior. + return 0, readErr + } + r.in = r.buf[:m] + } + + if len(p) == 0 { + return 0, nil + } + + for { + var written uint + in_len := uint(len(r.in)) + out_len := uint(len(p)) + in_remaining := in_len + out_remaining := out_len + result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p) + written = out_len - out_remaining + n = int(written) + + switch result { + case decoderResultSuccess: + if len(r.in) > 0 { + return n, errExcessiveInput + } + return n, nil + case decoderResultError: + return n, decodeError(decoderGetErrorCode(r)) + case decoderResultNeedsMoreOutput: + if n == 0 { + return 0, io.ErrShortBuffer + } + return n, nil + case decoderNeedsMoreInput: + } + + if len(r.in) != 0 { + return 0, errInvalidState + } + + // Calling r.src.Read may block. Don't block if we have data to return. + if n > 0 { + return n, nil + } + + // Top off the buffer. + encN, err := r.src.Read(r.buf) + if encN == 0 { + // Not enough data to complete decoding. + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + r.in = r.buf[:encN] + } +} diff --git a/vendor/github.com/andybalholm/brotli/ringbuffer.go b/vendor/github.com/andybalholm/brotli/ringbuffer.go new file mode 100644 index 0000000..1c8f86f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/ringbuffer.go @@ -0,0 +1,134 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of + data in a circular manner: writing a byte writes it to: + `position() % (1 << window_bits)'. + For convenience, the ringBuffer array contains another copy of the + first `1 << tail_bits' bytes: + buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits), + and another copy of the last two bytes: + buffer_[-1] == buffer_[(1 << window_bits) - 1] and + buffer_[-2] == buffer_[(1 << window_bits) - 2]. */ +type ringBuffer struct { + size_ uint32 + mask_ uint32 + tail_size_ uint32 + total_size_ uint32 + cur_size_ uint32 + pos_ uint32 + data_ []byte + buffer_ []byte +} + +func ringBufferInit(rb *ringBuffer) { + rb.pos_ = 0 +} + +func ringBufferSetup(params *encoderParams, rb *ringBuffer) { + var window_bits int = computeRbBits(params) + var tail_bits int = params.lgblock + *(*uint32)(&rb.size_) = 1 << uint(window_bits) + *(*uint32)(&rb.mask_) = (1 << uint(window_bits)) - 1 + *(*uint32)(&rb.tail_size_) = 1 << uint(tail_bits) + *(*uint32)(&rb.total_size_) = rb.size_ + rb.tail_size_ +} + +const kSlackForEightByteHashingEverywhere uint = 7 + +/* Allocates or re-allocates data_ to the given length + plus some slack + region before and after. Fills the slack regions with zeros. 
*/ +func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) { + var new_data []byte + var i uint + size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere) + if cap(rb.data_) < size { + new_data = make([]byte, size) + } else { + new_data = rb.data_[:size] + } + if rb.data_ != nil { + copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)]) + } + + rb.data_ = new_data + rb.cur_size_ = buflen + rb.buffer_ = rb.data_[2:] + rb.data_[1] = 0 + rb.data_[0] = rb.data_[1] + for i = 0; i < kSlackForEightByteHashingEverywhere; i++ { + rb.buffer_[rb.cur_size_+uint32(i)] = 0 + } +} + +func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + if uint32(masked_pos) < rb.tail_size_ { + /* Just fill the tail buffer with the beginning data. */ + var p uint = uint(rb.size_ + uint32(masked_pos)) + copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))]) + } +} + +/* Push bytes into the ring buffer. */ +func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) { + if rb.pos_ == 0 && uint32(n) < rb.tail_size_ { + /* Special case for the first write: to process the first block, we don't + need to allocate the whole ring-buffer and we don't need the tail + either. However, we do this memory usage optimization only if the + first write is less than the tail size, which is also the input block + size, otherwise it is likely that other blocks will follow and we + will need to reallocate to the full size anyway. */ + rb.pos_ = uint32(n) + + ringBufferInitBuffer(rb.pos_, rb) + copy(rb.buffer_, bytes[:n]) + return + } + + if rb.cur_size_ < rb.total_size_ { + /* Lazily allocate the full buffer. */ + ringBufferInitBuffer(rb.total_size_, rb) + + /* Initialize the last two bytes to zero, so that we don't have to worry + later when we copy the last two bytes to the first two positions. */ + rb.buffer_[rb.size_-2] = 0 + + rb.buffer_[rb.size_-1] = 0 + } + { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + + /* The length of the writes is limited so that we do not need to worry + about a write */ + ringBufferWriteTail(bytes, n, rb) + + if uint32(masked_pos+n) <= rb.size_ { + /* A single write fits. */ + copy(rb.buffer_[masked_pos:], bytes[:n]) + } else { + /* Split into two writes. + Copy into the end of the buffer, including the tail buffer. */ + copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))]) + + /* Copy into the beginning of the buffer */ + copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))]) + } + } + { + var not_first_lap bool = rb.pos_&(1<<31) != 0 + var rb_pos_mask uint32 = (1 << 31) - 1 + rb.data_[0] = rb.buffer_[rb.size_-2] + rb.data_[1] = rb.buffer_[rb.size_-1] + rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask) + if not_first_lap { + /* Wrap, but preserve not-a-first-lap feature. */ + rb.pos_ |= 1 << 31 + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/state.go b/vendor/github.com/andybalholm/brotli/state.go new file mode 100644 index 0000000..38d753e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/state.go @@ -0,0 +1,294 @@ +package brotli + +import "io" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Brotli state for partial streaming decoding. 
*/ +const ( + stateUninited = iota + stateLargeWindowBits + stateInitialize + stateMetablockBegin + stateMetablockHeader + stateMetablockHeader2 + stateContextModes + stateCommandBegin + stateCommandInner + stateCommandPostDecodeLiterals + stateCommandPostWrapCopy + stateUncompressed + stateMetadata + stateCommandInnerWrite + stateMetablockDone + stateCommandPostWrite1 + stateCommandPostWrite2 + stateHuffmanCode0 + stateHuffmanCode1 + stateHuffmanCode2 + stateHuffmanCode3 + stateContextMap1 + stateContextMap2 + stateTreeGroup + stateDone +) + +const ( + stateMetablockHeaderNone = iota + stateMetablockHeaderEmpty + stateMetablockHeaderNibbles + stateMetablockHeaderSize + stateMetablockHeaderUncompressed + stateMetablockHeaderReserved + stateMetablockHeaderBytes + stateMetablockHeaderMetadata +) + +const ( + stateUncompressedNone = iota + stateUncompressedWrite +) + +const ( + stateTreeGroupNone = iota + stateTreeGroupLoop +) + +const ( + stateContextMapNone = iota + stateContextMapReadPrefix + stateContextMapHuffman + stateContextMapDecode + stateContextMapTransform +) + +const ( + stateHuffmanNone = iota + stateHuffmanSimpleSize + stateHuffmanSimpleRead + stateHuffmanSimpleBuild + stateHuffmanComplex + stateHuffmanLengthSymbols +) + +const ( + stateDecodeUint8None = iota + stateDecodeUint8Short + stateDecodeUint8Long +) + +const ( + stateReadBlockLengthNone = iota + stateReadBlockLengthSuffix +) + +type Reader struct { + src io.Reader + buf []byte // scratch space for reading from src + in []byte // current chunk to decode; usually aliases buf + + state int + loop_counter int + br bitReader + buffer struct { + u64 uint64 + u8 [8]byte + } + buffer_length uint32 + pos int + max_backward_distance int + max_distance int + ringbuffer_size int + ringbuffer_mask int + dist_rb_idx int + dist_rb [4]int + error_code int + sub_loop_counter uint32 + ringbuffer []byte + ringbuffer_end []byte + htree_command []huffmanCode + context_lookup []byte + context_map_slice []byte + dist_context_map_slice []byte + literal_hgroup huffmanTreeGroup + insert_copy_hgroup huffmanTreeGroup + distance_hgroup huffmanTreeGroup + block_type_trees []huffmanCode + block_len_trees []huffmanCode + trivial_literal_context int + distance_context int + meta_block_remaining_len int + block_length_index uint32 + block_length [3]uint32 + num_block_types [3]uint32 + block_type_rb [6]uint32 + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + distance_postfix_mask int + num_dist_htrees uint32 + dist_context_map []byte + literal_htree []huffmanCode + dist_htree_index byte + repeat_code_len uint32 + prev_code_len uint32 + copy_length int + distance_code int + rb_roundtrips uint + partial_pos_out uint + symbol uint32 + repeat uint32 + space uint32 + table [32]huffmanCode + symbol_lists symbolList + symbols_lists_array [huffmanMaxCodeLength + 1 + numCommandSymbols]uint16 + next_symbol [32]int + code_length_code_lengths [codeLengthCodes]byte + code_length_histo [16]uint16 + htree_index int + next []huffmanCode + context_index uint32 + max_run_length_prefix uint32 + code uint32 + context_map_table [huffmanMaxSize272]huffmanCode + substate_metablock_header int + substate_tree_group int + substate_context_map int + substate_uncompressed int + substate_huffman int + substate_decode_uint8 int + substate_read_block_length int + is_last_metablock uint + is_uncompressed uint + is_metadata uint + should_wrap_ringbuffer uint + canny_ringbuffer_allocation uint + large_window bool + size_nibbles uint + window_bits uint32 + 
new_ringbuffer_size int + num_literal_htrees uint32 + context_map []byte + context_modes []byte + dictionary *dictionary + transforms *transforms + trivial_literal_contexts [8]uint32 +} + +func decoderStateInit(s *Reader) bool { + s.error_code = 0 /* BROTLI_DECODER_NO_ERROR */ + + initBitReader(&s.br) + s.state = stateUninited + s.large_window = false + s.substate_metablock_header = stateMetablockHeaderNone + s.substate_tree_group = stateTreeGroupNone + s.substate_context_map = stateContextMapNone + s.substate_uncompressed = stateUncompressedNone + s.substate_huffman = stateHuffmanNone + s.substate_decode_uint8 = stateDecodeUint8None + s.substate_read_block_length = stateReadBlockLengthNone + + s.buffer_length = 0 + s.loop_counter = 0 + s.pos = 0 + s.rb_roundtrips = 0 + s.partial_pos_out = 0 + + s.block_type_trees = nil + s.block_len_trees = nil + s.ringbuffer_size = 0 + s.new_ringbuffer_size = 0 + s.ringbuffer_mask = 0 + + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.dist_context_map_slice = nil + + s.sub_loop_counter = 0 + + s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil + + s.is_last_metablock = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + s.should_wrap_ringbuffer = 0 + s.canny_ringbuffer_allocation = 1 + + s.window_bits = 0 + s.max_distance = 0 + s.dist_rb[0] = 16 + s.dist_rb[1] = 15 + s.dist_rb[2] = 11 + s.dist_rb[3] = 4 + s.dist_rb_idx = 0 + s.block_type_trees = nil + s.block_len_trees = nil + + s.symbol_lists.storage = s.symbols_lists_array[:] + s.symbol_lists.offset = huffmanMaxCodeLength + 1 + + s.dictionary = getDictionary() + s.transforms = getTransforms() + + return true +} + +func decoderStateMetablockBegin(s *Reader) { + s.meta_block_remaining_len = 0 + s.block_length[0] = 1 << 24 + s.block_length[1] = 1 << 24 + s.block_length[2] = 1 << 24 + s.num_block_types[0] = 1 + s.num_block_types[1] = 1 + s.num_block_types[2] = 1 + s.block_type_rb[0] = 1 + s.block_type_rb[1] = 0 + s.block_type_rb[2] = 1 + s.block_type_rb[3] = 0 + s.block_type_rb[4] = 1 + s.block_type_rb[5] = 0 + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.literal_htree = nil + s.dist_context_map_slice = nil + s.dist_htree_index = 0 + s.context_lookup = nil + s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil +} + +func decoderStateCleanupAfterMetablock(s *Reader) { + s.context_modes = nil + s.context_map = nil + s.dist_context_map = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.htrees = nil +} + +func decoderHuffmanTreeGroupInit(s *Reader, group *huffmanTreeGroup, alphabet_size uint32, max_symbol uint32, ntrees uint32) bool { + var max_table_size uint = uint(kMaxHuffmanTableSize[(alphabet_size+31)>>5]) + group.alphabet_size = uint16(alphabet_size) + group.max_symbol = uint16(max_symbol) + group.num_htrees = uint16(ntrees) + group.htrees = make([][]huffmanCode, ntrees) + group.codes = make([]huffmanCode, (uint(ntrees) * max_table_size)) + return !(group.codes == nil) +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict.go b/vendor/github.com/andybalholm/brotli/static_dict.go new file mode 100644 index 0000000..bc05566 --- /dev/null +++ 
b/vendor/github.com/andybalholm/brotli/static_dict.go @@ -0,0 +1,662 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Class to model the static dictionary. */ + +const maxStaticDictionaryMatchLen = 37 + +const kInvalidMatch uint32 = 0xFFFFFFF + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ +func hash(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kDictHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> uint(32-kDictNumBits) +} + +func addMatch(distance uint, len uint, len_code uint, matches []uint32) { + var match uint32 = uint32((distance << 5) + len_code) + matches[len] = brotli_min_uint32_t(matches[len], match) +} + +func dictMatchLength(dict *dictionary, data []byte, id uint, len uint, maxlen uint) uint { + var offset uint = uint(dict.offsets_by_length[len]) + len*id + return findMatchLengthWithLimit(dict.data[offset:], data, brotli_min_size_t(uint(len), maxlen)) +} + +func isMatch(d *dictionary, w dictWord, data []byte, max_length uint) bool { + if uint(w.len) > max_length { + return false + } else { + var offset uint = uint(d.offsets_by_length[w.len]) + uint(w.len)*uint(w.idx) + var dict []byte = d.data[offset:] + if w.transform == 0 { + /* Match against base dictionary word. */ + return findMatchLengthWithLimit(dict, data, uint(w.len)) == uint(w.len) + } else if w.transform == 10 { + /* Match against uppercase first transform. + Note that there are only ASCII uppercase words in the lookup table. */ + return dict[0] >= 'a' && dict[0] <= 'z' && (dict[0]^32) == data[0] && findMatchLengthWithLimit(dict[1:], data[1:], uint(w.len)-1) == uint(w.len-1) + } else { + /* Match against uppercase all transform. + Note that there are only ASCII uppercase words in the lookup table. */ + var i uint + for i = 0; i < uint(w.len); i++ { + if dict[i] >= 'a' && dict[i] <= 'z' { + if (dict[i] ^ 32) != data[i] { + return false + } + } else { + if dict[i] != data[i] { + return false + } + } + } + + return true + } + } +} + +func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_length uint, max_length uint, matches []uint32) bool { + var has_found_match bool = false + { + var offset uint = uint(dict.buckets[hash(data)]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var matchlen uint = dictMatchLength(dict.words, data, id, l, max_length) + var s []byte + var minlen uint + var maxlen uint + var len uint + + /* Transform "" + BROTLI_TRANSFORM_IDENTITY + "" */ + if matchlen == l { + addMatch(id, l, l, matches) + has_found_match = true + } + + /* Transforms "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "" and + "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "ing " */ + if matchlen >= l-1 { + addMatch(id+12*n, l-1, l, matches) + if l+2 < max_length && data[l-1] == 'i' && data[l] == 'n' && data[l+1] == 'g' && data[l+2] == ' ' { + addMatch(id+49*n, l+3, l, matches) + } + + has_found_match = true + } + + /* Transform "" + BROTLI_TRANSFORM_OMIT_LAST_# + "" (# = 2 .. 
9) */ + minlen = min_length + + if l > 9 { + minlen = brotli_max_size_t(minlen, l-9) + } + maxlen = brotli_min_size_t(matchlen, l-2) + for len = minlen; len <= maxlen; len++ { + var cut uint = l - len + var transform_id uint = (cut << 2) + uint((dict.cutoffTransforms>>(cut*6))&0x3F) + addMatch(id+transform_id*n, uint(len), l, matches) + has_found_match = true + } + + if matchlen < l || l+6 >= max_length { + continue + } + + s = data[l:] + + /* Transforms "" + BROTLI_TRANSFORM_IDENTITY + */ + if s[0] == ' ' { + addMatch(id+n, l+1, l, matches) + if s[1] == 'a' { + if s[2] == ' ' { + addMatch(id+28*n, l+3, l, matches) + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+46*n, l+4, l, matches) + } + } else if s[2] == 't' { + if s[3] == ' ' { + addMatch(id+60*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == 'd' && s[4] == ' ' { + addMatch(id+10*n, l+5, l, matches) + } + } + } else if s[1] == 'b' { + if s[2] == 'y' && s[3] == ' ' { + addMatch(id+38*n, l+4, l, matches) + } + } else if s[1] == 'i' { + if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+16*n, l+4, l, matches) + } + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+47*n, l+4, l, matches) + } + } + } else if s[1] == 'f' { + if s[2] == 'o' { + if s[3] == 'r' && s[4] == ' ' { + addMatch(id+25*n, l+5, l, matches) + } + } else if s[2] == 'r' { + if s[3] == 'o' && s[4] == 'm' && s[5] == ' ' { + addMatch(id+37*n, l+6, l, matches) + } + } + } else if s[1] == 'o' { + if s[2] == 'f' { + if s[3] == ' ' { + addMatch(id+8*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+45*n, l+4, l, matches) + } + } + } else if s[1] == 'n' { + if s[2] == 'o' && s[3] == 't' && s[4] == ' ' { + addMatch(id+80*n, l+5, l, matches) + } + } else if s[1] == 't' { + if s[2] == 'h' { + if s[3] == 'e' { + if s[4] == ' ' { + addMatch(id+5*n, l+5, l, matches) + } + } else if s[3] == 'a' { + if s[4] == 't' && s[5] == ' ' { + addMatch(id+29*n, l+6, l, matches) + } + } + } else if s[2] == 'o' { + if s[3] == ' ' { + addMatch(id+17*n, l+4, l, matches) + } + } + } else if s[1] == 'w' { + if s[2] == 'i' && s[3] == 't' && s[4] == 'h' && s[5] == ' ' { + addMatch(id+35*n, l+6, l, matches) + } + } + } else if s[0] == '"' { + addMatch(id+19*n, l+1, l, matches) + if s[1] == '>' { + addMatch(id+21*n, l+2, l, matches) + } + } else if s[0] == '.' 
{ + addMatch(id+20*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+31*n, l+2, l, matches) + if s[2] == 'T' && s[3] == 'h' { + if s[4] == 'e' { + if s[5] == ' ' { + addMatch(id+43*n, l+6, l, matches) + } + } else if s[4] == 'i' { + if s[5] == 's' && s[6] == ' ' { + addMatch(id+75*n, l+7, l, matches) + } + } + } + } + } else if s[0] == ',' { + addMatch(id+76*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+14*n, l+2, l, matches) + } + } else if s[0] == '\n' { + addMatch(id+22*n, l+1, l, matches) + if s[1] == '\t' { + addMatch(id+50*n, l+2, l, matches) + } + } else if s[0] == ']' { + addMatch(id+24*n, l+1, l, matches) + } else if s[0] == '\'' { + addMatch(id+36*n, l+1, l, matches) + } else if s[0] == ':' { + addMatch(id+51*n, l+1, l, matches) + } else if s[0] == '(' { + addMatch(id+57*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+70*n, l+2, l, matches) + } else if s[1] == '\'' { + addMatch(id+86*n, l+2, l, matches) + } + } else if s[0] == 'a' { + if s[1] == 'l' && s[2] == ' ' { + addMatch(id+84*n, l+3, l, matches) + } + } else if s[0] == 'e' { + if s[1] == 'd' { + if s[2] == ' ' { + addMatch(id+53*n, l+3, l, matches) + } + } else if s[1] == 'r' { + if s[2] == ' ' { + addMatch(id+82*n, l+3, l, matches) + } + } else if s[1] == 's' { + if s[2] == 't' && s[3] == ' ' { + addMatch(id+95*n, l+4, l, matches) + } + } + } else if s[0] == 'f' { + if s[1] == 'u' && s[2] == 'l' && s[3] == ' ' { + addMatch(id+90*n, l+4, l, matches) + } + } else if s[0] == 'i' { + if s[1] == 'v' { + if s[2] == 'e' && s[3] == ' ' { + addMatch(id+92*n, l+4, l, matches) + } + } else if s[1] == 'z' { + if s[2] == 'e' && s[3] == ' ' { + addMatch(id+100*n, l+4, l, matches) + } + } + } else if s[0] == 'l' { + if s[1] == 'e' { + if s[2] == 's' && s[3] == 's' && s[4] == ' ' { + addMatch(id+93*n, l+5, l, matches) + } + } else if s[1] == 'y' { + if s[2] == ' ' { + addMatch(id+61*n, l+3, l, matches) + } + } + } else if s[0] == 'o' { + if s[1] == 'u' && s[2] == 's' && s[3] == ' ' { + addMatch(id+106*n, l+4, l, matches) + } + } + } else { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. */ + + var s []byte + if !isMatch(dict.words, w, data, max_length) { + continue + } + + /* Transform "" + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 44 + } else { + tmp = 9 + } + addMatch(id+uint(tmp)*n, l, l, matches) + + has_found_match = true + if l+1 >= max_length { + continue + } + + /* Transforms "" + kUppercase{First,All} + */ + s = data[l:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 68 + } else { + tmp = 4 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '"' { + var tmp int + if is_all_caps { + tmp = 87 + } else { + tmp = 66 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == '>' { + var tmp int + if is_all_caps { + tmp = 97 + } else { + tmp = 69 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '.' 
{ + var tmp int + if is_all_caps { + tmp = 101 + } else { + tmp = 79 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 114 + } else { + tmp = 88 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == ',' { + var tmp int + if is_all_caps { + tmp = 112 + } else { + tmp = 99 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 107 + } else { + tmp = 58 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '\'' { + var tmp int + if is_all_caps { + tmp = 94 + } else { + tmp = 74 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '(' { + var tmp int + if is_all_caps { + tmp = 113 + } else { + tmp = 78 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 105 + } else { + tmp = 104 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 116 + } else { + tmp = 108 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } + } + } + } + + /* Transforms with prefixes " " and "." */ + if max_length >= 5 && (data[0] == ' ' || data[0] == '.') { + var is_space bool = (data[0] == ' ') + var offset uint = uint(dict.buckets[hash(data[1:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + "" and + "." + BROTLI_TRANSFORM_IDENTITY + "" */ + var tmp int + if is_space { + tmp = 6 + } else { + tmp = 32 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + and + "." + BROTLI_TRANSFORM_IDENTITY + + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_space { + tmp = 2 + } else { + tmp = 77 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == '(' { + var tmp int + if is_space { + tmp = 89 + } else { + tmp = 67 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if is_space { + if s[0] == ',' { + addMatch(id+103*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+33*n, l+3, l, matches) + } + } else if s[0] == '.' { + addMatch(id+71*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+52*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+81*n, l+3, l, matches) + } else if s[1] == '\'' { + addMatch(id+98*n, l+3, l, matches) + } + } + } + } else if is_space { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. 
*/ + + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 85 + } else { + tmp = 30 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + kUppercase{First,All} + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 83 + } else { + tmp = 15 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == ',' { + if !is_all_caps { + addMatch(id+109*n, l+2, l, matches) + } + + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 111 + } else { + tmp = 65 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '.' { + var tmp int + if is_all_caps { + tmp = 115 + } else { + tmp = 96 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 117 + } else { + tmp = 91 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 110 + } else { + tmp = 118 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 119 + } else { + tmp = 120 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } + } + } + } + + if max_length >= 6 { + /* Transforms with prefixes "e ", "s ", ", " and "\xC2\xA0" */ + if (data[1] == ' ' && (data[0] == 'e' || data[0] == 's' || data[0] == ',')) || (data[0] == 0xC2 && data[1] == 0xA0) { + var offset uint = uint(dict.buckets[hash(data[2:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[2:], max_length-2) { + if data[0] == 0xC2 { + addMatch(id+102*n, l+2, l, matches) + has_found_match = true + } else if l+2 < max_length && data[l+2] == ' ' { + var t uint = 13 + if data[0] == 'e' { + t = 18 + } else if data[0] == 's' { + t = 7 + } + addMatch(id+t*n, l+3, l, matches) + has_found_match = true + } + } + } + } + } + + if max_length >= 9 { + /* Transforms with prefixes " the " and ".com/" */ + if (data[0] == ' ' && data[1] == 't' && data[2] == 'h' && data[3] == 'e' && data[4] == ' ') || (data[0] == '.' 
&& data[1] == 'c' && data[2] == 'o' && data[3] == 'm' && data[4] == '/') { + var offset uint = uint(dict.buckets[hash(data[5:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[5:], max_length-5) { + var tmp int + if data[0] == ' ' { + tmp = 41 + } else { + tmp = 72 + } + addMatch(id+uint(tmp)*n, l+5, l, matches) + has_found_match = true + if l+5 < max_length { + var s []byte = data[l+5:] + if data[0] == ' ' { + if l+8 < max_length && s[0] == ' ' && s[1] == 'o' && s[2] == 'f' && s[3] == ' ' { + addMatch(id+62*n, l+9, l, matches) + if l+12 < max_length && s[4] == 't' && s[5] == 'h' && s[6] == 'e' && s[7] == ' ' { + addMatch(id+73*n, l+13, l, matches) + } + } + } + } + } + } + } + } + + return has_found_match +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict_lut.go b/vendor/github.com/andybalholm/brotli/static_dict_lut.go new file mode 100644 index 0000000..b33963e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/static_dict_lut.go @@ -0,0 +1,75094 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Lookup table for static dictionary and transforms. */ + +type dictWord struct { + len byte + transform byte + idx uint16 +} + +const kDictNumBits int = 15 + +const kDictHashMul32 uint32 = 0x1E35A7BD + +var kStaticDictionaryBuckets = [32768]uint16{ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 6, + 0, + 0, + 0, + 0, + 0, + 20, + 0, + 0, + 0, + 21, + 0, + 22, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23, + 0, + 0, + 25, + 0, + 29, + 0, + 53, + 0, + 0, + 0, + 0, + 0, + 0, + 55, + 0, + 0, + 0, + 0, + 0, + 0, + 61, + 76, + 0, + 0, + 0, + 94, + 0, + 0, + 0, + 0, + 0, + 0, + 96, + 0, + 97, + 0, + 98, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 99, + 101, + 106, + 108, + 0, + 0, + 0, + 0, + 0, + 110, + 0, + 111, + 112, + 0, + 113, + 118, + 124, + 0, + 0, + 0, + 0, + 0, + 125, + 128, + 0, + 0, + 0, + 0, + 129, + 0, + 0, + 131, + 0, + 0, + 0, + 0, + 0, + 0, + 132, + 0, + 0, + 135, + 0, + 0, + 0, + 137, + 0, + 0, + 0, + 0, + 0, + 138, + 139, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 142, + 143, + 144, + 0, + 0, + 0, + 0, + 0, + 145, + 0, + 0, + 0, + 146, + 149, + 151, + 152, + 0, + 0, + 153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 154, + 0, + 0, + 0, + 0, + 0, + 0, + 155, + 0, + 0, + 0, + 0, + 160, + 182, + 0, + 0, + 0, + 0, + 0, + 0, + 183, + 0, + 0, + 0, + 188, + 189, + 0, + 0, + 192, + 0, + 0, + 0, + 0, + 0, + 0, + 194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 197, + 202, + 209, + 0, + 0, + 210, + 0, + 224, + 0, + 0, + 0, + 225, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 231, + 0, + 0, + 0, + 232, + 0, + 240, + 0, + 0, + 242, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 244, + 0, + 0, + 0, + 246, + 0, + 0, + 249, + 251, + 253, + 0, + 0, + 0, + 0, + 0, + 258, + 0, + 0, + 261, + 263, + 0, + 0, + 0, + 267, + 0, + 0, + 268, + 0, + 269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 271, + 0, + 0, + 0, + 0, + 0, + 0, + 272, + 0, + 273, + 0, + 277, + 0, + 278, + 286, + 0, + 0, + 0, + 0, + 287, + 0, + 289, + 290, + 291, + 0, + 0, + 0, + 295, + 0, + 0, + 296, + 297, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 298, + 0, + 0, + 0, + 299, + 
0, + 0, + 305, + 0, + 324, + 0, + 0, + 0, + 0, + 0, + 327, + 0, + 328, + 329, + 0, + 0, + 0, + 0, + 336, + 0, + 0, + 340, + 0, + 341, + 342, + 343, + 0, + 0, + 346, + 0, + 348, + 0, + 0, + 0, + 0, + 0, + 0, + 349, + 351, + 0, + 0, + 355, + 0, + 363, + 0, + 364, + 0, + 368, + 369, + 0, + 370, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 373, + 0, + 375, + 0, + 0, + 0, + 0, + 376, + 377, + 0, + 0, + 394, + 395, + 396, + 0, + 0, + 398, + 0, + 0, + 0, + 0, + 400, + 0, + 0, + 408, + 0, + 0, + 0, + 0, + 420, + 0, + 0, + 0, + 0, + 0, + 0, + 421, + 0, + 0, + 422, + 423, + 0, + 0, + 429, + 435, + 436, + 442, + 0, + 0, + 443, + 0, + 444, + 445, + 453, + 456, + 0, + 457, + 0, + 0, + 0, + 0, + 0, + 458, + 0, + 0, + 0, + 459, + 0, + 0, + 0, + 460, + 0, + 462, + 463, + 465, + 0, + 0, + 0, + 0, + 0, + 0, + 466, + 469, + 0, + 0, + 0, + 0, + 0, + 0, + 470, + 0, + 0, + 0, + 474, + 0, + 476, + 0, + 0, + 0, + 0, + 483, + 0, + 485, + 0, + 0, + 0, + 486, + 0, + 0, + 488, + 491, + 492, + 0, + 0, + 497, + 499, + 500, + 0, + 501, + 0, + 0, + 0, + 505, + 0, + 0, + 506, + 0, + 0, + 0, + 507, + 0, + 0, + 0, + 509, + 0, + 0, + 0, + 0, + 511, + 512, + 519, + 0, + 0, + 0, + 0, + 0, + 0, + 529, + 530, + 0, + 0, + 0, + 534, + 0, + 0, + 0, + 0, + 543, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 553, + 0, + 0, + 0, + 0, + 557, + 560, + 0, + 0, + 0, + 0, + 0, + 0, + 561, + 0, + 564, + 0, + 0, + 0, + 0, + 0, + 0, + 565, + 566, + 0, + 575, + 0, + 619, + 0, + 620, + 0, + 0, + 623, + 624, + 0, + 0, + 0, + 625, + 0, + 0, + 626, + 627, + 0, + 0, + 628, + 0, + 0, + 0, + 0, + 630, + 0, + 631, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 641, + 0, + 0, + 0, + 0, + 643, + 656, + 668, + 0, + 0, + 0, + 673, + 0, + 0, + 0, + 674, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 682, + 0, + 687, + 0, + 690, + 0, + 693, + 699, + 700, + 0, + 0, + 0, + 0, + 0, + 0, + 704, + 705, + 0, + 0, + 0, + 0, + 707, + 710, + 0, + 711, + 0, + 0, + 0, + 0, + 726, + 0, + 0, + 729, + 0, + 0, + 0, + 730, + 731, + 0, + 0, + 0, + 0, + 0, + 752, + 0, + 0, + 0, + 762, + 0, + 763, + 0, + 0, + 767, + 0, + 0, + 0, + 770, + 774, + 0, + 0, + 775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 776, + 0, + 0, + 0, + 777, + 783, + 0, + 0, + 0, + 785, + 788, + 0, + 0, + 0, + 0, + 790, + 0, + 0, + 0, + 793, + 0, + 0, + 0, + 0, + 794, + 0, + 0, + 804, + 819, + 821, + 0, + 827, + 0, + 0, + 0, + 834, + 0, + 0, + 835, + 0, + 0, + 0, + 841, + 0, + 844, + 0, + 850, + 851, + 859, + 0, + 860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 874, + 0, + 876, + 0, + 877, + 890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 893, + 894, + 898, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 899, + 0, + 0, + 0, + 900, + 904, + 906, + 0, + 0, + 0, + 907, + 0, + 908, + 909, + 0, + 910, + 0, + 0, + 0, + 0, + 911, + 0, + 0, + 0, + 0, + 0, + 916, + 0, + 0, + 0, + 922, + 925, + 0, + 930, + 0, + 934, + 0, + 0, + 0, + 0, + 0, + 943, + 0, + 0, + 944, + 0, + 953, + 954, + 0, + 0, + 0, + 0, + 0, + 0, + 955, + 0, + 962, + 963, + 0, + 0, + 976, + 0, + 0, + 977, + 978, + 979, + 980, + 0, + 981, + 0, + 0, + 0, + 0, + 984, + 0, + 0, + 985, + 0, + 0, + 987, + 989, + 991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 992, + 0, + 0, + 0, + 993, + 0, + 0, + 0, + 0, + 0, + 0, + 996, + 0, + 0, + 0, + 1000, + 0, + 0, + 0, + 0, + 0, + 1002, + 0, + 0, + 0, + 0, + 1005, + 1007, + 0, + 0, + 0, + 1009, + 0, + 0, + 0, + 1010, + 0, + 0, + 0, + 0, + 0, + 0, + 1011, + 0, + 1012, + 0, + 0, + 0, + 0, + 1014, + 1016, + 0, + 0, + 0, + 1020, + 0, + 
1021, + 0, + 0, + 0, + 0, + 1022, + 0, + 0, + 0, + 1024, + 0, + 0, + 0, + 0, + 0, + 0, + 1025, + 0, + 0, + 1026, + 1027, + 0, + 0, + 0, + 0, + 0, + 1031, + 0, + 1033, + 0, + 0, + 0, + 0, + 1034, + 0, + 0, + 0, + 1037, + 1040, + 0, + 0, + 0, + 1042, + 1043, + 0, + 0, + 1053, + 0, + 1054, + 0, + 0, + 1057, + 0, + 0, + 0, + 1058, + 0, + 0, + 1060, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1061, + 0, + 0, + 1062, + 0, + 0, + 0, + 0, + 1063, + 0, + 0, + 0, + 0, + 1064, + 0, + 0, + 0, + 0, + 0, + 1065, + 0, + 0, + 0, + 0, + 1066, + 1067, + 0, + 0, + 0, + 1069, + 1070, + 1072, + 0, + 0, + 0, + 0, + 0, + 0, + 1073, + 0, + 1075, + 0, + 0, + 0, + 0, + 0, + 0, + 1080, + 1084, + 0, + 0, + 0, + 0, + 1088, + 0, + 0, + 0, + 0, + 0, + 0, + 1094, + 0, + 1095, + 0, + 1107, + 0, + 0, + 0, + 1112, + 1114, + 0, + 1119, + 0, + 1122, + 0, + 0, + 1126, + 0, + 1129, + 0, + 1130, + 0, + 0, + 0, + 0, + 0, + 1132, + 0, + 0, + 0, + 0, + 0, + 0, + 1144, + 0, + 0, + 1145, + 1146, + 0, + 1148, + 1149, + 0, + 0, + 1150, + 1151, + 0, + 0, + 0, + 0, + 1152, + 0, + 1153, + 0, + 0, + 0, + 0, + 0, + 1154, + 0, + 1163, + 0, + 0, + 0, + 1164, + 0, + 0, + 0, + 0, + 0, + 1165, + 0, + 1167, + 0, + 1170, + 0, + 0, + 0, + 0, + 0, + 1171, + 1172, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1173, + 1175, + 1177, + 0, + 1186, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1195, + 0, + 0, + 1221, + 0, + 0, + 1224, + 0, + 0, + 1227, + 0, + 0, + 0, + 0, + 0, + 1228, + 1229, + 0, + 0, + 1230, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1231, + 0, + 0, + 0, + 1233, + 0, + 0, + 1243, + 1244, + 1246, + 1248, + 0, + 0, + 0, + 0, + 1254, + 1255, + 1258, + 1259, + 0, + 0, + 0, + 1260, + 0, + 0, + 1261, + 0, + 0, + 0, + 1262, + 1264, + 0, + 0, + 1265, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1266, + 0, + 1267, + 0, + 0, + 0, + 0, + 1273, + 1274, + 1276, + 1289, + 0, + 0, + 1291, + 1292, + 1293, + 0, + 0, + 1294, + 1295, + 1296, + 0, + 0, + 0, + 0, + 1302, + 0, + 1304, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1311, + 1312, + 0, + 1314, + 0, + 1316, + 1320, + 1321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1322, + 1323, + 1324, + 0, + 1335, + 0, + 1336, + 0, + 0, + 0, + 0, + 1341, + 1342, + 0, + 1346, + 0, + 1357, + 0, + 0, + 0, + 1358, + 1360, + 0, + 0, + 0, + 0, + 0, + 0, + 1361, + 0, + 0, + 0, + 1362, + 1365, + 0, + 1366, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1379, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1386, + 0, + 1388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1395, + 0, + 0, + 0, + 0, + 1403, + 0, + 1405, + 0, + 0, + 1407, + 0, + 0, + 0, + 0, + 0, + 1408, + 1409, + 0, + 1410, + 0, + 0, + 0, + 1412, + 1413, + 1416, + 0, + 0, + 1429, + 1451, + 0, + 0, + 1454, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1455, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1456, + 0, + 0, + 0, + 0, + 1459, + 1460, + 1461, + 1475, + 0, + 0, + 0, + 0, + 0, + 0, + 1477, + 0, + 1480, + 0, + 1481, + 0, + 0, + 1486, + 0, + 0, + 1495, + 0, + 0, + 0, + 1496, + 0, + 0, + 1498, + 1499, + 1501, + 1520, + 1521, + 0, + 0, + 0, + 1526, + 0, + 0, + 0, + 0, + 1528, + 1529, + 0, + 1533, + 1536, + 0, + 0, + 0, + 1537, + 1538, + 1549, + 0, + 1550, + 1558, + 1559, + 1572, + 0, + 1573, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1575, + 0, + 0, + 0, + 0, + 0, + 1579, + 0, + 1599, + 0, + 1603, + 0, + 1604, + 0, + 1605, + 0, + 0, + 0, + 0, + 0, + 1608, + 1610, + 0, + 0, + 0, + 0, + 1611, + 0, + 1615, + 0, + 1616, + 1618, + 0, + 1619, + 0, + 0, + 1622, + 0, + 0, + 0, + 0, + 1634, + 0, + 0, + 0, + 1635, + 0, + 0, + 0, + 1641, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1643, + 0, + 0, + 0, + 1650, + 0, + 0, + 1652, + 0, + 0, + 0, + 0, + 0, + 1653, + 0, + 0, + 0, + 1654, + 0, + 0, + 0, + 0, + 1655, + 0, + 1662, + 0, + 0, + 1663, + 1664, + 0, + 0, + 1668, + 0, + 0, + 1669, + 1670, + 0, + 1672, + 1673, + 0, + 0, + 0, + 0, + 0, + 1674, + 0, + 0, + 0, + 1675, + 1676, + 1680, + 0, + 1682, + 0, + 0, + 1687, + 0, + 0, + 0, + 0, + 0, + 1704, + 0, + 0, + 1705, + 0, + 0, + 1721, + 0, + 0, + 0, + 0, + 1734, + 1735, + 0, + 0, + 0, + 0, + 1737, + 0, + 0, + 0, + 0, + 1739, + 0, + 0, + 1740, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1741, + 1743, + 0, + 0, + 0, + 0, + 1745, + 0, + 0, + 0, + 1749, + 0, + 0, + 0, + 1751, + 0, + 0, + 0, + 0, + 0, + 0, + 1760, + 0, + 0, + 0, + 0, + 1765, + 0, + 0, + 0, + 0, + 0, + 1784, + 0, + 1785, + 1787, + 0, + 0, + 0, + 0, + 1788, + 1789, + 0, + 0, + 0, + 0, + 1790, + 1791, + 1793, + 0, + 1798, + 1799, + 0, + 0, + 0, + 0, + 1801, + 0, + 1803, + 1805, + 0, + 0, + 0, + 1806, + 1811, + 0, + 1812, + 1814, + 0, + 1821, + 0, + 0, + 0, + 0, + 0, + 1822, + 1833, + 0, + 0, + 0, + 0, + 0, + 0, + 1848, + 0, + 0, + 0, + 0, + 0, + 0, + 1857, + 0, + 0, + 0, + 1859, + 0, + 0, + 0, + 0, + 1861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1866, + 0, + 1921, + 1925, + 0, + 0, + 0, + 1929, + 1930, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1931, + 0, + 0, + 0, + 0, + 1932, + 0, + 0, + 0, + 1934, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1946, + 0, + 0, + 1948, + 0, + 0, + 0, + 0, + 1950, + 0, + 1957, + 0, + 1958, + 0, + 0, + 0, + 0, + 0, + 1965, + 1967, + 0, + 0, + 0, + 0, + 1968, + 0, + 1969, + 0, + 1971, + 1972, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1973, + 0, + 0, + 0, + 0, + 1975, + 0, + 0, + 0, + 0, + 1976, + 1979, + 0, + 1982, + 0, + 0, + 0, + 0, + 1984, + 1988, + 0, + 0, + 0, + 0, + 1990, + 2004, + 2008, + 0, + 0, + 0, + 2012, + 2013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2015, + 0, + 2016, + 2017, + 0, + 0, + 0, + 0, + 2021, + 0, + 0, + 2025, + 0, + 0, + 0, + 0, + 0, + 2029, + 2036, + 2040, + 0, + 2042, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2043, + 0, + 0, + 0, + 0, + 0, + 2045, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2046, + 2047, + 0, + 2048, + 2049, + 0, + 2059, + 0, + 0, + 2063, + 0, + 2064, + 2065, + 0, + 0, + 2066, + 0, + 0, + 0, + 0, + 0, + 0, + 2069, + 0, + 0, + 0, + 0, + 2070, + 0, + 2071, + 0, + 2072, + 0, + 0, + 0, + 0, + 2080, + 2082, + 2083, + 0, + 0, + 0, + 0, + 0, + 2085, + 0, + 2086, + 2088, + 2089, + 2105, + 0, + 0, + 0, + 0, + 2107, + 0, + 0, + 2116, + 2117, + 0, + 2120, + 0, + 0, + 2122, + 0, + 0, + 0, + 0, + 0, + 2123, + 0, + 0, + 2125, + 2127, + 2128, + 0, + 0, + 0, + 2130, + 0, + 0, + 0, + 2137, + 2139, + 2140, + 2141, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2144, + 2145, + 0, + 0, + 2146, + 2149, + 0, + 0, + 0, + 0, + 2150, + 0, + 0, + 2151, + 2158, + 0, + 2159, + 0, + 2160, + 0, + 0, + 0, + 0, + 0, + 0, + 2161, + 2162, + 0, + 0, + 2194, + 2202, + 0, + 0, + 0, + 0, + 0, + 0, + 2205, + 2217, + 0, + 2220, + 0, + 2221, + 0, + 2222, + 2224, + 0, + 0, + 0, + 0, + 2237, + 0, + 0, + 0, + 0, + 0, + 2238, + 0, + 2239, + 2241, + 0, + 0, + 2242, + 0, + 0, + 0, + 0, + 0, + 2243, + 0, + 0, + 0, + 0, + 0, + 0, + 2252, + 0, + 0, + 2253, + 0, + 0, + 0, + 2257, + 2258, + 0, + 0, + 0, + 2260, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2262, + 0, + 2264, + 0, + 0, + 0, + 0, + 0, + 2269, + 2270, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2271, + 0, + 2273, + 0, + 0, + 0, + 0, + 2277, + 0, + 0, + 0, + 0, + 
2278, + 0, + 0, + 0, + 0, + 2279, + 0, + 2280, + 0, + 2283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2289, + 2290, + 0, + 0, + 0, + 0, + 2291, + 0, + 2292, + 0, + 0, + 0, + 2293, + 2295, + 2296, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2298, + 0, + 0, + 0, + 0, + 0, + 2303, + 0, + 2305, + 0, + 0, + 2306, + 0, + 2307, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2313, + 2314, + 2315, + 2316, + 0, + 0, + 2318, + 0, + 2319, + 0, + 2322, + 0, + 0, + 2323, + 0, + 2324, + 0, + 2326, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2335, + 0, + 2336, + 2338, + 2339, + 0, + 2340, + 0, + 0, + 0, + 2355, + 0, + 2375, + 0, + 2382, + 2386, + 0, + 2387, + 0, + 0, + 2394, + 0, + 0, + 0, + 0, + 2395, + 0, + 2397, + 0, + 0, + 0, + 0, + 0, + 2398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2399, + 2402, + 2404, + 2408, + 2411, + 0, + 0, + 0, + 2413, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2415, + 0, + 0, + 2416, + 2417, + 2419, + 0, + 2420, + 0, + 0, + 0, + 0, + 0, + 2425, + 0, + 0, + 0, + 2426, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2427, + 2428, + 0, + 2429, + 0, + 0, + 2430, + 2434, + 0, + 2436, + 0, + 0, + 0, + 0, + 0, + 0, + 2441, + 2442, + 0, + 2445, + 0, + 0, + 2446, + 2457, + 0, + 2459, + 0, + 0, + 2462, + 0, + 2464, + 0, + 2477, + 0, + 2478, + 2486, + 0, + 0, + 0, + 2491, + 0, + 0, + 2493, + 0, + 0, + 2494, + 0, + 2495, + 0, + 2513, + 2523, + 0, + 0, + 0, + 0, + 2524, + 0, + 0, + 0, + 0, + 0, + 0, + 2528, + 2529, + 2530, + 0, + 0, + 2531, + 0, + 2533, + 0, + 0, + 2534, + 2535, + 0, + 2536, + 2537, + 0, + 2538, + 0, + 2539, + 2540, + 0, + 0, + 0, + 2545, + 2546, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2548, + 0, + 0, + 2549, + 0, + 2550, + 2555, + 0, + 0, + 0, + 0, + 0, + 2557, + 0, + 2560, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2561, + 0, + 2576, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2577, + 2578, + 0, + 0, + 0, + 2579, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2580, + 0, + 0, + 0, + 0, + 2581, + 0, + 0, + 0, + 0, + 2583, + 0, + 2584, + 0, + 2588, + 2590, + 0, + 0, + 0, + 2591, + 0, + 0, + 0, + 0, + 2593, + 2594, + 0, + 2595, + 0, + 2601, + 2602, + 0, + 0, + 2603, + 0, + 2605, + 0, + 0, + 0, + 2606, + 2607, + 2611, + 0, + 2615, + 0, + 0, + 0, + 2617, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2619, + 0, + 0, + 2620, + 0, + 0, + 0, + 2621, + 0, + 2623, + 0, + 2625, + 0, + 0, + 2628, + 2629, + 0, + 0, + 2635, + 2636, + 2637, + 0, + 0, + 2639, + 0, + 0, + 0, + 2642, + 0, + 0, + 0, + 0, + 2643, + 0, + 2644, + 0, + 2649, + 0, + 0, + 0, + 0, + 0, + 0, + 2655, + 2656, + 0, + 0, + 2657, + 0, + 0, + 0, + 0, + 0, + 2658, + 0, + 0, + 0, + 0, + 0, + 2659, + 0, + 0, + 0, + 0, + 2664, + 2685, + 0, + 2687, + 0, + 2688, + 0, + 0, + 2689, + 0, + 0, + 2694, + 0, + 2695, + 0, + 0, + 2698, + 0, + 2701, + 2706, + 0, + 0, + 0, + 2707, + 0, + 2709, + 2710, + 2711, + 0, + 0, + 0, + 2720, + 2730, + 2735, + 0, + 0, + 0, + 0, + 2738, + 2740, + 0, + 0, + 0, + 0, + 2747, + 0, + 0, + 0, + 0, + 0, + 0, + 2748, + 0, + 0, + 2749, + 0, + 0, + 0, + 0, + 0, + 2750, + 0, + 0, + 2752, + 2754, + 0, + 0, + 0, + 0, + 0, + 2758, + 0, + 0, + 0, + 0, + 2762, + 0, + 0, + 0, + 0, + 2763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2764, + 2767, + 0, + 0, + 0, + 0, + 2768, + 0, + 0, + 2770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2772, + 0, + 0, + 0, + 0, + 0, + 2773, + 2776, + 0, + 0, + 2783, + 0, + 0, + 2784, + 0, + 
2789, + 0, + 2790, + 0, + 0, + 0, + 2792, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2793, + 2795, + 0, + 0, + 0, + 0, + 0, + 0, + 2796, + 0, + 0, + 0, + 0, + 0, + 0, + 2797, + 2799, + 0, + 0, + 0, + 0, + 2803, + 0, + 0, + 0, + 0, + 2806, + 0, + 2807, + 2808, + 2817, + 2819, + 0, + 0, + 0, + 0, + 0, + 2821, + 0, + 0, + 0, + 0, + 2822, + 2823, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2824, + 0, + 0, + 2828, + 0, + 2834, + 0, + 0, + 0, + 0, + 0, + 0, + 2836, + 0, + 2838, + 0, + 0, + 2839, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2841, + 0, + 0, + 0, + 2842, + 0, + 0, + 0, + 0, + 0, + 2843, + 2844, + 0, + 0, + 0, + 0, + 2846, + 0, + 0, + 2847, + 0, + 2849, + 0, + 2853, + 0, + 0, + 0, + 0, + 0, + 2857, + 0, + 0, + 0, + 0, + 2858, + 0, + 2859, + 0, + 0, + 2860, + 0, + 2862, + 2868, + 0, + 0, + 0, + 0, + 2875, + 0, + 2876, + 0, + 0, + 2877, + 2878, + 2884, + 2889, + 2890, + 0, + 0, + 2891, + 0, + 0, + 2892, + 0, + 0, + 0, + 2906, + 2912, + 0, + 2913, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2916, + 0, + 2934, + 0, + 0, + 0, + 0, + 0, + 2935, + 0, + 0, + 0, + 0, + 2939, + 0, + 2940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2941, + 0, + 0, + 0, + 2946, + 0, + 2949, + 0, + 0, + 2950, + 2954, + 2955, + 0, + 0, + 0, + 2959, + 2961, + 0, + 0, + 2962, + 0, + 2963, + 0, + 0, + 0, + 0, + 0, + 0, + 2964, + 2965, + 2966, + 2967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2969, + 0, + 0, + 0, + 0, + 0, + 2970, + 2975, + 0, + 2982, + 2983, + 2984, + 0, + 0, + 0, + 0, + 0, + 2989, + 0, + 0, + 2990, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2998, + 0, + 3000, + 3001, + 0, + 0, + 3002, + 0, + 0, + 0, + 3003, + 0, + 0, + 3012, + 0, + 0, + 3022, + 0, + 0, + 3024, + 0, + 0, + 3025, + 3027, + 0, + 0, + 0, + 3030, + 0, + 0, + 0, + 0, + 3034, + 3035, + 0, + 0, + 3036, + 0, + 3039, + 0, + 3049, + 0, + 0, + 3050, + 0, + 0, + 0, + 0, + 0, + 0, + 3051, + 0, + 3053, + 0, + 0, + 0, + 0, + 3057, + 0, + 3058, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3063, + 0, + 0, + 3073, + 3074, + 3078, + 3079, + 0, + 3080, + 3086, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3087, + 0, + 3092, + 0, + 3095, + 0, + 3099, + 0, + 0, + 0, + 3100, + 0, + 3101, + 3102, + 0, + 3122, + 0, + 0, + 0, + 3124, + 0, + 3125, + 0, + 0, + 0, + 0, + 0, + 0, + 3132, + 3134, + 0, + 0, + 3136, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3147, + 0, + 0, + 3149, + 0, + 0, + 0, + 0, + 0, + 3150, + 3151, + 3152, + 0, + 0, + 0, + 0, + 3158, + 0, + 0, + 3160, + 0, + 0, + 3161, + 0, + 0, + 3162, + 0, + 3163, + 3166, + 3168, + 0, + 0, + 3169, + 3170, + 0, + 0, + 3171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3182, + 0, + 3184, + 0, + 0, + 3188, + 0, + 0, + 3194, + 0, + 0, + 0, + 0, + 0, + 0, + 3204, + 0, + 0, + 0, + 0, + 3209, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3216, + 3217, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3219, + 0, + 0, + 3220, + 3222, + 0, + 3223, + 0, + 0, + 0, + 0, + 3224, + 0, + 3225, + 3226, + 0, + 3228, + 3233, + 0, + 3239, + 3241, + 3242, + 0, + 0, + 3251, + 3252, + 3253, + 3255, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3260, + 0, + 0, + 3261, + 0, + 0, + 0, + 3267, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3271, + 0, + 0, + 0, + 3278, + 0, + 3282, + 0, + 0, + 0, + 3284, + 0, + 0, + 0, + 3285, + 3286, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3287, + 3292, + 0, + 0, + 0, + 0, + 3294, + 3296, + 0, + 0, + 3299, + 3300, + 3301, + 0, + 3302, + 0, + 0, + 0, + 0, + 0, + 3304, + 3306, + 0, + 0, + 0, + 0, + 0, + 0, + 3308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
3311, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3312, + 3314, + 3315, + 0, + 3318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3319, + 0, + 0, + 0, + 0, + 0, + 3321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3322, + 0, + 0, + 3324, + 3325, + 0, + 0, + 3326, + 0, + 0, + 3328, + 3329, + 3331, + 0, + 0, + 3335, + 0, + 0, + 3337, + 0, + 3338, + 0, + 0, + 0, + 0, + 3343, + 3347, + 0, + 0, + 0, + 3348, + 0, + 0, + 3351, + 0, + 0, + 0, + 0, + 0, + 0, + 3354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3355, + 0, + 0, + 3365, + 3366, + 3367, + 0, + 0, + 0, + 0, + 0, + 0, + 3368, + 3369, + 0, + 3370, + 0, + 0, + 3373, + 0, + 0, + 3376, + 0, + 0, + 3377, + 0, + 3379, + 3387, + 0, + 0, + 0, + 0, + 0, + 3390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3402, + 0, + 3403, + 3436, + 3437, + 3439, + 0, + 0, + 3441, + 0, + 0, + 0, + 3442, + 0, + 0, + 3449, + 0, + 0, + 0, + 3450, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3451, + 0, + 0, + 3452, + 0, + 3453, + 3456, + 0, + 3457, + 0, + 0, + 3458, + 0, + 3459, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3460, + 0, + 0, + 3469, + 3470, + 0, + 0, + 3475, + 0, + 0, + 0, + 3480, + 3487, + 3489, + 0, + 3490, + 0, + 0, + 3491, + 3499, + 0, + 3500, + 0, + 0, + 3501, + 0, + 0, + 0, + 3502, + 0, + 3514, + 0, + 0, + 0, + 3516, + 3517, + 0, + 0, + 0, + 3518, + 0, + 0, + 0, + 0, + 3520, + 3521, + 3522, + 0, + 0, + 3526, + 3530, + 0, + 0, + 0, + 0, + 3531, + 0, + 0, + 0, + 0, + 3536, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3539, + 3541, + 0, + 0, + 3542, + 3544, + 0, + 3547, + 3548, + 0, + 0, + 3550, + 0, + 3553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3554, + 0, + 3555, + 0, + 3558, + 0, + 3559, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3563, + 0, + 3581, + 0, + 0, + 0, + 3599, + 0, + 0, + 0, + 3600, + 0, + 3601, + 0, + 3602, + 3603, + 0, + 0, + 3606, + 3608, + 0, + 3610, + 3611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3612, + 3616, + 3619, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3624, + 3628, + 0, + 3629, + 3634, + 3635, + 0, + 0, + 0, + 0, + 0, + 0, + 3636, + 0, + 3637, + 0, + 0, + 3638, + 3651, + 0, + 0, + 0, + 0, + 0, + 0, + 3652, + 3653, + 0, + 0, + 0, + 0, + 3656, + 3657, + 0, + 0, + 0, + 0, + 0, + 3658, + 0, + 0, + 0, + 0, + 3659, + 0, + 3661, + 3663, + 3664, + 0, + 3665, + 0, + 3692, + 0, + 0, + 0, + 3694, + 3696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3698, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3700, + 0, + 0, + 3701, + 0, + 0, + 0, + 3708, + 3709, + 0, + 0, + 0, + 3711, + 3712, + 0, + 0, + 0, + 0, + 0, + 3723, + 0, + 3724, + 3725, + 0, + 0, + 3726, + 0, + 0, + 0, + 0, + 0, + 0, + 3728, + 3729, + 0, + 3734, + 3735, + 3737, + 0, + 0, + 0, + 3743, + 0, + 3745, + 0, + 0, + 3746, + 0, + 0, + 3747, + 3748, + 0, + 3757, + 0, + 3759, + 3766, + 3767, + 0, + 3768, + 0, + 0, + 0, + 0, + 3769, + 0, + 0, + 3771, + 0, + 3774, + 0, + 0, + 0, + 0, + 0, + 0, + 3775, + 0, + 0, + 0, + 0, + 0, + 0, + 3776, + 0, + 3777, + 3786, + 0, + 3788, + 3789, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3791, + 0, + 3811, + 0, + 0, + 0, + 0, + 0, + 3814, + 3815, + 3816, + 3820, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3821, + 0, + 0, + 3825, + 0, + 0, + 0, + 0, + 3835, + 0, + 0, + 3848, + 3849, + 0, + 0, + 0, + 0, + 3850, + 3851, + 3853, + 0, + 0, + 0, + 0, + 3859, + 0, + 3860, + 3862, + 0, + 0, + 0, + 0, + 0, + 3863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3873, + 0, + 3874, + 0, + 3875, + 3886, + 0, + 3887, + 0, + 0, + 0, + 0, + 3892, + 3913, + 0, + 3914, + 0, + 0, + 0, + 3925, + 3931, + 0, + 0, + 
0, + 0, + 3934, + 3941, + 3942, + 0, + 0, + 0, + 0, + 3943, + 0, + 0, + 0, + 3944, + 0, + 0, + 0, + 0, + 0, + 3945, + 0, + 3947, + 0, + 0, + 0, + 3956, + 3957, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3958, + 0, + 3959, + 3965, + 0, + 0, + 0, + 0, + 3966, + 0, + 0, + 0, + 3967, + 0, + 0, + 0, + 3968, + 3974, + 0, + 0, + 0, + 0, + 0, + 3975, + 3977, + 3978, + 0, + 0, + 0, + 0, + 3980, + 0, + 3985, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3986, + 4011, + 0, + 0, + 4017, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4018, + 0, + 0, + 0, + 0, + 4019, + 0, + 4023, + 0, + 0, + 0, + 4027, + 4028, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4031, + 4034, + 0, + 0, + 4035, + 4037, + 4039, + 4040, + 0, + 0, + 0, + 0, + 0, + 4059, + 0, + 4060, + 4061, + 0, + 4062, + 4063, + 4066, + 0, + 0, + 4072, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4088, + 0, + 0, + 0, + 0, + 0, + 4091, + 0, + 0, + 0, + 0, + 4094, + 4095, + 0, + 0, + 4096, + 0, + 0, + 0, + 0, + 0, + 4098, + 4099, + 0, + 0, + 0, + 4101, + 0, + 4104, + 0, + 0, + 0, + 4105, + 4108, + 0, + 4113, + 0, + 0, + 4115, + 4116, + 0, + 4126, + 0, + 0, + 4127, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4128, + 4132, + 4133, + 0, + 4134, + 0, + 0, + 0, + 4137, + 0, + 0, + 4141, + 0, + 0, + 0, + 0, + 4144, + 4146, + 4147, + 0, + 0, + 0, + 0, + 4148, + 0, + 0, + 4311, + 0, + 0, + 0, + 4314, + 4329, + 0, + 4331, + 4332, + 0, + 4333, + 0, + 4334, + 0, + 0, + 0, + 4335, + 0, + 4336, + 0, + 0, + 0, + 4337, + 0, + 0, + 0, + 4342, + 4345, + 4346, + 4350, + 0, + 4351, + 4352, + 0, + 4354, + 4355, + 0, + 0, + 4364, + 0, + 0, + 0, + 0, + 4369, + 0, + 0, + 0, + 4373, + 0, + 4374, + 0, + 0, + 0, + 0, + 4377, + 0, + 0, + 0, + 0, + 4378, + 0, + 0, + 0, + 4380, + 0, + 0, + 0, + 4381, + 4382, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4384, + 0, + 0, + 0, + 0, + 4385, + 0, + 0, + 0, + 4386, + 0, + 0, + 0, + 4391, + 4398, + 0, + 0, + 0, + 0, + 4407, + 4409, + 0, + 0, + 0, + 0, + 4410, + 0, + 0, + 4411, + 0, + 4414, + 4415, + 4418, + 0, + 4427, + 4428, + 4430, + 0, + 4431, + 0, + 4448, + 0, + 0, + 0, + 0, + 0, + 4449, + 0, + 0, + 0, + 4451, + 4452, + 0, + 4453, + 4454, + 0, + 4456, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4459, + 0, + 4463, + 0, + 0, + 0, + 0, + 0, + 4466, + 0, + 4467, + 0, + 4469, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4470, + 4471, + 0, + 4473, + 0, + 0, + 4475, + 0, + 0, + 0, + 0, + 4477, + 4478, + 0, + 0, + 0, + 4479, + 4481, + 0, + 4482, + 0, + 4484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4486, + 0, + 0, + 4488, + 0, + 0, + 4497, + 0, + 4508, + 0, + 0, + 4510, + 4511, + 0, + 4520, + 4523, + 0, + 4524, + 0, + 4525, + 0, + 4527, + 0, + 0, + 4528, + 0, + 0, + 0, + 0, + 4530, + 0, + 4531, + 0, + 0, + 4532, + 0, + 0, + 0, + 4533, + 0, + 0, + 0, + 0, + 0, + 4535, + 0, + 0, + 0, + 4536, + 0, + 0, + 0, + 0, + 0, + 4541, + 4543, + 4544, + 4545, + 4547, + 0, + 4548, + 0, + 0, + 0, + 0, + 4550, + 4551, + 0, + 4553, + 0, + 0, + 0, + 0, + 4562, + 0, + 0, + 4571, + 0, + 0, + 0, + 4574, + 0, + 0, + 0, + 4575, + 0, + 4576, + 0, + 4577, + 0, + 0, + 0, + 4581, + 0, + 0, + 0, + 0, + 0, + 4582, + 0, + 0, + 4586, + 0, + 0, + 0, + 4588, + 0, + 0, + 4597, + 0, + 4598, + 0, + 0, + 0, + 0, + 4616, + 4617, + 0, + 4618, + 0, + 0, + 0, + 0, + 4619, + 0, + 4620, + 0, + 0, + 4621, + 0, + 4624, + 0, + 0, + 0, + 0, + 0, + 4625, + 0, + 0, + 0, + 0, + 4657, + 0, + 4659, + 0, + 4667, + 0, + 0, + 0, + 4668, + 4670, + 0, + 4672, + 0, + 0, + 0, + 0, + 0, + 4673, + 4676, + 0, + 0, + 0, + 0, + 4687, + 0, + 0, + 0, + 0, + 4697, + 0, + 0, + 0, + 
0, + 4699, + 0, + 4701, + 0, + 0, + 0, + 0, + 4702, + 0, + 0, + 4706, + 0, + 0, + 4713, + 0, + 0, + 0, + 4714, + 4715, + 4716, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4717, + 0, + 0, + 4720, + 0, + 4721, + 4729, + 4735, + 0, + 0, + 0, + 4737, + 0, + 0, + 0, + 4739, + 0, + 0, + 0, + 4740, + 0, + 0, + 0, + 4741, + 0, + 0, + 0, + 0, + 0, + 4742, + 0, + 4745, + 4746, + 4747, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4748, + 0, + 0, + 0, + 4749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4751, + 4786, + 0, + 4787, + 0, + 4788, + 4796, + 0, + 0, + 4797, + 4798, + 0, + 4799, + 4806, + 4807, + 0, + 0, + 0, + 0, + 4809, + 4810, + 0, + 0, + 0, + 0, + 0, + 0, + 4811, + 0, + 0, + 0, + 0, + 0, + 4812, + 0, + 4813, + 0, + 0, + 4815, + 0, + 4821, + 4822, + 0, + 0, + 0, + 0, + 4823, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4824, + 0, + 0, + 0, + 0, + 4826, + 0, + 0, + 0, + 4828, + 0, + 4829, + 0, + 0, + 0, + 4843, + 0, + 0, + 4847, + 0, + 4853, + 4855, + 4858, + 0, + 0, + 0, + 0, + 0, + 4859, + 0, + 4864, + 0, + 0, + 4879, + 0, + 0, + 0, + 0, + 4880, + 0, + 0, + 0, + 0, + 4881, + 0, + 4882, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4883, + 0, + 0, + 0, + 0, + 4884, + 0, + 0, + 0, + 0, + 0, + 4886, + 4887, + 4888, + 4894, + 4896, + 0, + 4902, + 0, + 0, + 4905, + 0, + 0, + 4915, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4916, + 4917, + 4919, + 4921, + 0, + 0, + 0, + 0, + 0, + 4926, + 0, + 0, + 0, + 0, + 4927, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4929, + 0, + 4930, + 4931, + 0, + 4938, + 0, + 4952, + 0, + 4953, + 4957, + 4960, + 4964, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5019, + 5020, + 5022, + 0, + 0, + 0, + 0, + 0, + 5023, + 0, + 0, + 0, + 5024, + 0, + 0, + 0, + 5025, + 0, + 0, + 0, + 0, + 5028, + 0, + 0, + 0, + 0, + 5029, + 5030, + 5031, + 0, + 5033, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5034, + 5035, + 0, + 5036, + 0, + 0, + 5037, + 0, + 0, + 0, + 0, + 5038, + 0, + 0, + 5039, + 0, + 0, + 0, + 5041, + 5042, + 0, + 0, + 0, + 0, + 5044, + 5049, + 5054, + 0, + 5055, + 0, + 5057, + 0, + 0, + 0, + 5060, + 0, + 0, + 0, + 0, + 0, + 5063, + 0, + 5064, + 5065, + 0, + 5067, + 0, + 0, + 0, + 5068, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5076, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5077, + 0, + 0, + 5078, + 5080, + 0, + 0, + 5083, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5085, + 0, + 0, + 0, + 0, + 0, + 0, + 5098, + 5099, + 5101, + 5105, + 5107, + 0, + 5108, + 0, + 5109, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5110, + 0, + 0, + 0, + 0, + 0, + 5117, + 5118, + 0, + 5121, + 0, + 5122, + 0, + 0, + 5130, + 0, + 0, + 0, + 5137, + 0, + 0, + 0, + 5148, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5151, + 5154, + 0, + 0, + 0, + 5155, + 0, + 0, + 5156, + 5159, + 5161, + 0, + 0, + 0, + 0, + 5162, + 0, + 0, + 0, + 0, + 5163, + 5164, + 0, + 5166, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5167, + 0, + 0, + 0, + 5172, + 0, + 0, + 0, + 0, + 0, + 0, + 5178, + 5179, + 0, + 0, + 5190, + 0, + 0, + 5191, + 5192, + 5194, + 0, + 0, + 5198, + 5201, + 0, + 0, + 0, + 0, + 0, + 5203, + 0, + 5206, + 5209, + 0, + 0, + 0, + 0, + 0, + 0, + 5213, + 0, + 5214, + 5216, + 0, + 0, + 0, + 0, + 0, + 5217, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5218, + 5219, + 0, + 5231, + 0, + 0, + 5244, + 5249, + 0, + 5254, + 0, + 5255, + 0, + 0, + 5257, + 0, + 0, + 0, + 0, + 0, + 5258, + 0, + 5260, + 5270, + 0, + 5277, + 0, + 0, + 0, + 0, + 0, + 0, + 5280, + 5281, + 5282, + 5283, + 0, + 0, + 0, + 
0, + 0, + 5284, + 0, + 5285, + 0, + 0, + 0, + 0, + 0, + 5287, + 5288, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5289, + 5291, + 0, + 0, + 5294, + 0, + 0, + 5295, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5304, + 0, + 0, + 5306, + 5307, + 5308, + 0, + 5309, + 0, + 0, + 5310, + 0, + 0, + 0, + 0, + 5311, + 5312, + 0, + 5313, + 0, + 0, + 0, + 0, + 0, + 5316, + 0, + 0, + 0, + 5317, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5325, + 0, + 0, + 0, + 0, + 0, + 0, + 5326, + 0, + 5327, + 5329, + 0, + 5332, + 0, + 0, + 0, + 0, + 5338, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5340, + 0, + 0, + 5341, + 0, + 0, + 0, + 5342, + 0, + 5343, + 5344, + 0, + 0, + 5345, + 0, + 0, + 0, + 0, + 0, + 0, + 5347, + 5348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5349, + 0, + 5350, + 0, + 5354, + 0, + 0, + 0, + 0, + 5358, + 0, + 0, + 5359, + 0, + 0, + 5361, + 0, + 0, + 5365, + 0, + 5367, + 0, + 5373, + 0, + 0, + 0, + 5379, + 0, + 0, + 0, + 5380, + 0, + 0, + 0, + 5382, + 0, + 5384, + 0, + 0, + 0, + 0, + 0, + 0, + 5385, + 0, + 0, + 0, + 0, + 5387, + 0, + 0, + 0, + 0, + 0, + 0, + 5388, + 5390, + 5393, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5396, + 0, + 0, + 0, + 0, + 5397, + 5402, + 0, + 0, + 0, + 0, + 0, + 5403, + 0, + 0, + 0, + 5404, + 5405, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5406, + 0, + 0, + 0, + 0, + 5410, + 0, + 0, + 5411, + 0, + 5415, + 0, + 0, + 0, + 0, + 5416, + 5434, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5438, + 0, + 5440, + 0, + 0, + 0, + 0, + 0, + 0, + 5441, + 5442, + 0, + 0, + 0, + 5443, + 5444, + 5447, + 0, + 0, + 5448, + 5449, + 5451, + 0, + 0, + 0, + 5456, + 5457, + 0, + 0, + 0, + 5459, + 0, + 0, + 0, + 5461, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5464, + 0, + 5466, + 0, + 0, + 5467, + 0, + 5470, + 0, + 0, + 5473, + 0, + 0, + 5474, + 0, + 0, + 5476, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5477, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5484, + 0, + 0, + 5485, + 5486, + 0, + 0, + 0, + 0, + 0, + 5488, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5489, + 0, + 0, + 0, + 0, + 0, + 5507, + 0, + 0, + 0, + 5510, + 0, + 5511, + 0, + 0, + 5512, + 0, + 0, + 0, + 5513, + 0, + 5515, + 0, + 0, + 5516, + 5517, + 0, + 5518, + 0, + 0, + 5522, + 0, + 0, + 0, + 0, + 0, + 5534, + 5535, + 0, + 0, + 5536, + 0, + 5538, + 0, + 0, + 5543, + 0, + 5544, + 0, + 0, + 5545, + 0, + 5547, + 0, + 5557, + 0, + 0, + 5558, + 0, + 5560, + 5567, + 0, + 0, + 0, + 0, + 5568, + 0, + 0, + 0, + 5571, + 5573, + 0, + 5574, + 0, + 5575, + 0, + 0, + 0, + 0, + 5577, + 0, + 0, + 5598, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5600, + 5609, + 0, + 0, + 0, + 0, + 5610, + 0, + 0, + 5612, + 0, + 5624, + 0, + 5625, + 0, + 0, + 0, + 5629, + 0, + 5641, + 0, + 5642, + 5643, + 0, + 0, + 0, + 0, + 0, + 0, + 5651, + 0, + 0, + 0, + 5652, + 5653, + 0, + 5661, + 5662, + 5678, + 0, + 5679, + 0, + 0, + 0, + 0, + 5685, + 5686, + 0, + 0, + 0, + 0, + 0, + 5690, + 5692, + 0, + 5703, + 0, + 0, + 0, + 0, + 0, + 5706, + 0, + 0, + 0, + 0, + 5707, + 0, + 0, + 0, + 0, + 0, + 0, + 5708, + 0, + 0, + 5709, + 0, + 5710, + 0, + 0, + 0, + 5712, + 0, + 5733, + 0, + 5734, + 5735, + 0, + 0, + 5744, + 5751, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5752, + 0, + 5754, + 0, + 0, + 0, + 0, + 0, + 0, + 5757, + 5758, + 0, + 5760, + 5761, + 0, + 0, + 0, + 0, + 5763, + 5764, + 5765, + 0, + 5766, + 0, + 5767, + 5768, + 0, + 5770, + 0, + 0, + 0, + 0, + 5776, + 5780, + 0, + 0, + 0, + 0, + 5782, + 0, + 0, + 0, + 0, + 5784, + 0, + 0, + 5788, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 5797, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5799, + 0, + 0, + 5801, + 0, + 0, + 0, + 5811, + 0, + 0, + 0, + 0, + 0, + 0, + 5816, + 0, + 0, + 5827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5830, + 5831, + 0, + 0, + 5832, + 0, + 0, + 5833, + 0, + 5835, + 5844, + 5845, + 0, + 5846, + 0, + 0, + 0, + 0, + 0, + 5850, + 0, + 0, + 0, + 0, + 0, + 5852, + 0, + 5855, + 5857, + 0, + 0, + 5859, + 0, + 5861, + 0, + 0, + 5863, + 0, + 5865, + 0, + 0, + 0, + 5873, + 5875, + 0, + 0, + 0, + 5877, + 0, + 5879, + 0, + 0, + 0, + 5888, + 0, + 0, + 5889, + 5891, + 0, + 5894, + 0, + 0, + 0, + 0, + 0, + 0, + 5895, + 0, + 5897, + 0, + 0, + 0, + 0, + 0, + 0, + 5907, + 0, + 5911, + 0, + 0, + 5912, + 0, + 5913, + 5922, + 5924, + 0, + 5927, + 5928, + 0, + 0, + 0, + 0, + 5929, + 5930, + 0, + 5933, + 0, + 0, + 0, + 0, + 5949, + 0, + 0, + 5951, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5953, + 0, + 0, + 5954, + 0, + 5959, + 5960, + 5961, + 0, + 5964, + 0, + 0, + 0, + 5976, + 5978, + 5987, + 5990, + 0, + 0, + 0, + 0, + 0, + 5991, + 0, + 5992, + 0, + 0, + 0, + 5994, + 5995, + 0, + 0, + 5996, + 0, + 0, + 6001, + 6003, + 0, + 0, + 0, + 0, + 6007, + 0, + 0, + 0, + 0, + 0, + 6008, + 0, + 0, + 6009, + 0, + 6010, + 0, + 0, + 0, + 6011, + 6015, + 0, + 6017, + 0, + 6019, + 0, + 6023, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6025, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6026, + 0, + 6030, + 0, + 0, + 6032, + 0, + 0, + 0, + 6033, + 6038, + 6040, + 0, + 0, + 0, + 6041, + 6045, + 0, + 0, + 6046, + 0, + 0, + 6053, + 0, + 0, + 6054, + 0, + 6055, + 0, + 0, + 0, + 0, + 0, + 0, + 6057, + 0, + 6063, + 0, + 0, + 0, + 6064, + 0, + 6066, + 6071, + 6072, + 0, + 0, + 0, + 0, + 0, + 0, + 6075, + 6076, + 0, + 0, + 6077, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6078, + 6079, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6080, + 0, + 6083, + 0, + 0, + 0, + 0, + 0, + 6084, + 0, + 0, + 6088, + 0, + 6089, + 0, + 0, + 6093, + 6105, + 0, + 0, + 6107, + 0, + 6110, + 0, + 0, + 0, + 6111, + 6125, + 6126, + 0, + 0, + 0, + 6129, + 0, + 0, + 0, + 0, + 6130, + 0, + 0, + 0, + 6131, + 6134, + 0, + 0, + 0, + 0, + 0, + 0, + 6142, + 0, + 0, + 0, + 0, + 0, + 6144, + 0, + 0, + 6146, + 6151, + 6153, + 0, + 6156, + 0, + 6163, + 0, + 6180, + 6181, + 0, + 0, + 0, + 0, + 0, + 6182, + 0, + 0, + 0, + 0, + 6184, + 6195, + 0, + 0, + 6206, + 0, + 6208, + 0, + 0, + 6212, + 6213, + 6214, + 0, + 6215, + 0, + 0, + 0, + 6228, + 0, + 0, + 0, + 6234, + 0, + 0, + 0, + 0, + 0, + 0, + 6235, + 6240, + 0, + 6242, + 6243, + 6244, + 0, + 6250, + 6255, + 0, + 0, + 0, + 0, + 0, + 6257, + 0, + 0, + 0, + 6258, + 6278, + 0, + 6284, + 0, + 0, + 0, + 6285, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6286, + 0, + 0, + 0, + 6320, + 0, + 0, + 6322, + 6332, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6334, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6335, + 0, + 0, + 6337, + 0, + 6338, + 0, + 6339, + 6340, + 0, + 0, + 6356, + 6357, + 6369, + 0, + 0, + 0, + 6370, + 6371, + 6372, + 0, + 6373, + 0, + 0, + 0, + 0, + 0, + 6376, + 0, + 0, + 0, + 0, + 0, + 6382, + 6383, + 6384, + 0, + 0, + 0, + 0, + 6386, + 0, + 6389, + 6397, + 6400, + 6411, + 0, + 6414, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6415, + 6416, + 0, + 0, + 0, + 0, + 0, + 0, + 6417, + 0, + 0, + 0, + 0, + 6418, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6420, + 0, + 6421, + 6423, + 6425, + 0, + 6429, + 6430, + 0, + 6433, + 6438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6439, + 6440, + 0, + 0, + 6441, + 0, + 0, + 6444, + 0, + 0, + 0, + 0, + 6446, + 0, + 0, + 0, + 0, + 6447, + 
6448, + 0, + 0, + 6450, + 0, + 0, + 0, + 6454, + 0, + 0, + 6455, + 0, + 6461, + 0, + 0, + 0, + 0, + 0, + 0, + 6462, + 0, + 0, + 6463, + 0, + 6464, + 0, + 6465, + 6467, + 0, + 0, + 0, + 6468, + 0, + 6479, + 6480, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6481, + 0, + 0, + 6485, + 6487, + 0, + 0, + 0, + 0, + 0, + 0, + 6493, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6494, + 6495, + 6496, + 0, + 0, + 0, + 0, + 0, + 6498, + 0, + 0, + 0, + 6507, + 6508, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6511, + 6512, + 0, + 0, + 0, + 0, + 6513, + 0, + 0, + 0, + 6514, + 0, + 0, + 0, + 0, + 0, + 6516, + 0, + 0, + 6517, + 6518, + 0, + 0, + 0, + 6519, + 6520, + 6521, + 0, + 6523, + 0, + 0, + 0, + 0, + 6524, + 6528, + 0, + 6530, + 0, + 0, + 6532, + 0, + 6578, + 0, + 0, + 0, + 6583, + 0, + 6584, + 0, + 0, + 0, + 6587, + 0, + 0, + 0, + 6590, + 0, + 6591, + 0, + 0, + 0, + 0, + 0, + 6592, + 0, + 0, + 0, + 0, + 6593, + 6594, + 0, + 0, + 0, + 0, + 0, + 6599, + 6600, + 0, + 0, + 6601, + 6602, + 6604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6608, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6610, + 6611, + 0, + 6615, + 0, + 6616, + 6618, + 6620, + 0, + 6637, + 0, + 0, + 0, + 0, + 6639, + 0, + 0, + 0, + 0, + 6641, + 0, + 6642, + 0, + 0, + 0, + 6647, + 0, + 6660, + 6663, + 0, + 6664, + 0, + 6666, + 6669, + 0, + 6675, + 6676, + 6677, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6678, + 0, + 0, + 0, + 6679, + 0, + 6680, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6693, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6704, + 6705, + 6706, + 0, + 0, + 6711, + 6713, + 0, + 0, + 0, + 0, + 0, + 6716, + 0, + 0, + 0, + 6717, + 0, + 6719, + 6724, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6725, + 6726, + 0, + 0, + 0, + 0, + 0, + 6728, + 6729, + 6735, + 0, + 6737, + 6742, + 0, + 0, + 6743, + 6750, + 0, + 6751, + 0, + 0, + 6752, + 6753, + 0, + 0, + 0, + 0, + 0, + 0, + 6754, + 0, + 0, + 0, + 0, + 0, + 6756, + 0, + 0, + 0, + 0, + 0, + 0, + 6763, + 0, + 0, + 6764, + 6765, + 0, + 0, + 0, + 6770, + 0, + 0, + 0, + 6776, + 6780, + 0, + 6781, + 0, + 0, + 0, + 6783, + 0, + 6784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6785, + 0, + 0, + 0, + 6792, + 0, + 0, + 0, + 6793, + 0, + 0, + 6802, + 0, + 0, + 0, + 0, + 0, + 6803, + 0, + 0, + 0, + 6804, + 0, + 0, + 0, + 6812, + 0, + 0, + 6823, + 0, + 6824, + 6839, + 0, + 0, + 0, + 0, + 6852, + 0, + 0, + 6854, + 0, + 6856, + 6857, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6867, + 0, + 6868, + 6870, + 6872, + 0, + 0, + 0, + 6873, + 6874, + 0, + 0, + 0, + 0, + 0, + 6875, + 0, + 0, + 6877, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6878, + 0, + 0, + 0, + 6879, + 0, + 6880, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6887, + 0, + 6888, + 6891, + 6893, + 0, + 6895, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6899, + 0, + 0, + 0, + 0, + 6901, + 0, + 0, + 0, + 0, + 6910, + 0, + 6911, + 0, + 0, + 6912, + 0, + 0, + 6913, + 6914, + 0, + 0, + 0, + 6915, + 0, + 0, + 0, + 6916, + 6919, + 0, + 0, + 0, + 0, + 0, + 0, + 6924, + 0, + 6925, + 0, + 0, + 0, + 6926, + 6927, + 6928, + 0, + 6929, + 0, + 6930, + 0, + 0, + 6931, + 6935, + 0, + 6936, + 0, + 0, + 0, + 0, + 6939, + 6940, + 6941, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6942, + 6948, + 6949, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6952, + 6954, + 6963, + 6965, + 6966, + 0, + 0, + 6967, + 6968, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6969, + 0, + 0, + 6970, + 6979, + 0, + 0, + 6980, + 0, + 0, + 6983, + 0, + 0, + 0, + 0, + 0, + 6984, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 6988, + 6990, + 6992, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6995, + 0, + 0, + 0, + 7012, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7019, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7021, + 0, + 0, + 7022, + 7023, + 7028, + 0, + 7030, + 7033, + 0, + 0, + 0, + 0, + 0, + 0, + 7038, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7039, + 0, + 0, + 0, + 0, + 0, + 7046, + 0, + 7047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7048, + 7052, + 0, + 0, + 0, + 0, + 0, + 7054, + 0, + 7060, + 0, + 0, + 0, + 0, + 7061, + 0, + 7065, + 0, + 0, + 0, + 0, + 7067, + 7069, + 0, + 7070, + 7071, + 7072, + 0, + 0, + 7078, + 0, + 7080, + 7081, + 0, + 7083, + 0, + 0, + 0, + 7084, + 7087, + 7088, + 0, + 0, + 7090, + 0, + 7093, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7107, + 0, + 0, + 7108, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7110, + 0, + 7114, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7115, + 0, + 7116, + 0, + 0, + 0, + 0, + 0, + 7117, + 0, + 0, + 7118, + 0, + 0, + 7124, + 0, + 7125, + 0, + 0, + 7126, + 0, + 0, + 0, + 0, + 7128, + 0, + 0, + 0, + 0, + 0, + 7129, + 0, + 7130, + 0, + 7132, + 7133, + 0, + 0, + 7134, + 0, + 0, + 7139, + 0, + 7148, + 7150, + 0, + 0, + 0, + 0, + 7152, + 0, + 0, + 0, + 7153, + 7156, + 7157, + 0, + 0, + 0, + 0, + 0, + 7158, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7163, + 7165, + 7169, + 0, + 7171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7172, + 0, + 7173, + 7181, + 0, + 0, + 0, + 0, + 0, + 7182, + 7185, + 0, + 0, + 0, + 0, + 7187, + 0, + 7201, + 7204, + 0, + 0, + 0, + 0, + 0, + 7206, + 7207, + 0, + 0, + 0, + 0, + 7211, + 7216, + 0, + 7218, + 0, + 0, + 0, + 0, + 7226, + 7228, + 7230, + 7232, + 7233, + 7235, + 7237, + 0, + 0, + 0, + 0, + 7238, + 7241, + 0, + 7242, + 0, + 0, + 7247, + 0, + 0, + 0, + 7266, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7289, + 0, + 0, + 7290, + 7291, + 0, + 0, + 7292, + 0, + 7297, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7300, + 0, + 7301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7302, + 0, + 0, + 0, + 0, + 7305, + 0, + 0, + 0, + 0, + 7307, + 0, + 7308, + 0, + 7310, + 0, + 7335, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7337, + 0, + 7343, + 7347, + 0, + 0, + 0, + 0, + 0, + 7348, + 0, + 7349, + 7350, + 7352, + 7354, + 0, + 0, + 0, + 0, + 7357, + 0, + 7358, + 7366, + 0, + 7367, + 7368, + 0, + 0, + 7373, + 0, + 0, + 0, + 7374, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7376, + 0, + 0, + 0, + 7377, + 0, + 0, + 0, + 0, + 0, + 7378, + 0, + 7379, + 7380, + 0, + 0, + 0, + 0, + 0, + 7383, + 0, + 0, + 7386, + 0, + 0, + 0, + 0, + 7398, + 0, + 0, + 0, + 7399, + 7400, + 0, + 7401, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7402, + 0, + 0, + 0, + 0, + 0, + 7405, + 0, + 0, + 0, + 0, + 0, + 7406, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7421, + 7427, + 7429, + 0, + 0, + 0, + 7435, + 0, + 0, + 7436, + 0, + 0, + 0, + 7437, + 0, + 0, + 0, + 0, + 0, + 0, + 7438, + 7443, + 0, + 7446, + 0, + 7448, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7456, + 0, + 0, + 0, + 0, + 0, + 7457, + 0, + 0, + 7461, + 0, + 0, + 0, + 0, + 0, + 7462, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7463, + 7466, + 7472, + 0, + 7476, + 0, + 0, + 7490, + 0, + 7491, + 0, + 0, + 7493, + 0, + 0, + 0, + 7498, + 7499, + 0, + 0, + 7508, + 0, + 0, + 0, + 0, + 0, + 7512, + 0, + 0, + 0, + 7513, + 7514, + 7516, + 0, + 0, + 0, + 0, + 7518, + 0, + 0, + 7519, + 7521, + 7522, + 0, + 0, + 0, + 7526, + 0, + 0, + 7529, + 0, + 0, + 7531, + 0, + 7536, + 0, + 7538, + 0, + 7539, + 0, 
+ 0, + 7541, + 7542, + 7546, + 0, + 0, + 0, + 0, + 0, + 7547, + 0, + 7548, + 0, + 0, + 0, + 0, + 0, + 7550, + 0, + 0, + 7552, + 7553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7554, + 7563, + 0, + 7573, + 0, + 0, + 0, + 0, + 0, + 0, + 7574, + 7576, + 0, + 7578, + 7581, + 7583, + 0, + 0, + 0, + 7584, + 0, + 7587, + 0, + 0, + 0, + 0, + 0, + 7589, + 0, + 0, + 0, + 7594, + 0, + 0, + 7595, + 0, + 0, + 7600, + 7602, + 7610, + 0, + 0, + 0, + 0, + 0, + 7612, + 0, + 7613, + 7614, + 0, + 0, + 7615, + 0, + 0, + 7616, + 0, + 7620, + 0, + 7621, + 7622, + 0, + 7623, + 0, + 0, + 0, + 0, + 7626, + 0, + 0, + 0, + 0, + 7627, + 7629, + 7631, + 0, + 0, + 7633, + 0, + 0, + 0, + 0, + 0, + 7639, + 0, + 7640, + 7642, + 0, + 0, + 7643, + 0, + 0, + 0, + 0, + 7644, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7645, + 0, + 0, + 0, + 0, + 0, + 7661, + 7662, + 7663, + 7665, + 0, + 7666, + 0, + 7667, + 0, + 7684, + 7688, + 7690, + 0, + 7691, + 0, + 0, + 0, + 0, + 0, + 0, + 7692, + 0, + 0, + 7700, + 0, + 7707, + 0, + 7708, + 0, + 7709, + 0, + 7721, + 0, + 0, + 0, + 7722, + 0, + 7724, + 0, + 0, + 0, + 0, + 0, + 0, + 7729, + 7731, + 0, + 7732, + 0, + 7733, + 7735, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7739, + 0, + 0, + 7741, + 7745, + 0, + 7748, + 0, + 0, + 0, + 7751, + 0, + 0, + 0, + 7752, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7753, + 0, + 0, + 7756, + 0, + 7757, + 0, + 7759, + 0, + 7760, + 0, + 0, + 0, + 0, + 7761, + 7768, + 0, + 0, + 7769, + 0, + 0, + 7770, + 0, + 0, + 7771, + 0, + 0, + 7772, + 0, + 0, + 7773, + 0, + 0, + 0, + 0, + 0, + 7778, + 7783, + 0, + 0, + 0, + 0, + 0, + 7784, + 7785, + 0, + 7790, + 0, + 0, + 0, + 0, + 7792, + 0, + 7798, + 0, + 0, + 0, + 0, + 0, + 7799, + 0, + 7810, + 0, + 0, + 7813, + 0, + 7814, + 0, + 7816, + 0, + 7818, + 7824, + 7825, + 7826, + 0, + 7828, + 7830, + 0, + 0, + 0, + 7840, + 0, + 7842, + 0, + 7843, + 0, + 0, + 0, + 0, + 7844, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7846, + 0, + 0, + 0, + 0, + 0, + 7856, + 7857, + 7858, + 7862, + 0, + 7865, + 0, + 0, + 7866, + 0, + 0, + 7913, + 0, + 0, + 0, + 0, + 7914, + 0, + 0, + 7915, + 7917, + 7918, + 7919, + 0, + 7920, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7921, + 7922, + 0, + 7924, + 0, + 0, + 7925, + 0, + 0, + 7927, + 0, + 7930, + 7935, + 0, + 0, + 7937, + 0, + 0, + 0, + 0, + 0, + 0, + 7939, + 0, + 7940, + 0, + 0, + 0, + 0, + 0, + 7941, + 0, + 0, + 0, + 0, + 7945, + 0, + 0, + 0, + 0, + 7949, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7950, + 0, + 7953, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7968, + 0, + 0, + 0, + 0, + 7969, + 7972, + 7992, + 0, + 7993, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7994, + 0, + 0, + 0, + 0, + 8007, + 8008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8010, + 0, + 0, + 0, + 8012, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8018, + 0, + 8028, + 8029, + 0, + 0, + 8030, + 0, + 0, + 8032, + 8033, + 0, + 0, + 8034, + 8036, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8037, + 0, + 0, + 0, + 8043, + 8052, + 8059, + 8060, + 0, + 0, + 8061, + 0, + 0, + 0, + 8062, + 0, + 8063, + 0, + 8064, + 0, + 8066, + 8068, + 0, + 0, + 0, + 8080, + 8081, + 0, + 8089, + 0, + 0, + 0, + 0, + 0, + 8092, + 0, + 0, + 0, + 0, + 0, + 0, + 8093, + 8110, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8111, + 0, + 0, + 0, + 0, + 0, + 8112, + 8115, + 0, + 8117, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8120, + 8121, + 8122, + 8128, + 8129, + 8130, + 8131, + 0, + 0, + 8139, + 0, + 0, + 8144, + 0, + 0, + 0, + 0, + 8145, + 8146, + 8153, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 8154, + 0, + 8157, + 8160, + 8162, + 0, + 8164, + 8165, + 0, + 0, + 0, + 0, + 8166, + 8167, + 0, + 0, + 8179, + 0, + 0, + 0, + 8185, + 0, + 0, + 0, + 8186, + 0, + 0, + 8187, + 0, + 0, + 0, + 8188, + 0, + 0, + 0, + 0, + 0, + 8204, + 0, + 0, + 0, + 0, + 8210, + 0, + 0, + 0, + 0, + 0, + 8213, + 0, + 8214, + 0, + 0, + 8215, + 0, + 0, + 0, + 0, + 0, + 0, + 8218, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8219, + 0, + 8221, + 0, + 0, + 8222, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8225, + 0, + 0, + 0, + 8233, + 0, + 0, + 8242, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8247, + 0, + 8248, + 8252, + 0, + 8256, + 8257, + 0, + 0, + 8261, + 0, + 8264, + 8265, + 0, + 0, + 0, + 0, + 8267, + 0, + 0, + 0, + 8269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8270, + 0, + 0, + 0, + 8278, + 0, + 8279, + 8283, + 0, + 0, + 8285, + 8286, + 8289, + 8292, + 0, + 0, + 0, + 0, + 8293, + 8295, + 8299, + 8300, + 8301, + 0, + 0, + 0, + 0, + 0, + 0, + 8304, + 8307, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8321, + 0, + 0, + 0, + 8322, + 8323, + 8325, + 8326, + 8327, + 0, + 0, + 8332, + 8338, + 0, + 0, + 8340, + 0, + 0, + 0, + 0, + 0, + 8350, + 0, + 0, + 8351, + 0, + 8354, + 8355, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8360, + 8372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8377, + 0, + 0, + 0, + 0, + 8380, + 0, + 0, + 0, + 8383, + 0, + 8384, + 0, + 0, + 0, + 0, + 8386, + 8392, + 0, + 0, + 8394, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8396, + 8397, + 0, + 8398, + 0, + 8399, + 0, + 0, + 0, + 0, + 0, + 8400, + 0, + 8401, + 8410, + 8411, + 0, + 8412, + 8413, + 8422, + 0, + 0, + 0, + 0, + 8423, + 0, + 0, + 0, + 0, + 8424, + 0, + 0, + 8425, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8441, + 8442, + 0, + 0, + 0, + 0, + 0, + 0, + 8443, + 0, + 0, + 8444, + 0, + 8447, + 0, + 0, + 0, + 0, + 8451, + 0, + 8458, + 0, + 8462, + 0, + 0, + 8468, + 0, + 8469, + 0, + 0, + 0, + 8470, + 0, + 8473, + 8479, + 8480, + 0, + 0, + 0, + 0, + 8481, + 8483, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8484, + 0, + 0, + 8490, + 0, + 0, + 0, + 0, + 0, + 0, + 8491, + 8493, + 8494, + 0, + 8528, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8530, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8534, + 8538, + 8540, + 0, + 0, + 8541, + 0, + 0, + 8545, + 0, + 8557, + 0, + 0, + 8569, + 8570, + 0, + 0, + 8571, + 8574, + 8575, + 8579, + 0, + 8583, + 0, + 0, + 0, + 0, + 8591, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8606, + 0, + 8607, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8608, + 0, + 0, + 8609, + 0, + 0, + 0, + 8610, + 0, + 0, + 0, + 8611, + 0, + 0, + 8613, + 8617, + 8621, + 0, + 0, + 8622, + 0, + 8623, + 0, + 8624, + 8625, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8637, + 8638, + 8639, + 8650, + 0, + 0, + 0, + 0, + 8652, + 8654, + 8655, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8656, + 0, + 0, + 0, + 0, + 0, + 8657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8658, + 0, + 0, + 8659, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8660, + 0, + 0, + 0, + 0, + 0, + 0, + 8661, + 8663, + 8664, + 0, + 0, + 0, + 0, + 8665, + 0, + 8669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8671, + 8674, + 0, + 8684, + 0, + 8686, + 0, + 0, + 0, + 8689, + 0, + 0, + 0, + 8690, + 0, + 8706, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8710, + 0, + 8711, + 8713, + 8714, + 8724, + 8727, + 8728, + 8733, + 8736, + 0, + 8737, + 8739, + 0, + 0, + 0, + 0, + 8742, + 8743, + 8745, + 8754, + 0, + 0, + 0, + 0, + 
8756, + 0, + 0, + 0, + 0, + 0, + 0, + 8757, + 8760, + 0, + 0, + 0, + 0, + 0, + 8762, + 8763, + 8764, + 0, + 8766, + 8769, + 8770, + 8773, + 0, + 8774, + 0, + 8779, + 0, + 0, + 0, + 0, + 8780, + 0, + 0, + 8781, + 0, + 0, + 8783, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8785, + 0, + 0, + 0, + 0, + 8786, + 0, + 0, + 0, + 0, + 8788, + 8790, + 0, + 0, + 0, + 8803, + 0, + 8813, + 8814, + 0, + 0, + 0, + 0, + 0, + 8815, + 8816, + 0, + 0, + 0, + 0, + 8818, + 0, + 0, + 0, + 0, + 8822, + 8828, + 8829, + 0, + 8831, + 0, + 0, + 0, + 0, + 8833, + 0, + 0, + 0, + 8834, + 0, + 0, + 0, + 8835, + 0, + 8836, + 0, + 0, + 0, + 8837, + 0, + 0, + 0, + 0, + 0, + 0, + 8838, + 8839, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8840, + 0, + 0, + 0, + 8841, + 0, + 8842, + 0, + 0, + 0, + 8846, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8847, + 0, + 8848, + 0, + 0, + 8864, + 0, + 0, + 8866, + 0, + 0, + 8870, + 8872, + 0, + 0, + 8873, + 8874, + 0, + 0, + 0, + 0, + 0, + 0, + 8875, + 0, + 8876, + 0, + 0, + 0, + 0, + 8896, + 8900, + 0, + 0, + 0, + 0, + 8901, + 0, + 0, + 0, + 0, + 0, + 8904, + 0, + 8907, + 0, + 0, + 0, + 0, + 8911, + 8912, + 8913, + 0, + 0, + 0, + 8914, + 0, + 8915, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8916, + 0, + 0, + 0, + 8929, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8930, + 0, + 8932, + 0, + 8943, + 0, + 0, + 0, + 8945, + 8947, + 0, + 0, + 0, + 0, + 8949, + 0, + 8950, + 0, + 8954, + 8957, + 0, + 0, + 8970, + 0, + 0, + 0, + 0, + 8971, + 0, + 8996, + 0, + 0, + 0, + 0, + 8997, + 9000, + 0, + 0, + 0, + 0, + 9001, + 9002, + 0, + 9004, + 9009, + 9024, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9027, + 9082, + 0, + 0, + 9083, + 9089, + 0, + 0, + 0, + 0, + 0, + 0, + 9090, + 0, + 0, + 0, + 9092, + 0, + 0, + 9093, + 0, + 9095, + 0, + 0, + 9096, + 9097, + 9101, + 9102, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9112, + 0, + 0, + 0, + 0, + 0, + 0, + 9114, + 0, + 0, + 9120, + 0, + 9121, + 9122, + 0, + 0, + 0, + 9123, + 9124, + 0, + 0, + 9125, + 0, + 0, + 9126, + 0, + 9127, + 0, + 0, + 9129, + 9131, + 0, + 0, + 0, + 9132, + 0, + 0, + 9136, + 0, + 9144, + 0, + 0, + 9148, + 0, + 0, + 0, + 0, + 0, + 0, + 9149, + 0, + 9152, + 9163, + 0, + 0, + 9165, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9166, + 0, + 9169, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9170, + 0, + 0, + 0, + 0, + 9172, + 0, + 9174, + 9175, + 9176, + 0, + 9177, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9186, + 0, + 9187, + 0, + 0, + 0, + 9188, + 9189, + 0, + 0, + 9190, + 0, + 0, + 0, + 0, + 9191, + 0, + 0, + 0, + 9193, + 0, + 0, + 0, + 0, + 9197, + 9198, + 0, + 0, + 0, + 9208, + 9211, + 0, + 0, + 0, + 0, + 9216, + 9217, + 0, + 9220, + 0, + 0, + 0, + 0, + 9221, + 9222, + 9223, + 0, + 9224, + 9225, + 0, + 0, + 9227, + 0, + 9228, + 9229, + 0, + 0, + 9230, + 0, + 9232, + 0, + 9233, + 0, + 0, + 0, + 0, + 0, + 9234, + 9235, + 0, + 0, + 9237, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9238, + 9240, + 0, + 0, + 9241, + 0, + 0, + 0, + 0, + 9244, + 0, + 0, + 0, + 0, + 9247, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9248, + 0, + 0, + 0, + 9249, + 0, + 0, + 0, + 0, + 0, + 9250, + 0, + 0, + 0, + 0, + 9251, + 0, + 0, + 9252, + 9255, + 0, + 0, + 0, + 9256, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9257, + 0, + 0, + 9258, + 0, + 0, + 0, + 0, + 0, + 0, + 9259, + 0, + 0, + 0, + 0, + 0, + 9262, + 9263, + 0, + 0, + 9265, + 9266, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9268, + 
9271, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9273, + 0, + 0, + 0, + 9276, + 9277, + 9279, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9280, + 0, + 0, + 9293, + 0, + 0, + 0, + 0, + 0, + 9297, + 9301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9308, + 9309, + 9313, + 9321, + 9322, + 0, + 9326, + 9327, + 0, + 0, + 9477, + 0, + 9479, + 0, + 0, + 0, + 0, + 9482, + 0, + 0, + 0, + 9483, + 0, + 9484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9485, + 0, + 0, + 9486, + 0, + 0, + 0, + 9489, + 0, + 0, + 0, + 0, + 9490, + 9491, + 0, + 0, + 0, + 0, + 9493, + 0, + 9495, + 9496, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9500, + 0, + 9502, + 0, + 0, + 0, + 0, + 0, + 9504, + 9507, + 0, + 9509, + 0, + 9511, + 0, + 0, + 9513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9515, + 0, + 0, + 0, + 0, + 0, + 0, + 9516, + 9517, + 0, + 0, + 0, + 0, + 9532, + 0, + 0, + 9533, + 0, + 0, + 9538, + 0, + 9539, + 9540, + 0, + 0, + 0, + 0, + 9541, + 0, + 0, + 0, + 9542, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9544, + 9545, + 0, + 9546, + 0, + 0, + 0, + 0, + 0, + 0, + 9547, + 9548, + 0, + 0, + 0, + 9550, + 0, + 9557, + 0, + 9558, + 0, + 9561, + 0, + 9563, + 9570, + 0, + 9572, + 9574, + 9575, + 0, + 0, + 0, + 9577, + 9592, + 0, + 0, + 9596, + 0, + 0, + 0, + 9598, + 0, + 9600, + 0, + 9601, + 0, + 0, + 0, + 0, + 0, + 0, + 9608, + 0, + 9638, + 9639, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9641, + 0, + 0, + 9643, + 9644, + 9645, + 9646, + 0, + 0, + 0, + 9648, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9650, + 9654, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9655, + 0, + 0, + 0, + 0, + 0, + 9656, + 0, + 9657, + 0, + 0, + 0, + 0, + 9658, + 0, + 0, + 9659, + 0, + 0, + 9664, + 0, + 0, + 9665, + 0, + 9667, + 9669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9671, + 0, + 9673, + 9681, + 0, + 0, + 0, + 0, + 9682, + 9683, + 9684, + 0, + 0, + 0, + 0, + 9686, + 9698, + 0, + 0, + 9700, + 9701, + 9702, + 0, + 9703, + 9717, + 0, + 0, + 0, + 0, + 9718, + 0, + 9726, + 0, + 0, + 0, + 0, + 9727, + 0, + 0, + 0, + 9728, + 0, + 9742, + 0, + 9744, + 0, + 0, + 0, + 9750, + 0, + 9754, + 9755, + 0, + 0, + 0, + 0, + 0, + 9756, + 0, + 9757, + 9768, + 0, + 9769, + 0, + 0, + 0, + 9770, + 9771, + 0, + 9773, + 0, + 9774, + 0, + 9775, + 0, + 0, + 0, + 9776, + 9777, + 9784, + 0, + 0, + 0, + 9786, + 0, + 9789, + 0, + 0, + 0, + 0, + 9793, + 9794, + 0, + 0, + 0, + 9808, + 0, + 0, + 0, + 0, + 0, + 9811, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9812, + 0, + 9820, + 0, + 9823, + 0, + 9828, + 0, + 0, + 0, + 0, + 9830, + 0, + 0, + 9833, + 9836, + 0, + 0, + 0, + 9840, + 0, + 0, + 0, + 9841, + 0, + 0, + 9842, + 0, + 9845, + 0, + 0, + 0, + 9847, + 9848, + 0, + 0, + 9855, + 0, + 0, + 0, + 0, + 0, + 0, + 9856, + 9863, + 9865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9866, + 9867, + 9868, + 9873, + 9875, + 0, + 0, + 0, + 0, + 0, + 0, + 9880, + 0, + 9886, + 0, + 0, + 0, + 9887, + 0, + 0, + 9891, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9906, + 9907, + 9908, + 0, + 0, + 0, + 9909, + 0, + 0, + 0, + 0, + 0, + 0, + 9910, + 0, + 0, + 0, + 0, + 9913, + 0, + 0, + 0, + 0, + 9914, + 0, + 0, + 0, + 0, + 0, + 9922, + 0, + 0, + 0, + 0, + 9923, + 9925, + 0, + 0, + 0, + 0, + 0, + 0, + 9930, + 0, + 0, + 0, + 9931, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9932, + 0, + 9939, + 0, + 0, + 9940, + 9962, + 9966, + 0, + 9969, + 9970, + 0, + 0, + 9974, + 0, + 9979, + 9981, + 9982, + 0, + 0, + 0, + 9985, + 0, + 0, + 0, + 0, + 0, + 0, + 9987, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9988, + 9993, + 0, + 0, + 9994, + 0, + 0, + 
0, + 9997, + 0, + 10004, + 0, + 0, + 0, + 0, + 0, + 10007, + 10019, + 10020, + 10022, + 0, + 0, + 0, + 10031, + 0, + 0, + 0, + 0, + 0, + 10032, + 0, + 0, + 10034, + 0, + 10036, + 0, + 0, + 0, + 0, + 10038, + 0, + 10039, + 10040, + 10041, + 10042, + 0, + 0, + 0, + 0, + 0, + 10043, + 0, + 0, + 0, + 0, + 0, + 10045, + 10054, + 0, + 0, + 0, + 0, + 10055, + 0, + 0, + 10057, + 10058, + 0, + 0, + 0, + 0, + 0, + 0, + 10059, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10060, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10063, + 0, + 10066, + 0, + 0, + 0, + 10070, + 0, + 10072, + 0, + 0, + 10076, + 10077, + 0, + 0, + 10084, + 0, + 10087, + 10090, + 10091, + 0, + 0, + 0, + 10094, + 10097, + 0, + 0, + 0, + 0, + 0, + 0, + 10098, + 0, + 0, + 0, + 0, + 0, + 0, + 10103, + 0, + 10104, + 0, + 10108, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10120, + 0, + 0, + 0, + 10122, + 0, + 0, + 10125, + 0, + 0, + 0, + 0, + 10127, + 10128, + 0, + 0, + 10134, + 0, + 10135, + 10136, + 0, + 10137, + 0, + 0, + 10147, + 0, + 10149, + 10150, + 0, + 0, + 10156, + 0, + 10158, + 10159, + 10160, + 10168, + 0, + 0, + 10171, + 0, + 10173, + 0, + 0, + 0, + 10176, + 0, + 0, + 0, + 0, + 10177, + 0, + 0, + 0, + 0, + 10178, + 0, + 0, + 0, + 0, + 10194, + 0, + 10202, + 0, + 0, + 10203, + 10204, + 0, + 10205, + 10206, + 0, + 10207, + 0, + 0, + 0, + 0, + 10209, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10213, + 0, + 0, + 0, + 0, + 0, + 0, + 10217, + 0, + 10229, + 0, + 10230, + 10231, + 0, + 0, + 10232, + 0, + 0, + 10237, + 10238, + 10244, + 0, + 0, + 0, + 0, + 0, + 10250, + 0, + 10252, + 0, + 0, + 0, + 0, + 0, + 0, + 10255, + 0, + 0, + 10257, + 0, + 0, + 0, + 0, + 0, + 0, + 10258, + 0, + 10259, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10260, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10284, + 10288, + 10289, + 0, + 0, + 0, + 10290, + 0, + 10296, + 0, + 0, + 0, + 0, + 0, + 10297, + 0, + 0, + 0, + 0, + 0, + 0, + 10298, + 0, + 0, + 0, + 0, + 10299, + 10303, + 0, + 0, + 0, + 0, + 0, + 10306, + 0, + 0, + 0, + 10307, + 0, + 10308, + 0, + 0, + 0, + 0, + 10311, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10315, + 10317, + 0, + 0, + 0, + 10318, + 10319, + 0, + 10321, + 0, + 10326, + 0, + 10328, + 0, + 0, + 0, + 0, + 10329, + 0, + 0, + 10331, + 0, + 10332, + 0, + 0, + 0, + 0, + 0, + 0, + 10334, + 0, + 0, + 10335, + 10338, + 0, + 0, + 0, + 0, + 0, + 10339, + 10349, + 0, + 0, + 0, + 0, + 0, + 0, + 10351, + 0, + 10353, + 0, + 0, + 0, + 0, + 0, + 0, + 10362, + 0, + 10368, + 0, + 10369, + 0, + 0, + 0, + 10372, + 10373, + 0, + 0, + 0, + 0, + 0, + 10374, + 0, + 0, + 0, + 10375, + 0, + 10376, + 0, + 0, + 10386, + 10388, + 10390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10391, + 0, + 0, + 10392, + 10394, + 0, + 0, + 10396, + 0, + 10397, + 0, + 10403, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10404, + 0, + 10405, + 10410, + 0, + 0, + 10411, + 0, + 10412, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10421, + 10422, + 10423, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10425, + 0, + 0, + 10427, + 0, + 0, + 10430, + 0, + 0, + 0, + 0, + 0, + 10432, + 0, + 10433, + 10434, + 0, + 0, + 0, + 0, + 10436, + 10437, + 0, + 10438, + 0, + 10439, + 0, + 10444, + 10446, + 0, + 0, + 0, + 0, + 0, + 10448, + 0, + 0, + 0, + 0, + 0, + 10449, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10451, + 0, + 10453, + 0, + 0, + 0, + 10454, + 10457, + 0, + 0, + 10459, + 0, + 10469, + 0, + 0, + 0, + 0, + 0, + 10472, + 10481, + 0, + 0, + 0, + 0, + 0, + 10482, + 10483, + 0, + 10492, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10499, + 0, + 0, + 0, + 10502, + 0, + 0, + 10510, + 0, + 10521, + 10524, + 0, + 0, + 10525, + 10526, + 10528, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10530, + 0, + 0, + 0, + 0, + 10533, + 0, + 10534, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10535, + 10536, + 0, + 0, + 10544, + 0, + 10553, + 10556, + 0, + 10557, + 10559, + 0, + 0, + 0, + 0, + 0, + 10562, + 10563, + 10564, + 0, + 10565, + 0, + 0, + 0, + 10566, + 0, + 10567, + 0, + 0, + 0, + 0, + 10575, + 0, + 0, + 10576, + 0, + 10578, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10585, + 10586, + 10587, + 10589, + 0, + 10590, + 0, + 0, + 10594, + 0, + 0, + 0, + 0, + 0, + 10598, + 0, + 0, + 10601, + 0, + 0, + 0, + 10602, + 0, + 10603, + 0, + 10604, + 0, + 10605, + 0, + 0, + 10607, + 0, + 10626, + 0, + 10627, + 0, + 0, + 0, + 0, + 0, + 10629, + 10630, + 10631, + 0, + 0, + 0, + 10646, + 0, + 0, + 0, + 10647, + 0, + 10650, + 0, + 10651, + 0, + 0, + 0, + 10652, + 10653, + 10655, + 0, + 10658, + 0, + 0, + 10659, + 0, + 10667, + 0, + 0, + 0, + 0, + 10669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10670, + 0, + 0, + 0, + 10671, + 0, + 0, + 0, + 0, + 10672, + 10673, + 0, + 10674, + 0, + 0, + 0, + 10676, + 0, + 0, + 0, + 0, + 0, + 0, + 10678, + 0, + 10682, + 0, + 0, + 10692, + 0, + 10697, + 0, + 0, + 0, + 0, + 10698, + 0, + 0, + 0, + 10700, + 0, + 0, + 0, + 0, + 0, + 10703, + 0, + 10704, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10705, + 0, + 10715, + 10718, + 10720, + 0, + 0, + 10722, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10723, + 0, + 0, + 0, + 0, + 10726, + 0, + 0, + 0, + 0, + 0, + 10727, + 10730, + 10743, + 0, + 0, + 0, + 0, + 0, + 0, + 10744, + 0, + 0, + 10745, + 0, + 0, + 0, + 0, + 0, + 0, + 10748, + 0, + 0, + 0, + 0, + 10750, + 0, + 0, + 10752, + 10753, + 0, + 0, + 0, + 10756, + 0, + 0, + 0, + 0, + 0, + 0, + 10758, + 0, + 0, + 0, + 10759, + 0, + 10769, + 0, + 0, + 10772, + 0, + 0, + 0, + 0, + 0, + 0, + 10773, + 0, + 0, + 0, + 10777, + 0, + 0, + 10779, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10780, + 10784, + 0, + 0, + 0, + 10789, + 0, + 0, + 0, + 10791, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10795, + 0, + 0, + 10796, + 0, + 10808, + 0, + 10809, + 0, + 0, + 0, + 10810, + 0, + 0, + 0, + 10812, + 0, + 0, + 10814, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10815, + 0, + 0, + 0, + 0, + 10816, + 10817, + 0, + 0, + 0, + 0, + 10819, + 0, + 10820, + 0, + 0, + 0, + 0, + 10821, + 10822, + 10823, + 0, + 10826, + 10849, + 0, + 0, + 0, + 0, + 10850, + 0, + 0, + 10852, + 0, + 10853, + 0, + 0, + 10856, + 0, + 0, + 10857, + 10858, + 10859, + 10860, + 0, + 0, + 0, + 0, + 0, + 0, + 10863, + 0, + 10866, + 10867, + 10872, + 10890, + 0, + 0, + 10891, + 10892, + 0, + 0, + 0, + 0, + 0, + 10893, + 0, + 0, + 0, + 10896, + 10899, + 0, + 0, + 10900, + 10902, + 0, + 0, + 0, + 0, + 0, + 10903, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10905, + 0, + 10906, + 0, + 0, + 0, + 0, + 10908, + 10911, + 0, + 10912, + 0, + 0, + 10916, + 0, + 0, + 0, + 0, + 0, + 10917, + 0, + 10918, + 0, + 0, + 0, + 10923, + 0, + 0, + 0, + 0, + 0, + 10924, + 0, + 0, + 10928, + 10929, + 0, + 0, + 10930, + 0, + 0, + 0, + 10932, + 0, + 0, + 0, + 0, + 10939, + 0, + 0, + 10945, + 0, + 0, + 0, + 10947, + 0, + 0, + 10948, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10958, + 0, + 10960, + 10962, + 0, + 0, + 10964, + 0, + 0, + 0, + 10966, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10967, + 0, + 0, + 0, + 10968, + 0, + 0, + 0, + 10973, + 0, + 0, + 0, + 0, + 0, + 10975, + 0, + 0, + 0, + 10976, + 10978, + 0, + 0, + 10982, + 10984, + 10987, + 0, + 0, + 10988, + 0, + 10989, + 0, + 0, + 10991, + 0, + 0, + 0, + 0, + 10992, + 0, + 0, + 0, + 
10993, + 0, + 10995, + 0, + 0, + 0, + 10996, + 10997, + 0, + 0, + 0, + 10998, + 0, + 10999, + 0, + 11001, + 0, + 0, + 0, + 0, + 0, + 0, + 11010, + 11012, + 0, + 11013, + 11016, + 11017, + 0, + 0, + 11019, + 11020, + 11021, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11022, + 0, + 0, + 11023, + 11029, + 0, + 0, + 0, + 0, + 11031, + 0, + 0, + 0, + 11034, + 0, + 0, + 0, + 0, + 11055, + 0, + 0, + 0, + 0, + 0, + 11056, + 11060, + 0, + 0, + 0, + 0, + 0, + 0, + 11061, + 0, + 0, + 11064, + 11065, + 0, + 11066, + 0, + 11069, + 0, + 11085, + 0, + 0, + 0, + 0, + 0, + 11086, + 0, + 0, + 0, + 11088, + 0, + 0, + 0, + 11094, + 0, + 0, + 0, + 11095, + 11096, + 0, + 0, + 0, + 0, + 0, + 0, + 11097, + 11098, + 0, + 0, + 0, + 0, + 0, + 0, + 11099, + 0, + 0, + 11102, + 11108, + 0, + 0, + 0, + 11109, + 0, + 11114, + 11119, + 0, + 11131, + 0, + 0, + 0, + 11142, + 0, + 0, + 11143, + 0, + 11146, + 0, + 11147, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11148, + 0, + 11149, + 11152, + 11153, + 11154, + 0, + 11156, + 0, + 11157, + 0, + 0, + 0, + 11158, + 0, + 0, + 11159, + 11160, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11163, + 0, + 0, + 11164, + 11166, + 0, + 0, + 0, + 11172, + 11174, + 0, + 0, + 0, + 11176, + 0, + 0, + 0, + 0, + 0, + 11182, + 11183, + 0, + 0, + 0, + 11184, + 11187, + 0, + 0, + 11188, + 11189, + 0, + 0, + 0, + 0, + 0, + 0, + 11194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11200, + 11202, + 0, + 0, + 0, + 0, + 0, + 0, + 11203, + 0, + 11204, + 0, + 0, + 0, + 0, + 0, + 11205, + 0, + 0, + 0, + 11206, + 0, + 11207, + 0, + 0, + 11209, + 0, + 11211, + 0, + 11214, + 0, + 0, + 11231, + 0, + 0, + 0, + 11293, + 11295, + 0, + 0, + 11296, + 11297, + 11302, + 0, + 0, + 0, + 11307, + 0, + 0, + 0, + 0, + 11309, + 11310, + 0, + 11311, + 0, + 0, + 0, + 11313, + 0, + 11314, + 0, + 0, + 0, + 0, + 11334, + 0, + 11338, + 0, + 0, + 0, + 11339, + 0, + 0, + 0, + 0, + 0, + 11340, + 0, + 11341, + 11342, + 0, + 11344, + 0, + 11345, + 0, + 0, + 0, + 11348, + 11349, + 0, + 0, + 11350, + 0, + 0, + 0, + 11355, + 0, + 0, + 0, + 0, + 0, + 0, + 11356, + 0, + 11357, + 11370, + 0, + 0, + 11371, + 0, + 11374, + 11376, + 0, + 0, + 0, + 11377, + 0, + 0, + 11378, + 11383, + 0, + 11386, + 11399, + 0, + 11400, + 11406, + 0, + 0, + 0, + 11408, + 0, + 0, + 11409, + 11412, + 0, + 0, + 0, + 0, + 11417, + 0, + 0, + 0, + 11418, + 0, + 11421, + 0, + 11426, + 11429, + 0, + 0, + 0, + 0, + 0, + 11430, + 0, + 11437, + 0, + 11438, + 0, + 0, + 0, + 0, + 0, + 11440, + 11453, + 0, + 0, + 0, + 0, + 0, + 0, + 11454, + 0, + 0, + 0, + 0, + 11455, + 0, + 0, + 11456, + 11460, + 11461, + 11463, + 0, + 11469, + 0, + 11473, + 0, + 0, + 0, + 0, + 11474, + 0, + 0, + 0, + 11475, + 0, + 11476, + 11477, + 11480, + 0, + 0, + 0, + 0, + 11481, + 0, + 0, + 11484, + 0, + 0, + 11487, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11497, + 0, + 0, + 11502, + 0, + 11509, + 0, + 0, + 11510, + 11511, + 11513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11515, + 0, + 0, + 0, + 0, + 11516, + 0, + 11520, + 11521, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11529, + 11530, + 11531, + 11534, + 0, + 0, + 11543, + 0, + 0, + 0, + 0, + 0, + 11547, + 0, + 11548, + 0, + 0, + 0, + 0, + 0, + 11552, + 11556, + 0, + 11557, + 0, + 0, + 11559, + 0, + 11560, + 0, + 0, + 0, + 0, + 0, + 0, + 11561, + 0, + 0, + 11563, + 11564, + 0, + 11565, + 0, + 0, + 0, + 0, + 11567, + 0, + 0, + 0, + 11569, + 0, + 11574, + 0, + 11575, + 0, + 0, + 0, + 11577, + 0, + 11578, + 0, + 0, + 0, + 11580, + 11581, + 0, + 0, + 0, + 
11582, + 11584, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11587, + 0, + 11588, + 11591, + 0, + 11595, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11596, + 0, + 11597, + 0, + 0, + 0, + 0, + 11598, + 11601, + 0, + 0, + 0, + 11602, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11603, + 11604, + 0, + 11606, + 0, + 0, + 11608, + 0, + 0, + 0, + 0, + 11610, + 0, + 0, + 11611, + 0, + 0, + 0, + 0, + 11613, + 0, + 11622, + 0, + 0, + 0, + 11623, + 0, + 0, + 0, + 0, + 11625, + 0, + 0, + 11626, + 11627, + 11628, + 11630, + 0, + 0, + 0, + 0, + 0, + 0, + 11639, + 0, + 0, + 11646, + 0, + 11648, + 11649, + 0, + 11650, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11651, + 0, + 0, + 11652, + 11653, + 11656, + 0, + 0, + 11677, + 11679, + 0, + 0, + 0, + 0, + 11680, + 0, + 0, + 11681, + 0, + 11685, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11688, + 0, + 0, + 0, + 11716, + 0, + 11719, + 0, + 0, + 0, + 0, + 0, + 11721, + 0, + 0, + 11724, + 11743, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11745, + 11748, + 11750, + 0, + 0, + 0, + 0, + 0, + 11751, + 0, + 0, + 0, + 11752, + 11754, + 0, + 11755, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11759, + 0, + 0, + 0, + 0, + 0, + 0, + 11760, + 0, + 0, + 0, + 11761, + 0, + 0, + 0, + 0, + 0, + 0, + 11766, + 11767, + 0, + 11772, + 11773, + 0, + 11774, + 0, + 0, + 11775, + 0, + 11777, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11778, + 11780, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11783, + 0, + 11784, + 0, + 0, + 0, + 11785, + 0, + 0, + 0, + 11786, + 0, + 0, + 0, + 0, + 11788, + 0, + 0, + 11789, + 11791, + 11792, + 0, + 0, + 0, + 0, + 11795, + 11834, + 11835, + 11836, + 0, + 0, + 11837, + 0, + 0, + 0, + 11838, + 0, + 0, + 11846, + 11851, + 0, + 11852, + 0, + 11869, + 0, + 0, + 0, + 11871, + 0, + 0, + 0, + 11872, + 11874, + 0, + 0, + 0, + 0, + 0, + 0, + 11875, + 0, + 11876, + 11877, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11883, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11884, + 0, + 11885, + 0, + 11886, + 0, + 0, + 11887, + 0, + 11894, + 11895, + 11897, + 11909, + 11910, + 0, + 11912, + 11918, + 0, + 0, + 11920, + 0, + 11922, + 11924, + 11927, + 11928, + 0, + 0, + 0, + 0, + 11929, + 0, + 11934, + 0, + 0, + 0, + 0, + 0, + 11941, + 11943, + 11944, + 0, + 11945, + 0, + 0, + 0, + 0, + 11948, + 11949, + 0, + 0, + 0, + 0, + 11953, + 0, + 11954, + 0, + 11955, + 0, + 11956, + 0, + 0, + 0, + 0, + 0, + 11957, + 0, + 0, + 11959, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11961, + 0, + 0, + 0, + 0, + 0, + 11978, + 0, + 0, + 0, + 11979, + 11980, + 11986, + 11987, + 0, + 11992, + 0, + 0, + 0, + 0, + 0, + 11993, + 0, + 0, + 0, + 11994, + 0, + 11999, + 12004, + 12005, + 12006, + 0, + 0, + 0, + 0, + 0, + 12011, + 0, + 0, + 12012, + 12014, + 0, + 0, + 12015, + 0, + 0, + 12019, + 12028, + 0, + 0, + 12029, + 0, + 0, + 12032, + 12033, + 0, + 0, + 0, + 0, + 12034, + 0, + 12041, + 12043, + 0, + 0, + 12044, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12046, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12054, + 12055, + 0, + 12056, + 0, + 0, + 0, + 12060, + 12064, + 0, + 0, + 0, + 0, + 0, + 12065, + 12067, + 12068, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12074, + 0, + 0, + 0, + 12075, + 12076, + 0, + 0, + 0, + 12079, + 0, + 12081, + 12086, + 12087, + 0, + 0, + 12088, + 0, + 0, + 0, + 0, + 12089, + 0, + 12092, + 0, + 0, + 0, + 0, + 12097, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12102, + 12103, + 12104, + 12111, + 0, + 0, + 12114, + 12116, + 0, + 0, + 
0, + 12118, + 0, + 0, + 0, + 12119, + 12120, + 12128, + 0, + 0, + 0, + 0, + 12130, + 0, + 0, + 0, + 0, + 0, + 0, + 12131, + 0, + 0, + 0, + 12132, + 12134, + 0, + 0, + 0, + 0, + 12137, + 0, + 12139, + 0, + 12141, + 0, + 0, + 12142, + 0, + 0, + 0, + 12144, + 0, + 0, + 0, + 0, + 0, + 12145, + 0, + 12148, + 0, + 12153, + 0, + 0, + 0, + 0, + 12154, + 12171, + 12173, + 0, + 0, + 0, + 12175, + 0, + 0, + 0, + 0, + 12178, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12183, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12184, + 0, + 0, + 0, + 12186, + 0, + 0, + 0, + 0, + 0, + 12187, + 12188, + 0, + 0, + 12189, + 0, + 12196, + 0, + 12197, + 0, + 0, + 12198, + 0, + 12201, + 0, + 0, + 0, + 0, + 12203, + 0, + 12209, + 0, + 0, + 0, + 0, + 12210, + 12211, + 12212, + 12213, + 0, + 12217, + 12218, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12222, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12223, + 0, + 0, + 12229, + 0, + 0, + 0, + 0, + 12233, + 0, + 0, + 0, + 0, + 12234, + 0, + 0, + 12236, + 12242, + 0, + 0, + 0, + 12243, + 0, + 0, + 0, + 12244, + 12253, + 0, + 12254, + 12256, + 0, + 12257, + 0, + 0, + 12275, + 0, + 0, + 0, + 0, + 0, + 12277, + 0, + 0, + 0, + 0, + 0, + 12278, + 0, + 12289, + 0, + 0, + 12290, + 0, + 12292, + 12293, + 0, + 0, + 12294, + 0, + 12295, + 0, + 0, + 12296, + 0, + 12297, + 0, + 12298, + 0, + 0, + 0, + 0, + 12301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12309, + 0, + 12338, + 12340, + 0, + 0, + 0, + 0, + 12341, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12342, + 12343, + 0, + 12344, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12345, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12346, + 0, + 0, + 0, + 0, + 12348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12351, + 0, + 12355, + 12356, + 12357, + 0, + 0, + 12367, + 12370, + 12371, + 0, + 0, + 0, + 0, + 0, + 12372, + 12376, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12379, + 0, + 12382, + 0, + 12383, + 0, + 0, + 12384, + 0, + 0, + 0, + 0, + 12393, + 0, + 0, + 12394, + 0, + 0, + 0, + 0, + 12398, + 12403, + 0, + 0, + 12404, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12410, + 0, + 0, + 0, + 12411, + 0, + 0, + 0, + 12412, + 0, + 0, + 0, + 0, + 12420, + 0, + 12421, + 0, + 0, + 0, + 0, + 0, + 12423, + 0, + 12425, + 12429, + 0, + 0, + 0, + 12431, + 12432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12434, + 0, + 0, + 0, + 0, + 0, + 12435, + 12436, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12437, + 0, + 0, + 0, + 0, + 0, + 12438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12445, + 0, + 0, + 0, + 12450, + 12451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12452, + 12475, + 0, + 0, + 12493, + 12494, + 0, + 0, + 0, + 12495, + 0, + 0, + 0, + 0, + 12496, + 12502, + 12509, + 0, + 0, + 0, + 0, + 12510, + 0, + 12512, + 12513, + 0, + 0, + 0, + 0, + 12514, + 0, + 0, + 0, + 12515, + 0, + 12520, + 0, + 0, + 0, + 12524, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12527, + 0, + 0, + 0, + 12528, + 0, + 0, + 0, + 12529, + 0, + 0, + 0, + 0, + 0, + 12530, + 0, + 12535, + 0, + 0, + 12536, + 0, + 12538, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12540, + 0, + 12548, + 0, + 0, + 0, + 0, + 0, + 12550, + 0, + 0, + 0, + 12551, + 12552, + 0, + 0, + 0, + 12554, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12555, + 0, + 0, + 12562, + 0, + 12565, + 0, + 12566, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12569, + 0, + 0, + 0, + 12571, + 12574, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 12577, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12578, + 12579, + 12603, + 0, + 12608, + 0, + 0, + 12611, + 0, + 12612, + 0, + 12615, + 0, + 12625, + 0, + 0, + 0, + 0, + 12627, + 12646, + 0, + 12648, + 0, + 0, + 12657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12670, + 0, + 0, + 12671, + 0, + 12673, + 12677, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12679, + 0, + 12681, + 0, + 12682, + 12693, + 0, + 12694, + 0, + 12697, + 0, + 12701, + 0, + 0, + 0, + 12703, + 12704, + 0, + 0, + 0, + 0, + 12707, + 12737, + 0, + 0, + 12739, + 0, + 0, + 12740, + 0, + 0, + 12742, + 12743, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12745, + 0, + 12746, + 12747, + 0, + 12748, + 0, + 0, + 12759, + 12767, + 0, + 0, + 0, + 0, + 12773, + 0, + 12774, + 12778, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12779, + 0, + 0, + 0, + 0, + 0, + 12780, + 12793, + 0, + 12824, + 0, + 12825, + 0, + 12836, + 0, + 0, + 0, + 0, + 12839, + 0, + 12842, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12843, + 12845, + 0, + 12846, + 0, + 0, + 0, + 0, + 12847, + 0, + 0, + 12850, + 12852, + 12853, + 0, + 0, + 0, + 12854, + 0, + 0, + 0, + 12855, + 0, + 12856, + 0, + 12858, + 0, + 0, + 12859, + 0, + 12862, + 0, + 12863, + 0, + 0, + 12866, + 0, + 12869, + 12872, + 12873, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12875, + 0, + 12877, + 0, + 0, + 12878, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12884, + 12885, + 12888, + 0, + 12889, + 0, + 0, + 0, + 0, + 12893, + 0, + 0, + 0, + 12895, + 12896, + 12898, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12902, + 0, + 12909, + 12910, + 0, + 12926, + 0, + 12928, + 0, + 0, + 0, + 12929, + 0, + 12930, + 0, + 0, + 0, + 0, + 12931, + 0, + 12932, + 12933, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12934, + 0, + 12942, + 0, + 0, + 0, + 0, + 12944, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12946, + 0, + 0, + 12948, + 0, + 0, + 12949, + 0, + 0, + 0, + 0, + 12950, + 0, + 0, + 0, + 0, + 12951, + 0, + 12952, + 0, + 12953, + 0, + 0, + 0, + 12954, + 12958, + 12959, + 0, + 0, + 0, + 0, + 0, + 12960, + 12964, + 0, + 0, + 0, + 0, + 0, + 12966, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12970, + 0, + 12971, + 0, + 0, + 0, + 0, + 0, + 0, + 12972, + 0, + 0, + 12982, + 0, + 0, + 0, + 12984, + 12985, + 0, + 12986, + 12996, + 12997, + 13001, + 13002, + 0, + 0, + 0, + 0, + 13004, + 0, + 0, + 13005, + 0, + 0, + 13007, + 13009, + 0, + 13017, + 0, + 0, + 0, + 13020, + 0, + 13021, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13022, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13024, + 13027, + 0, + 0, + 0, + 0, + 0, + 13028, + 0, + 0, + 13029, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13032, + 0, + 13037, + 0, + 0, + 0, + 0, + 0, + 0, + 13040, + 0, + 0, + 13041, + 0, + 0, + 0, + 13043, + 13044, + 13046, + 0, + 0, + 0, + 0, + 13047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13049, + 13054, + 0, + 13056, + 0, + 0, + 13060, + 13061, + 0, + 0, + 0, + 0, + 0, + 13067, + 0, + 0, + 13068, + 0, + 13071, + 0, + 0, + 0, + 0, + 0, + 13077, + 13078, + 0, + 0, + 0, + 0, + 0, + 13079, + 13080, + 13081, + 0, + 13082, + 0, + 0, + 0, + 13085, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13086, + 0, + 13087, + 13088, + 0, + 0, + 0, + 0, + 0, + 13094, + 0, + 13099, + 0, + 13100, + 0, + 0, + 0, + 13101, + 0, + 13125, + 13126, + 13128, + 13129, + 0, + 0, + 13130, + 0, + 13131, + 0, + 0, + 0, + 0, + 0, + 0, + 13134, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13150, + 0, + 13168, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
13169, + 0, + 0, + 13170, + 0, + 0, + 0, + 0, + 13174, + 0, + 0, + 0, + 13176, + 0, + 0, + 0, + 0, + 0, + 13177, + 0, + 13178, + 13183, + 13187, + 0, + 0, + 0, + 13189, + 0, + 0, + 13190, + 0, + 0, + 13191, + 0, + 0, + 13206, + 0, + 0, + 0, + 13207, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13212, + 0, + 0, + 13219, + 13232, + 0, + 0, + 0, + 13241, + 0, + 13249, + 13253, + 0, + 0, + 0, + 0, + 0, + 13255, + 13259, + 0, + 13260, + 13261, + 0, + 13262, + 0, + 13272, + 0, + 0, + 0, + 0, + 13276, + 0, + 0, + 0, + 0, + 13277, + 13299, + 0, + 0, + 13301, + 13302, + 0, + 0, + 13303, + 0, + 0, + 13305, + 0, + 13310, + 0, + 0, + 0, + 13311, + 0, + 0, + 0, + 0, + 13325, + 0, + 13328, + 0, + 0, + 0, + 13329, + 0, + 0, + 0, + 0, + 0, + 0, + 13330, + 0, + 0, + 13331, + 0, + 13335, + 0, + 0, + 13342, + 0, + 0, + 0, + 0, + 0, + 13343, + 0, + 13354, + 0, + 13362, + 0, + 13366, + 13367, + 13369, + 0, + 0, + 13371, + 13372, + 0, + 13373, + 13374, + 0, + 13376, + 0, + 13380, + 13381, + 13386, + 0, + 13387, + 13388, + 0, + 13389, + 13391, + 13395, + 0, + 0, + 0, + 0, + 0, + 13401, + 13409, + 0, + 13410, + 0, + 0, + 0, + 0, + 13420, + 0, + 0, + 0, + 0, + 0, + 13422, + 0, + 0, + 0, + 0, + 13423, + 0, + 0, + 0, + 0, + 13425, + 0, + 0, + 0, + 0, + 0, + 13427, + 0, + 0, + 0, + 13428, + 0, + 0, + 13430, + 13438, + 0, + 13439, + 0, + 13445, + 0, + 13448, + 13449, + 0, + 0, + 0, + 0, + 0, + 0, + 13451, + 0, + 13457, + 0, + 0, + 0, + 0, + 13458, + 13459, + 0, + 13460, + 0, + 0, + 0, + 0, + 13464, + 13465, + 13466, + 13470, + 0, + 13471, + 13472, + 13474, + 13475, + 0, + 13476, + 0, + 0, + 13478, + 13479, + 0, + 13481, + 0, + 0, + 0, + 0, + 13487, + 0, + 13490, + 0, + 13493, + 0, + 0, + 13494, + 0, + 0, + 13495, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13496, + 13497, + 0, + 13500, + 0, + 0, + 13516, + 13522, + 0, + 0, + 13525, + 13528, + 0, + 0, + 0, + 13530, + 13535, + 0, + 13537, + 13539, + 0, + 13540, + 0, + 13543, + 0, + 13544, + 0, + 0, + 0, + 0, + 0, + 0, + 13545, + 0, + 0, + 0, + 0, + 0, + 0, + 13547, + 0, + 0, + 0, + 13549, + 13555, + 0, + 0, + 0, + 13556, + 13557, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13558, + 0, + 13563, + 0, + 0, + 0, + 0, + 13564, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13566, + 0, + 0, + 0, + 0, + 0, + 0, + 13569, + 0, + 0, + 13571, + 0, + 0, + 0, + 0, + 13573, + 0, + 0, + 0, + 0, + 0, + 0, + 13578, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13581, + 0, + 13586, + 0, + 13595, + 0, + 13600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13601, + 13603, + 0, + 13604, + 13605, + 13606, + 13607, + 0, + 0, + 13617, + 13618, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13623, + 0, + 13625, + 13627, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13629, + 0, + 0, + 0, + 13634, + 0, + 0, + 0, + 13638, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13654, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13656, + 0, + 13659, + 0, + 0, + 13660, + 0, + 0, + 13662, + 0, + 0, + 0, + 13663, + 0, + 13664, + 0, + 0, + 0, + 0, + 0, + 13668, + 0, + 13669, + 13671, + 0, + 0, + 13672, + 0, + 0, + 0, + 0, + 0, + 0, + 13675, + 13685, + 0, + 13686, + 0, + 0, + 0, + 13687, + 0, + 0, + 0, + 13692, + 13694, + 13697, + 0, + 0, + 0, + 13702, + 0, + 0, + 0, + 0, + 0, + 13705, + 0, + 0, + 0, + 0, + 13707, + 0, + 0, + 0, + 13714, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13715, + 0, + 13716, + 13717, + 0, + 0, + 13719, + 13724, + 13730, + 13731, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13732, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13734, + 0, + 13736, + 0, + 0, + 13737, + 13738, + 13747, 
+ 0, + 13751, + 0, + 0, + 13752, + 0, + 0, + 0, + 13753, + 0, + 13757, + 0, + 0, + 13762, + 13763, + 0, + 13764, + 13765, + 0, + 13766, + 0, + 0, + 13767, + 0, + 0, + 0, + 13768, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13769, + 0, + 0, + 13772, + 0, + 13775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13776, + 13778, + 13787, + 0, + 0, + 0, + 13797, + 0, + 13798, + 0, + 13801, + 0, + 13804, + 13806, + 0, + 0, + 0, + 0, + 13816, + 13817, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13834, + 0, + 13836, + 0, + 0, + 13838, + 0, + 0, + 13839, + 0, + 13840, + 0, + 0, + 0, + 0, + 13842, + 0, + 0, + 0, + 0, + 0, + 0, + 13843, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13845, + 0, + 0, + 0, + 0, + 0, + 13858, + 0, + 0, + 13860, + 0, + 0, + 13861, + 0, + 0, + 13862, + 13863, + 0, + 13868, + 0, + 13869, + 13870, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13872, + 0, + 0, + 0, + 0, + 13873, + 13878, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13886, + 0, + 13888, + 13889, + 13890, + 0, + 0, + 13891, + 13894, + 0, + 13897, + 13899, + 13900, + 13904, + 0, + 0, + 13906, + 0, + 0, + 0, + 13909, + 0, + 0, + 0, + 13910, + 0, + 0, + 0, + 13911, + 0, + 0, + 0, + 0, + 0, + 13912, + 13917, + 0, + 0, + 0, + 0, + 13918, + 0, + 13919, + 0, + 0, + 13920, + 0, + 0, + 0, + 13921, + 0, + 0, + 13922, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13924, + 0, + 13927, + 0, + 0, + 0, + 0, + 0, + 13932, + 0, + 13933, + 0, + 13934, + 0, + 0, + 13935, + 0, + 13944, + 0, + 0, + 0, + 13954, + 0, + 0, + 13955, + 0, + 0, + 0, + 0, + 13956, + 0, + 13957, + 0, + 13967, + 13969, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13970, + 13990, + 0, + 13991, + 13994, + 0, + 13995, + 0, + 0, + 0, + 0, + 13996, + 0, + 0, + 13999, + 0, + 0, + 0, + 14018, + 0, + 14019, + 0, + 14021, + 0, + 0, + 0, + 0, + 0, + 0, + 14041, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14043, + 0, + 0, + 0, + 0, + 14046, + 0, + 0, + 0, + 14048, + 14049, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14051, + 0, + 0, + 14052, + 14056, + 0, + 14063, + 0, + 14064, + 14066, + 0, + 0, + 14067, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14068, + 0, + 0, + 0, + 14072, + 0, + 14074, + 14075, + 0, + 14076, + 14079, + 14085, + 14086, + 14087, + 14093, + 0, + 0, + 0, + 0, + 14095, + 0, + 0, + 0, + 0, + 0, + 0, + 14096, + 14097, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14098, + 0, + 14102, + 0, + 0, + 0, + 0, + 0, + 14103, + 0, + 0, + 0, + 14104, + 0, + 0, + 14105, + 0, + 0, + 0, + 14107, + 14108, + 0, + 0, + 14109, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14117, + 0, + 0, + 0, + 0, + 14118, + 0, + 0, + 0, + 0, + 14119, + 0, + 0, + 14120, + 0, + 0, + 14121, + 0, + 14122, + 14127, + 0, + 14128, + 14136, + 0, + 0, + 14138, + 0, + 14140, + 0, + 0, + 0, + 14141, + 14142, + 0, + 0, + 0, + 0, + 14146, + 0, + 0, + 14149, + 0, + 14151, + 0, + 0, + 0, + 14152, + 0, + 0, + 14153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14154, + 0, + 14156, + 14157, + 0, + 0, + 14159, + 0, + 14161, + 0, + 0, + 0, + 0, + 14162, + 0, + 0, + 0, + 0, + 0, + 0, + 14163, + 0, + 0, + 14173, + 0, + 0, + 0, + 0, + 0, + 0, + 14174, + 0, + 0, + 14176, + 0, + 0, + 14178, + 0, + 0, + 14179, + 14181, + 0, + 0, + 14182, + 14185, + 14187, + 0, + 14190, + 0, + 0, + 14197, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14198, + 0, + 0, + 0, + 0, + 0, + 0, + 14199, + 14200, + 0, + 0, + 0, + 14204, + 0, + 0, + 14208, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14231, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 14234, + 0, + 0, + 14235, + 0, + 0, + 0, + 14240, + 14241, + 0, + 0, + 0, + 14246, + 0, + 0, + 0, + 14247, + 0, + 14250, + 0, + 0, + 14251, + 0, + 0, + 14254, + 0, + 0, + 14256, + 0, + 0, + 0, + 14260, + 0, + 14261, + 0, + 0, + 0, + 0, + 14262, + 14267, + 14269, + 0, + 0, + 14277, + 0, + 0, + 14278, + 0, + 14279, + 14282, + 0, + 0, + 0, + 14283, + 0, + 0, + 0, + 14284, + 14285, + 0, + 0, + 0, + 0, + 14286, + 0, + 0, + 0, + 14288, + 0, + 0, + 0, + 14289, + 0, + 14290, + 0, + 14293, + 14301, + 14302, + 14304, + 14305, + 0, + 14307, + 0, + 14308, + 14309, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14311, + 14312, + 0, + 0, + 14317, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14318, + 0, + 0, + 0, + 0, + 14320, + 0, + 0, + 0, + 0, + 14321, + 14322, + 0, + 0, + 0, + 0, + 0, + 14326, + 14329, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14330, + 14331, + 0, + 0, + 0, + 0, + 14332, + 0, + 0, + 0, + 14333, + 0, + 0, + 14337, + 14340, + 0, + 14341, + 0, + 0, + 14342, + 0, + 14345, + 14346, + 0, + 0, + 14347, + 0, + 14362, + 0, + 0, + 0, + 0, + 0, + 14364, + 14365, + 14371, + 0, + 14373, + 0, + 0, + 14374, + 0, + 14379, + 0, + 14400, + 0, + 0, + 0, + 0, + 0, + 14401, + 0, + 0, + 14405, + 0, + 14406, + 0, + 14408, + 14409, + 0, + 0, + 0, + 14417, + 0, + 0, + 14424, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14430, + 0, + 0, + 0, + 14431, + 0, + 0, + 14435, + 0, + 14440, + 0, + 0, + 0, + 0, + 0, + 0, + 14442, + 0, + 0, + 14443, + 0, + 0, + 0, + 0, + 0, + 14446, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14454, + 0, + 14457, + 0, + 14460, + 0, + 0, + 14466, + 0, + 0, + 0, + 0, + 0, + 14467, + 0, + 0, + 0, + 0, + 0, + 0, + 14469, + 0, + 14477, + 0, + 0, + 0, + 0, + 0, + 0, + 14478, + 14482, + 0, + 0, + 0, + 14483, + 0, + 0, + 0, + 14485, + 14486, + 0, + 0, + 0, + 14487, + 14488, + 14489, + 14492, + 14493, + 14494, + 14495, + 14496, + 14497, + 0, + 14499, + 0, + 14501, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14502, + 0, + 14507, + 14512, + 14513, + 14514, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14515, + 14526, + 14530, + 0, + 14537, + 0, + 14544, + 0, + 14547, + 0, + 0, + 14548, + 14550, + 14551, + 0, + 0, + 14552, + 0, + 0, + 0, + 14553, + 0, + 14554, + 0, + 0, + 0, + 0, + 14556, + 14564, + 0, + 0, + 14565, + 14566, + 0, + 0, + 0, + 0, + 0, + 0, + 14568, + 0, + 0, + 14569, + 0, + 0, + 0, + 14571, + 14576, + 0, + 0, + 14577, + 14578, + 14579, + 0, + 0, + 14580, + 0, + 0, + 0, + 0, + 14582, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14583, + 0, + 0, + 0, + 0, + 0, + 14587, + 0, + 14588, + 0, + 0, + 14600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14601, + 0, + 0, + 14604, + 14605, + 14611, + 0, + 14613, + 0, + 0, + 0, + 0, + 14615, + 0, + 0, + 0, + 0, + 0, + 0, + 14627, + 0, + 14628, + 0, + 0, + 0, + 0, + 14631, + 0, + 14633, + 14634, + 0, + 0, + 0, + 0, + 14635, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14636, + 0, + 0, + 14639, + 14642, + 0, + 0, + 0, + 0, + 14644, + 0, + 0, + 0, + 0, + 14645, + 14646, + 0, + 14653, + 0, + 0, + 14654, + 0, + 14658, + 0, + 14661, + 0, + 0, + 0, + 14665, + 0, + 0, + 0, + 14668, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14669, + 0, + 0, + 14670, + 0, + 0, + 0, + 14680, + 0, + 0, + 14681, + 0, + 0, + 0, + 0, + 0, + 14682, + 14683, + 0, + 0, + 0, + 0, + 14686, + 0, + 0, + 0, + 0, + 14687, + 14697, + 0, + 0, + 0, + 0, + 14699, + 14705, + 14711, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14712, + 0, + 
0, + 0, + 14713, + 0, + 0, + 0, + 0, + 14719, + 0, + 14720, + 14721, + 14726, + 0, + 0, + 0, + 14728, + 14729, + 0, + 0, + 0, + 0, + 14731, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14733, + 14736, + 14737, + 0, + 0, + 14740, + 14742, + 0, + 0, + 0, + 14744, + 14753, + 0, + 0, + 0, + 0, + 14755, + 14758, + 14760, + 0, + 0, + 0, + 0, + 0, + 14761, + 14762, + 14765, + 14771, + 0, + 14772, + 0, + 14773, + 14774, + 0, + 0, + 14775, + 0, + 0, + 14776, + 0, + 0, + 0, + 0, + 14777, + 0, + 14779, + 0, + 0, + 14782, + 0, + 0, + 14785, + 14786, + 14788, + 0, + 0, + 0, + 0, + 0, + 14795, + 0, + 0, + 0, + 0, + 0, + 0, + 14798, + 0, + 14803, + 14804, + 14806, + 0, + 0, + 0, + 14809, + 0, + 0, + 0, + 0, + 0, + 0, + 14810, + 0, + 0, + 0, + 0, + 14811, + 0, + 14812, + 0, + 0, + 0, + 0, + 0, + 14815, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14816, + 0, + 14818, + 0, + 0, + 0, + 0, + 0, + 0, + 14819, + 0, + 14820, + 0, + 14823, + 0, + 0, + 0, + 14824, + 0, + 0, + 14826, + 14827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14830, + 0, + 0, + 0, + 0, + 0, + 14833, + 0, + 14845, + 0, + 0, + 0, + 0, + 0, + 14846, + 0, + 0, + 14847, + 14871, + 0, + 14873, + 0, + 14876, + 0, + 14877, + 14878, + 14880, + 0, + 0, + 0, + 0, + 0, + 14881, + 0, + 14882, + 14894, + 0, + 0, + 0, + 0, + 14895, + 0, + 14907, + 0, + 14908, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14911, + 0, + 0, + 0, + 0, + 14920, + 0, + 0, + 14931, + 0, + 14932, + 14934, + 14935, + 0, + 0, + 14936, + 0, + 14945, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14947, + 0, + 0, + 14948, + 14949, + 14951, + 0, + 0, + 14952, + 0, + 0, + 0, + 14964, + 14973, + 0, + 0, + 14990, + 0, + 0, + 0, + 0, + 14995, + 0, + 0, + 14998, + 15001, + 0, + 0, + 15002, + 15020, + 0, + 0, + 0, + 0, + 0, + 0, + 15021, + 0, + 15022, + 0, + 0, + 0, + 0, + 15023, + 0, + 0, + 15025, + 15029, + 15033, + 0, + 0, + 0, + 15034, + 0, + 0, + 0, + 15035, + 0, + 0, + 0, + 0, + 0, + 15043, + 15044, + 0, + 0, + 0, + 15045, + 15046, + 15048, + 15050, + 0, + 15065, + 0, + 0, + 0, + 0, + 15066, + 0, + 0, + 15075, + 15082, + 15084, + 0, + 0, + 15085, + 15086, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15088, + 0, + 0, + 0, + 15089, + 0, + 0, + 0, + 0, + 15094, + 0, + 15096, + 0, + 15097, + 0, + 15100, + 0, + 0, + 15102, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15105, + 0, + 0, + 15106, + 0, + 15109, + 15113, + 0, + 0, + 0, + 15115, + 0, + 15118, + 0, + 0, + 0, + 0, + 0, + 0, + 15119, + 0, + 0, + 15120, + 0, + 0, + 0, + 0, + 0, + 15123, + 15129, + 0, + 0, + 0, + 15130, + 0, + 15131, + 0, + 0, + 15134, + 0, + 15135, + 0, + 0, + 0, + 15137, + 15138, + 0, + 0, + 0, + 0, + 0, + 0, + 15139, + 0, + 0, + 0, + 0, + 0, + 15140, + 0, + 0, + 15154, + 15162, + 0, + 15169, + 15170, + 0, + 15175, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15177, + 0, + 15178, + 15179, + 0, + 0, + 0, + 0, + 0, + 15183, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15185, + 15187, + 0, + 15194, + 15195, + 15196, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15204, + 0, + 0, + 0, + 0, + 15206, + 0, + 0, + 0, + 0, + 0, + 15207, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15213, + 0, + 15214, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15232, + 0, + 0, + 0, + 0, + 15234, + 0, + 15238, + 15240, + 0, + 15248, + 0, + 0, + 0, + 0, + 15250, + 15251, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15252, + 0, + 0, + 0, + 15255, + 15262, + 15266, + 0, + 0, + 0, + 15267, + 0, + 0, + 0, + 15277, + 15279, + 0, + 0, + 0, + 15280, + 15281, + 15282, + 0, + 0, + 0, + 0, + 0, + 15285, + 0, + 0, + 0, + 0, + 15289, + 0, + 0, 
+ 15291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15296, + 15297, + 0, + 0, + 15304, + 0, + 0, + 0, + 0, + 15306, + 0, + 0, + 0, + 0, + 0, + 0, + 15307, + 15308, + 0, + 15309, + 0, + 0, + 15311, + 0, + 0, + 15312, + 15313, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15314, + 15317, + 0, + 0, + 0, + 15318, + 15319, + 0, + 0, + 0, + 0, + 15320, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15321, + 0, + 0, + 0, + 0, + 0, + 15324, + 0, + 15325, + 15326, + 0, + 15330, + 0, + 0, + 0, + 0, + 15334, + 0, + 15335, + 0, + 15341, + 0, + 0, + 15342, + 0, + 0, + 15343, + 15344, + 0, + 0, + 0, + 0, + 15345, + 0, + 0, + 0, + 0, + 15347, + 0, + 0, + 15348, + 15349, + 15350, + 0, + 15356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15357, + 0, + 15358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15359, + 15360, + 15364, + 0, + 15380, + 0, + 0, + 0, + 0, + 0, + 15392, + 0, + 0, + 15393, + 0, + 15395, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15396, + 0, + 0, + 15397, + 15398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15399, + 0, + 15400, + 0, + 0, + 0, + 15402, + 0, + 15405, + 15410, + 0, + 0, + 0, + 0, + 15411, + 0, + 0, + 0, + 15412, + 0, + 15416, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15428, + 0, + 15435, + 0, + 0, + 15438, + 0, + 0, + 0, + 0, + 15439, + 0, + 0, + 0, + 15440, + 0, + 0, + 0, + 15441, + 15449, + 15451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15452, + 0, + 0, + 15455, + 0, + 0, + 0, + 15456, + 0, + 0, + 15458, + 0, + 15460, + 15461, + 0, + 0, + 0, + 0, + 0, + 15462, + 15464, + 0, + 15465, + 0, + 0, + 15466, + 0, + 0, + 15467, + 0, + 0, + 0, + 0, + 0, + 15468, + 0, + 0, + 0, + 0, + 15481, + 0, + 0, + 15484, + 0, + 15485, + 15486, + 0, + 0, + 0, + 15487, + 0, + 0, + 0, + 0, + 0, + 15488, + 0, + 15492, + 15498, + 0, + 0, + 0, + 15499, + 0, + 0, + 0, + 15500, + 0, + 15501, + 0, + 0, + 15512, + 0, + 15522, + 0, + 0, + 0, + 15524, + 0, + 15525, + 15526, + 0, + 0, + 15527, + 0, + 0, + 15545, + 15546, + 0, + 15548, + 15552, + 0, + 15553, + 0, + 0, + 0, + 15554, + 0, + 15555, + 0, + 15557, + 15565, + 15573, + 15577, + 15578, + 0, + 15582, + 0, + 15583, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15586, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15588, + 0, + 0, + 0, + 0, + 0, + 15589, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15593, + 15594, + 0, + 0, + 0, + 0, + 15595, + 0, + 0, + 0, + 0, + 0, + 0, + 15596, + 0, + 0, + 0, + 15597, + 0, + 0, + 0, + 0, + 15600, + 0, + 0, + 15601, + 0, + 0, + 0, + 0, + 15602, + 15603, + 0, + 0, + 0, + 0, + 0, + 0, + 15604, + 0, + 15609, + 0, + 0, + 15612, + 0, + 0, + 15613, + 0, + 0, + 15615, + 15617, + 15618, + 0, + 0, + 15620, + 0, + 15636, + 15637, + 0, + 0, + 15649, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15650, + 0, + 0, + 15651, + 0, + 0, + 0, + 15656, + 0, + 15658, + 0, + 0, + 0, + 15664, + 0, + 0, + 15665, + 0, + 0, + 15668, + 0, + 0, + 0, + 0, + 0, + 15669, + 0, + 0, + 15674, + 0, + 0, + 15675, + 0, + 0, + 0, + 0, + 15676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15677, + 0, + 0, + 0, + 0, + 15678, + 0, + 0, + 0, + 0, + 0, + 15679, + 0, + 0, + 15681, + 0, + 15686, + 0, + 0, + 0, + 0, + 15687, + 0, + 15688, + 0, + 0, + 15690, + 0, + 0, + 0, + 15697, + 0, + 15699, + 15700, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15701, + 0, + 15702, + 15703, + 0, + 15704, + 0, + 15705, + 0, + 15707, + 0, + 15709, + 0, + 15712, + 15716, + 0, + 15717, + 0, + 15718, + 15720, + 0, + 0, + 0, + 0, + 0, + 15724, + 0, + 0, + 0, + 15725, + 
0, + 15726, + 0, + 0, + 0, + 15740, + 0, + 15745, + 15746, + 0, + 0, + 15747, + 0, + 15748, + 0, + 0, + 0, + 0, + 0, + 15749, + 0, + 0, + 0, + 15752, + 0, + 15753, + 0, + 0, + 0, + 0, + 0, + 0, + 15759, + 0, + 0, + 0, + 15765, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15767, + 0, + 0, + 0, + 15771, + 0, + 0, + 15784, + 0, + 0, + 0, + 0, + 15785, + 15790, + 15791, + 0, + 0, + 15792, + 0, + 0, + 0, + 15807, + 0, + 15811, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15818, + 0, + 0, + 0, + 15819, + 0, + 0, + 0, + 0, + 15821, + 0, + 0, + 0, + 0, + 0, + 15822, + 15824, + 0, + 0, + 15827, + 0, + 0, + 15829, + 15831, + 0, + 15832, + 0, + 0, + 15833, + 0, + 15835, + 15838, + 15839, + 15843, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15844, + 0, + 0, + 0, + 0, + 15845, + 15851, + 15856, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15858, + 15860, + 0, + 15861, + 0, + 0, + 0, + 15864, + 0, + 0, + 0, + 0, + 15865, + 0, + 0, + 0, + 0, + 0, + 0, + 15866, + 0, + 15872, + 0, + 0, + 15876, + 0, + 0, + 0, + 0, + 15877, + 15878, + 15883, + 15885, + 0, + 0, + 15888, + 0, + 0, + 0, + 0, + 0, + 15889, + 15890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15893, + 0, + 0, + 15894, + 0, + 0, + 0, + 15895, + 0, + 15896, + 15897, + 0, + 15898, + 15901, + 15902, + 0, + 15911, + 15915, + 0, + 15916, + 0, + 15924, + 15935, + 0, + 15937, + 0, + 0, + 0, + 0, + 0, + 15950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15958, + 0, + 0, + 0, + 15961, + 0, + 0, + 15966, + 0, + 15967, + 0, + 0, + 15977, + 0, + 0, + 15978, + 0, + 0, + 15981, + 15982, + 15983, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15986, + 0, + 0, + 0, + 15990, + 0, + 15991, + 15995, + 15998, + 0, + 15999, + 0, + 16000, + 0, + 0, + 0, + 0, + 16008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16009, + 16011, + 0, + 16013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16014, + 0, + 0, + 16015, + 16023, + 16024, + 16025, + 0, + 0, + 16026, + 0, + 16030, + 0, + 16032, + 0, + 16033, + 0, + 0, + 0, + 0, + 0, + 0, + 16035, + 16036, + 16037, + 0, + 0, + 0, + 0, + 0, + 16039, + 0, + 0, + 0, + 0, + 16041, + 0, + 0, + 0, + 0, + 0, + 16043, + 16044, + 0, + 0, + 16047, + 0, + 0, + 0, + 16048, + 0, + 0, + 16049, + 16050, + 16052, + 0, + 0, + 0, + 0, + 0, + 16055, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16056, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16058, + 16060, + 16061, + 0, + 0, + 16063, + 0, + 0, + 16064, + 0, + 0, + 0, + 16067, + 16068, + 0, + 0, + 16069, + 16078, + 0, + 0, + 0, + 16079, + 0, + 0, + 0, + 16080, + 0, + 16081, + 0, + 0, + 0, + 16088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16089, + 16093, + 0, + 16097, + 0, + 16103, + 0, + 16104, + 16105, + 0, + 0, + 16256, + 0, + 0, + 16259, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16260, + 16261, + 0, + 0, + 16262, + 0, + 0, + 16263, + 0, + 16268, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16269, + 0, + 0, + 16270, + 16273, + 0, + 16274, + 0, + 0, + 0, + 0, + 16275, + 16276, + 16277, + 16280, + 0, + 0, + 0, + 16281, + 16284, + 0, + 0, + 0, + 16286, + 0, + 16289, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16290, + 0, + 0, + 0, + 0, + 16291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16292, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16293, + 16295, + 16297, + 0, + 16302, + 0, + 16304, + 0, + 16305, + 0, + 16306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16307, + 16308, + 16312, + 0, + 0, + 0, + 0, + 0, + 0, + 16313, + 16315, + 0, + 16318, + 0, + 0, + 0, + 16321, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 16326, + 16333, + 16336, + 0, + 0, + 0, + 0, + 16337, + 16340, + 0, + 0, + 0, + 0, + 0, + 16345, + 0, + 0, + 16346, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16347, + 0, + 0, + 16348, + 0, + 0, + 0, + 0, + 16349, + 0, + 0, + 0, + 16350, + 0, + 16357, + 0, + 0, + 0, + 0, + 16359, + 16360, + 0, + 0, + 0, + 0, + 16362, + 16363, + 16364, + 16365, + 0, + 0, + 16366, + 0, + 0, + 0, + 0, + 16367, + 16368, + 0, + 16369, + 16374, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16376, + 0, + 0, + 0, + 0, + 16378, + 16379, + 0, + 16380, + 0, + 0, + 0, + 16381, + 16383, + 0, + 0, + 0, + 0, + 0, + 16390, + 0, + 0, + 0, + 16399, + 0, + 16402, + 16404, + 16406, + 16407, + 0, + 0, + 0, + 16409, + 16411, + 0, + 0, + 0, + 0, + 16412, + 0, + 16413, + 16415, + 16423, + 0, + 0, + 0, + 0, + 0, + 16424, + 0, + 0, + 0, + 16428, + 16434, + 16435, + 16449, + 0, + 16450, + 16451, + 0, + 0, + 0, + 16453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16454, + 0, + 0, + 16456, + 16458, + 0, + 0, + 16459, + 0, + 0, + 16460, + 0, + 0, + 0, + 0, + 16462, + 0, + 16463, + 0, + 0, + 16466, + 0, + 0, + 0, + 0, + 0, + 16479, + 0, + 0, + 16480, + 0, + 16481, + 16484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16485, + 0, + 0, + 0, + 0, + 0, + 0, + 16489, + 0, + 0, + 0, + 0, + 0, + 16491, + 0, + 0, + 16498, + 0, + 0, + 16503, + 0, + 16505, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16506, + 0, + 0, + 0, + 16508, + 16509, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16511, + 16513, + 0, + 0, + 0, + 16516, + 0, + 16517, + 0, + 16519, + 0, + 16529, + 0, + 0, + 16531, + 0, + 0, + 0, + 0, + 0, + 0, + 16534, + 0, + 0, + 16541, + 16542, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16543, + 16547, + 16548, + 0, + 0, + 0, + 16551, + 0, + 16552, + 0, + 0, + 0, + 16553, + 0, + 0, + 16558, + 0, + 0, + 16562, + 16565, + 0, + 0, + 0, + 16570, + 0, + 0, + 0, + 16573, + 16585, + 0, + 0, + 0, + 16586, + 16587, + 16595, + 0, + 16596, + 0, + 16598, + 0, + 0, + 0, + 16600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16601, + 0, + 0, + 0, + 0, + 16603, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16604, + 16612, + 0, + 0, + 0, + 0, + 16613, + 0, + 16618, + 0, + 0, + 0, + 16640, + 0, + 0, + 16641, + 0, + 0, + 0, + 0, + 0, + 0, + 16645, + 0, + 0, + 0, + 0, + 16646, + 0, + 0, + 0, + 0, + 0, + 0, + 16651, + 0, + 0, + 0, + 0, + 16653, + 16654, + 0, + 0, + 0, + 16655, + 0, + 0, + 16656, + 16667, + 0, + 0, + 0, + 0, + 16671, + 0, + 16672, + 0, + 0, + 0, + 16673, + 0, + 0, + 0, + 0, + 0, + 16676, + 0, + 16686, + 0, + 0, + 0, + 0, + 16689, + 0, + 16690, + 0, + 16692, + 0, + 16693, + 0, + 16694, + 0, + 16696, + 0, + 0, + 0, + 16705, + 0, + 0, + 0, + 0, + 0, + 0, + 16707, + 0, + 0, + 0, + 16709, + 0, + 0, + 0, + 0, + 16711, + 0, + 16712, + 16713, + 0, + 0, + 0, + 16715, + 0, + 0, + 0, + 0, + 16716, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16718, + 16724, + 0, + 0, + 16726, + 16727, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16728, + 0, + 16729, + 0, + 0, + 16730, + 0, + 0, + 0, + 0, + 0, + 16731, + 0, + 0, + 0, + 16732, + 0, + 0, + 0, + 0, + 16734, + 16738, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16743, + 0, + 0, + 16745, + 0, + 0, + 0, + 0, + 0, + 16749, + 0, + 16752, + 0, + 0, + 0, + 0, + 16756, + 0, + 0, + 16758, + 0, + 16759, + 0, + 0, + 0, + 0, + 0, + 16760, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16762, + 0, + 16769, + 0, + 16770, + 0, + 16772, + 0, + 0, + 0, + 16777, + 16780, + 0, + 0, + 0, + 0, + 0, + 0, + 16781, + 0, + 0, + 16782, + 0, + 16784, + 0, + 0, + 16785, + 16787, + 16792, + 0, + 0, + 
16794, + 0, + 0, + 0, + 16798, + 0, + 0, + 16809, + 0, + 0, + 16814, + 16816, + 16817, + 0, + 16819, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16820, + 0, + 0, + 16836, + 16839, + 0, + 0, + 16841, + 16851, + 16857, + 0, + 0, + 16858, + 16859, + 0, + 0, + 16860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16862, + 0, + 16863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16864, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16876, + 0, + 16881, + 16882, + 0, + 16885, + 16886, + 0, + 16887, + 0, + 0, + 0, + 16889, + 16891, + 0, + 0, + 0, + 0, + 0, + 16894, + 16895, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16897, + 0, + 16898, + 0, + 0, + 0, + 0, + 0, + 16913, + 0, + 0, + 16924, + 16925, + 16926, + 0, + 0, + 16927, + 0, + 0, + 0, + 16937, + 16938, + 0, + 0, + 0, + 16940, + 16941, + 0, + 0, + 0, + 16942, + 16945, + 0, + 16946, + 16949, + 16950, + 0, + 0, + 0, + 16952, + 16955, + 0, + 0, + 0, + 16965, + 0, + 16969, + 0, + 0, + 16975, + 0, + 0, + 16976, + 0, + 0, + 0, + 0, + 16978, + 0, + 0, + 16981, + 0, + 16983, + 16989, + 0, + 0, + 0, + 0, + 16990, + 0, + 0, + 16991, + 0, + 0, + 0, + 16993, + 0, + 16994, + 16996, + 17000, + 0, + 0, + 0, + 0, + 0, + 17002, + 17004, + 0, + 17006, + 0, + 0, + 17007, + 0, + 0, + 0, + 0, + 17008, + 17013, + 17014, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17021, + 0, + 17031, + 0, + 0, + 0, + 0, + 0, + 17033, + 17036, + 0, + 17038, + 0, + 0, + 17039, + 0, + 17045, + 0, + 0, + 17046, + 17047, + 0, + 0, + 0, + 0, + 17048, + 0, + 17049, + 17050, + 0, + 17051, + 17053, + 0, + 17054, + 0, + 17055, + 0, + 0, + 0, + 0, + 0, + 17063, + 0, + 0, + 17064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17065, + 0, + 0, + 17068, + 0, + 0, + 0, + 0, + 0, + 17072, + 0, + 0, + 0, + 0, + 0, + 0, + 17073, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17074, + 0, + 17080, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17081, + 17083, + 17084, + 0, + 0, + 0, + 17085, + 0, + 0, + 0, + 0, + 17092, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17093, + 0, + 17095, + 17102, + 0, + 0, + 0, + 0, + 0, + 0, + 17103, + 0, + 0, + 17105, + 0, + 17107, + 0, + 0, + 0, + 0, + 17114, + 0, + 0, + 0, + 0, + 0, + 17115, + 17125, + 17127, + 0, + 0, + 17128, + 0, + 0, + 0, + 17129, + 17130, + 0, + 17131, + 0, + 0, + 0, + 0, + 0, + 17132, + 17135, + 17145, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17146, + 0, + 17147, + 0, + 17148, + 0, + 0, + 0, + 0, + 0, + 0, + 17149, + 17150, + 0, + 17151, + 17153, + 0, + 17155, + 0, + 0, + 0, + 0, + 17163, + 17171, + 0, + 17174, + 0, + 0, + 0, + 0, + 17179, + 0, + 0, + 17182, + 17185, + 0, + 0, + 0, + 0, + 0, + 17186, + 0, + 0, + 17188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17189, + 17191, + 0, + 17194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17195, + 17196, + 17203, + 17204, + 0, + 0, + 17205, + 17217, + 0, + 0, + 0, + 0, + 0, + 17218, + 0, + 0, + 0, + 0, + 17219, + 0, + 17220, + 0, + 17221, + 0, + 0, + 17230, + 0, + 0, + 0, + 0, + 0, + 17236, + 0, + 17238, + 17239, + 0, + 0, + 0, + 17241, + 17244, + 0, + 0, + 17245, + 0, + 17248, + 0, + 0, + 17251, + 0, + 17252, + 0, + 0, + 17264, + 0, + 17266, + 0, + 0, + 0, + 17268, + 0, + 0, + 0, + 0, + 17271, + 17272, + 0, + 17273, + 0, + 17295, + 0, + 17302, + 0, + 17305, + 0, + 0, + 0, + 17306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17309, + 0, + 17310, + 17313, + 0, + 0, + 0, + 0, + 17314, + 17315, + 0, + 17317, + 0, + 0, + 0, + 
0, + 17318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17320, + 0, + 0, + 0, + 0, + 0, + 0, + 17334, + 0, + 17344, + 17348, + 0, + 0, + 0, + 17350, + 17351, + 0, + 0, + 17353, + 0, + 0, + 17354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17355, + 0, + 0, + 0, + 0, + 0, + 0, + 17356, + 17357, + 0, + 0, + 17359, + 0, + 0, + 0, + 17371, + 0, + 17372, + 0, + 0, + 0, + 17393, + 0, + 0, + 0, + 0, + 17394, + 0, + 0, + 0, + 0, + 0, + 17395, + 0, + 0, + 17399, + 0, + 0, + 0, + 17401, + 17417, + 0, + 17418, + 0, + 17419, + 0, + 0, + 0, + 0, + 0, + 17422, + 17423, + 0, + 0, + 0, + 0, + 0, + 17424, + 0, + 0, + 0, + 0, + 0, + 17428, + 17429, + 17433, + 0, + 0, + 0, + 17437, + 0, + 0, + 17441, + 0, + 0, + 17442, + 0, + 0, + 17453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17454, + 17456, + 17462, + 0, + 0, + 17466, + 0, + 0, + 17468, + 0, + 0, + 17469, + 0, + 0, + 0, + 0, + 17470, + 0, + 17475, + 0, + 0, + 0, + 0, + 0, + 17479, + 0, + 0, + 0, + 17483, + 17484, + 0, + 17485, + 0, + 17486, + 0, + 17491, + 17492, + 0, + 0, + 17493, + 0, + 17494, + 17495, + 0, + 0, + 0, + 17496, + 0, + 0, + 0, + 17497, + 0, + 0, + 0, + 17502, + 0, + 0, + 0, + 0, + 0, + 17503, + 0, + 17505, + 0, + 17507, + 0, + 0, + 0, + 17512, + 17513, + 17514, + 0, + 0, + 17515, + 0, + 0, + 0, + 17519, + 0, + 0, + 0, + 17522, + 0, + 0, + 17523, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17527, + 0, + 0, + 0, + 17528, + 0, + 0, + 0, + 17534, + 0, + 0, + 0, + 0, + 17536, + 0, + 0, + 0, + 17539, + 0, + 17540, + 17543, + 17549, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17556, + 0, + 0, + 17558, + 0, + 17559, + 0, + 0, + 17560, + 0, + 0, + 0, + 17563, + 0, + 0, + 0, + 0, + 0, + 0, + 17564, + 0, + 0, + 17565, + 17566, + 0, + 17567, + 0, + 0, + 0, + 0, + 0, + 0, + 17569, + 17570, + 0, + 17575, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17581, + 0, + 0, + 0, + 17582, + 17583, + 0, + 17586, + 0, + 0, + 17587, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17588, + 0, + 0, + 0, + 0, + 17596, + 17597, + 0, + 0, + 17598, + 17600, + 0, + 0, + 0, + 0, + 0, + 0, + 17601, + 0, + 0, + 0, + 17604, + 0, + 0, + 17605, + 0, + 0, + 17607, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17612, + 0, + 0, + 17618, + 0, + 17621, + 17622, + 0, + 0, + 0, + 0, + 17623, + 0, + 0, + 17624, + 0, + 0, + 17630, + 0, + 0, + 17631, + 17633, + 17634, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17635, + 0, + 0, + 17636, + 0, + 0, + 17637, + 0, + 17638, + 0, + 17640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17641, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17643, + 0, + 0, + 0, + 0, + 17645, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17646, + 17662, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17663, + 17664, + 0, + 17665, + 17666, + 0, + 0, + 0, + 17669, + 17671, + 17673, + 0, + 17679, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17684, + 0, + 0, + 0, + 17686, + 0, + 17714, + 0, + 0, + 17720, + 17722, + 17726, + 0, + 0, + 17728, + 0, + 0, + 17729, + 0, + 0, + 0, + 17732, + 0, + 17733, + 0, + 17734, + 0, + 0, + 0, + 17735, + 0, + 0, + 0, + 0, + 17737, + 0, + 0, + 0, + 0, + 17739, + 0, + 0, + 0, + 17741, + 17742, + 0, + 0, + 0, + 0, + 17743, + 17744, + 17745, + 0, + 0, + 0, + 17749, + 0, + 17750, + 17751, + 17752, + 17754, + 17761, + 17762, + 0, + 17763, + 0, + 17766, + 0, + 17772, + 0, + 0, + 0, + 0, + 0, + 17775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17776, + 0, + 0, + 17777, + 0, + 0, + 17778, + 17779, + 0, + 17782, + 17783, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
17784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17821, + 0, + 0, + 0, + 17822, + 0, + 0, + 0, + 17823, + 17825, + 0, + 0, + 0, + 0, + 0, + 17826, + 17831, + 17832, + 17833, + 0, + 0, + 17845, + 0, + 0, + 0, + 17846, + 0, + 0, + 0, + 17848, + 17850, + 17854, + 0, + 17855, + 0, + 0, + 17859, + 0, + 0, + 0, + 0, + 0, + 0, + 17860, + 17861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17870, + 17871, + 0, + 0, + 0, + 0, + 0, + 0, + 17872, + 0, + 0, + 0, + 17879, + 0, + 0, + 0, + 17881, + 17883, + 0, + 17884, + 0, + 17885, + 0, + 0, + 17886, + 0, + 0, + 17887, + 17891, + 17953, + 0, + 0, + 0, + 0, + 17954, + 0, + 0, + 17955, + 0, + 17968, + 0, + 0, + 17972, + 0, + 0, + 0, + 0, + 0, + 17974, + 0, + 0, + 0, + 0, + 17976, + 17978, + 0, + 0, + 17983, + 0, + 0, + 0, + 0, + 18003, + 0, + 0, + 0, + 0, + 0, + 18007, + 0, + 0, + 0, + 0, + 0, + 18009, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18010, + 0, + 0, + 0, + 0, + 0, + 0, + 18012, + 0, + 0, + 18014, + 0, + 0, + 0, + 18015, + 0, + 0, + 0, + 18016, + 0, + 18017, + 0, + 0, + 0, + 18030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18031, + 0, + 0, + 18036, + 18037, + 18038, + 0, + 0, + 18049, + 18056, + 0, + 18057, + 18058, + 0, + 18059, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18062, + 0, + 0, + 0, + 0, + 18064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18067, + 0, + 0, + 0, + 18068, + 0, + 0, + 18075, + 0, + 0, + 18078, + 18093, + 18094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18097, + 0, + 0, + 0, + 0, + 0, + 18098, + 18100, + 0, + 0, + 0, + 18108, + 0, + 18111, + 0, + 0, + 18112, + 0, + 18113, + 0, + 0, + 18115, + 18116, + 0, + 18118, + 0, + 0, + 0, + 0, + 18121, + 0, + 0, + 0, + 0, + 18123, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18124, + 0, + 0, + 0, + 0, + 18125, + 18126, + 0, + 18127, + 0, + 0, + 18128, + 18135, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18150, + 0, + 0, + 0, + 0, + 0, + 18151, + 18152, + 0, + 0, + 18156, + 18164, + 0, + 18166, + 18171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18172, + 18183, + 0, + 18184, + 0, + 0, + 0, + 0, + 18185, + 0, + 18187, + 0, + 0, + 0, + 0, + 0, + 18188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18189, + 0, + 0, + 18190, + 0, + 0, + 18191, + 18192, + 0, + 0, + 18194, + 18195, + 18196, + 0, + 0, + 0, + 18197, + 0, + 18203, + 0, + 18204, + 0, + 0, + 0, + 0, + 18205, + 0, + 0, + 0, + 18207, + 18208, + 0, + 0, + 18214, + 0, + 0, + 0, + 18215, + 18216, + 0, + 0, + 0, + 18220, + 0, + 0, + 18222, + 0, + 0, + 0, + 0, + 0, + 18223, + 0, + 18225, + 18231, + 0, + 18234, + 0, + 18235, + 0, + 0, + 0, + 0, + 18240, + 0, + 0, + 18241, + 18242, + 0, + 0, + 0, + 0, + 0, + 18243, + 18251, + 0, + 18253, + 0, + 18254, + 0, + 0, + 0, + 18266, + 0, + 0, + 0, + 0, + 0, + 0, + 18269, + 18270, + 18271, + 18273, + 18281, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18282, + 0, + 18283, + 0, + 18284, + 0, + 0, + 0, + 0, + 0, + 0, + 18285, + 0, + 18287, + 18289, + 0, + 0, + 18290, + 0, + 0, + 0, + 0, + 18308, + 0, + 0, + 0, + 18310, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18311, + 0, + 18312, + 18313, + 0, + 18315, + 0, + 0, + 18316, + 18320, + 0, + 18331, + 0, + 18332, + 0, + 18336, + 0, + 0, + 0, + 0, + 18337, + 0, + 18340, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18341, + 0, + 18344, + 18345, + 0, + 18346, + 0, + 0, + 0, + 0, + 0, + 18348, + 0, + 18351, + 0, + 0, + 18356, + 0, + 0, + 0, + 0, + 0, + 0, + 18357, + 0, + 0, + 0, + 0, + 0, + 18367, + 0, + 0, + 0, + 18368, + 0, + 18369, + 0, + 18370, + 18371, + 0, + 0, + 0, + 
18437, + 18444, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18445, + 18450, + 0, + 0, + 0, + 0, + 18451, + 0, + 18452, + 0, + 0, + 0, + 18453, + 0, + 0, + 0, + 0, + 0, + 18455, + 0, + 0, + 0, + 18456, + 0, + 18457, + 0, + 18460, + 0, + 0, + 18461, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18466, + 0, + 0, + 18467, + 0, + 0, + 0, + 0, + 18473, + 0, + 0, + 0, + 18476, + 0, + 18477, + 0, + 0, + 0, + 18478, + 18479, + 18480, + 0, + 0, + 0, + 18485, + 0, + 0, + 0, + 18486, + 0, + 0, + 0, + 0, + 0, + 0, + 18488, + 18490, + 0, + 0, + 0, + 0, + 0, + 0, + 18491, + 0, + 0, + 0, + 0, + 0, + 18495, + 0, + 0, + 18496, + 0, + 0, + 0, + 0, + 0, + 0, + 18505, + 0, + 18521, + 0, + 18522, + 18523, + 0, + 0, + 0, + 18525, + 18526, + 0, + 0, + 0, + 0, + 0, + 18527, + 0, + 0, + 0, + 0, + 18532, + 18533, + 0, + 18534, + 0, + 0, + 0, + 0, + 0, + 0, + 18535, + 18537, + 0, + 18538, + 0, + 0, + 0, + 0, + 0, + 0, + 18540, + 18541, + 18542, + 18543, + 0, + 18546, + 0, + 0, + 0, + 0, + 18553, + 18556, + 0, + 0, + 18558, + 0, + 0, + 18569, + 18571, + 0, + 0, + 0, + 18572, + 0, + 18574, + 0, + 0, + 0, + 0, + 18586, + 0, + 0, + 0, + 0, + 0, + 18588, + 0, + 0, + 18589, + 0, + 0, + 0, + 0, + 0, + 0, + 18590, + 0, + 18592, + 0, + 0, + 0, + 0, + 18594, + 0, + 0, + 0, + 18596, + 0, + 0, + 18597, + 18598, + 0, + 0, + 18601, + 0, + 0, + 0, + 0, + 18602, + 0, + 0, + 0, + 18603, + 18604, + 0, + 18605, + 0, + 0, + 0, + 0, + 18608, + 0, + 0, + 18611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18612, + 0, + 18616, + 0, + 0, + 18617, + 18619, + 0, + 0, + 0, + 18628, + 0, + 0, + 0, + 18629, + 0, + 0, + 18630, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18631, + 0, + 18632, + 0, + 0, + 18635, + 18637, + 0, + 0, + 0, + 0, + 0, + 0, + 18641, + 18643, + 18648, + 0, + 18652, + 0, + 0, + 18653, + 0, + 18655, + 18656, + 0, + 0, + 0, + 18657, + 0, + 0, + 18666, + 18674, + 0, + 0, + 0, + 0, + 18677, + 18684, + 18685, + 0, + 0, + 18686, + 0, + 0, + 18690, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18695, + 18696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18697, + 0, + 0, + 18700, + 0, + 0, + 0, + 0, + 0, + 0, + 18702, + 0, + 18708, + 0, + 0, + 18709, + 0, + 18710, + 0, + 0, + 18711, + 0, + 18714, + 0, + 0, + 18718, + 0, + 0, + 0, + 0, + 0, + 0, + 18719, + 0, + 0, + 18722, + 0, + 18726, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18731, + 0, + 0, + 0, + 0, + 0, + 18739, + 18741, + 0, + 0, + 18742, + 0, + 18743, + 18744, + 18746, + 18748, + 0, + 18752, + 18753, + 0, + 0, + 18754, + 18763, + 0, + 18765, + 0, + 0, + 0, + 18766, + 0, + 0, + 0, + 18769, + 0, + 0, + 0, + 0, + 0, + 18773, + 18778, + 18779, + 18781, + 0, + 0, + 18784, + 18787, + 0, + 18788, + 0, + 18793, + 0, + 0, + 0, + 0, + 0, + 0, + 18795, + 0, + 0, + 18800, + 0, + 0, + 0, + 0, + 0, + 18801, + 18804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18806, + 0, + 0, + 0, + 18811, + 18815, + 18816, + 0, + 0, + 0, + 0, + 18825, + 0, + 0, + 18827, + 18829, + 0, + 0, + 18830, + 0, + 0, + 0, + 0, + 18831, + 0, + 0, + 18832, + 0, + 0, + 0, + 0, + 18833, + 0, + 18840, + 0, + 18841, + 0, + 18842, + 0, + 0, + 0, + 0, + 18843, + 0, + 18844, + 0, + 0, + 0, + 0, + 0, + 0, + 18845, + 18846, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18848, + 0, + 0, + 0, + 18853, + 18860, + 0, + 0, + 18862, + 18866, + 0, + 0, + 18867, + 18869, + 0, + 0, + 18874, + 18881, + 18891, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18895, + 0, + 18896, + 0, + 0, + 0, + 
18900, + 0, + 0, + 0, + 18901, + 0, + 18902, + 18915, + 18916, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18919, + 0, + 0, + 0, + 0, + 0, + 18920, + 0, + 0, + 0, + 18921, + 18929, + 0, + 0, + 0, + 0, + 18930, + 0, + 0, + 0, + 0, + 0, + 0, + 18932, + 0, + 0, + 0, + 0, + 18934, + 18942, + 0, + 0, + 0, + 18951, + 18957, + 0, + 0, + 0, + 0, + 18958, + 0, + 0, + 0, + 0, + 18959, + 18960, + 0, + 0, + 18961, + 0, + 0, + 18962, + 0, + 0, + 0, + 0, + 18963, + 18964, + 0, + 0, + 0, + 18965, + 0, + 18967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18968, + 0, + 18969, + 0, + 18970, + 18973, + 18976, + 0, + 0, + 0, + 0, + 0, + 0, + 18977, + 0, + 0, + 0, + 18981, + 0, + 0, + 0, + 18990, + 0, + 18998, + 0, + 0, + 0, + 0, + 0, + 18999, + 19003, + 0, + 0, + 19005, + 0, + 0, + 0, + 19006, + 0, + 0, + 0, + 0, + 0, + 0, + 19008, + 19011, + 0, + 0, + 19018, + 0, + 0, + 19019, + 0, + 19024, + 0, + 19031, + 19032, + 0, + 19039, + 0, + 19041, + 19050, + 0, + 0, + 0, + 19051, + 19055, + 19056, + 0, + 19059, + 19063, + 19064, + 0, + 0, + 19088, + 0, + 0, + 0, + 19093, + 19094, + 0, + 0, + 0, + 0, + 19095, + 0, + 19096, + 0, + 0, + 0, + 19097, + 0, + 0, + 19098, + 0, + 19099, + 19100, + 0, + 0, + 19103, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19111, + 0, + 0, + 0, + 0, + 0, + 0, + 19112, + 0, + 0, + 0, + 19116, + 19117, + 0, + 19121, + 19122, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19123, + 19124, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19125, + 19126, + 0, + 19128, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19129, + 19130, + 19131, + 19132, + 0, + 0, + 19146, + 0, + 0, + 19147, + 19156, + 19158, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19182, + 19185, + 0, + 0, + 19187, + 0, + 0, + 0, + 19193, + 0, + 0, + 0, + 0, + 0, + 19194, + 0, + 19197, + 0, + 0, + 0, + 0, + 19198, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19202, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19203, + 0, + 19205, + 19210, + 0, + 0, + 0, + 19213, + 0, + 19218, + 0, + 0, + 0, + 19223, + 19229, + 0, + 0, + 19230, + 0, + 0, + 19231, + 19232, + 19233, + 19239, + 0, + 0, + 0, + 0, + 0, + 19240, + 0, + 19248, + 19249, + 0, + 0, + 0, + 0, + 19254, + 0, + 19256, + 19258, + 19259, + 0, + 0, + 19261, + 0, + 19266, + 0, + 0, + 0, + 19272, + 0, + 19278, + 19281, + 19282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19283, + 0, + 0, + 19284, + 0, + 0, + 19285, + 19287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19288, + 19291, + 0, + 19292, + 0, + 0, + 0, + 0, + 19297, + 0, + 19298, + 0, + 0, + 0, + 0, + 19302, + 19303, + 0, + 0, + 0, + 0, + 19304, + 19305, + 0, + 0, + 0, + 0, + 19314, + 0, + 0, + 19315, + 0, + 0, + 19321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19322, + 0, + 19333, + 0, + 19334, + 19335, + 0, + 19336, + 19337, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19346, + 0, + 0, + 19353, + 0, + 19354, + 19362, + 0, + 19366, + 19367, + 0, + 0, + 19369, + 0, + 19375, + 0, + 19377, + 19380, + 19388, + 0, + 0, + 0, + 0, + 0, + 19389, + 19390, + 0, + 0, + 0, + 0, + 19392, + 0, + 0, + 0, + 0, + 0, + 19402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19412, + 0, + 0, + 19413, + 19422, + 0, + 19424, + 0, + 0, + 0, + 19425, + 0, + 0, + 0, + 19428, + 0, + 0, + 0, + 0, + 19431, + 0, + 0, + 0, + 0, + 0, + 19432, + 0, + 0, + 0, + 0, + 0, + 19448, + 19459, + 0, + 0, + 19461, + 0, + 19462, + 19463, + 0, + 19467, + 19474, + 19482, + 0, + 0, + 0, + 0, 
+ 19494, + 0, + 0, + 0, + 0, + 19501, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19502, + 19504, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19505, + 0, + 0, + 0, + 0, + 19506, + 19507, + 0, + 0, + 0, + 19508, + 0, + 0, + 19511, + 0, + 0, + 19514, + 0, + 19515, + 0, + 19516, + 0, + 19518, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19530, + 0, + 19537, + 19538, + 0, + 19543, + 19546, + 0, + 19547, + 19551, + 0, + 0, + 0, + 0, + 0, + 0, + 19552, + 19553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19555, + 0, + 0, + 19556, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19560, + 19561, + 0, + 0, + 19562, + 0, + 0, + 0, + 0, + 0, + 0, + 19565, + 19567, + 0, + 19568, + 0, + 0, + 0, + 19569, + 19570, + 0, + 19578, + 0, + 0, + 0, + 0, + 19580, + 0, + 0, + 0, + 0, + 19581, + 19584, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19585, + 19586, + 0, + 0, + 0, + 19587, + 19588, + 0, + 19589, + 0, + 0, + 0, + 0, + 0, + 0, + 19592, + 19593, + 19599, + 0, + 19600, + 0, + 0, + 19604, + 0, + 0, + 19605, + 0, + 19606, + 19608, + 19610, + 0, + 19613, + 19614, + 0, + 0, + 0, + 0, + 0, + 0, + 19616, + 19617, + 0, + 0, + 19618, + 0, + 0, + 19619, + 0, + 0, + 0, + 19620, + 19621, + 19631, + 0, + 0, + 19632, + 19634, + 19636, + 0, + 19643, + 0, + 0, + 19644, + 19658, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19659, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19675, + 19677, + 0, + 0, + 0, + 0, + 19679, + 0, + 19683, + 0, + 19684, + 0, + 0, + 0, + 0, + 0, + 0, + 19687, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19688, + 19689, + 19692, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19695, + 19697, + 0, + 0, + 0, + 0, + 0, + 19698, + 19699, + 0, + 0, + 19700, + 0, + 19702, + 0, + 0, + 19703, + 0, + 0, + 0, + 0, + 0, + 0, + 19704, + 19708, + 0, + 19710, + 0, + 19713, + 0, + 0, + 0, + 19715, + 0, + 0, + 0, + 0, + 19718, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19720, + 0, + 19722, + 0, + 0, + 19725, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19730, + 0, + 0, + 0, + 0, + 0, + 19731, + 0, + 19734, + 19735, + 19739, + 0, + 0, + 19740, + 0, + 19741, + 0, + 0, + 0, + 19746, + 0, + 0, + 19747, + 0, + 19771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19772, + 19775, + 0, + 0, + 0, + 0, + 0, + 0, + 19778, + 0, + 0, + 0, + 0, + 0, + 19779, + 0, + 0, + 19780, + 19790, + 0, + 19791, + 0, + 0, + 19792, + 0, + 0, + 0, + 19793, + 0, + 0, + 19796, + 19797, + 0, + 0, + 0, + 19799, + 0, + 0, + 0, + 19801, + 0, + 0, + 0, + 0, + 19803, + 0, + 19804, + 0, + 19805, + 0, + 0, + 19807, + 0, + 0, + 0, + 19808, + 0, + 0, + 0, + 0, + 0, + 0, + 19809, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19816, + 0, + 19821, + 0, + 19822, + 19830, + 19831, + 0, + 0, + 0, + 19833, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19838, + 0, + 0, + 0, + 0, + 19839, + 0, + 0, + 19843, + 0, + 0, + 0, + 0, + 19845, + 0, + 0, + 0, + 0, + 19847, + 0, + 0, + 19848, + 0, + 19849, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19851, + 0, + 0, + 0, + 19854, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19864, + 0, + 19865, + 0, + 19866, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19868, + 0, + 0, + 19870, + 0, + 0, + 19871, + 0, + 0, + 19872, + 19873, + 19875, + 0, + 19880, + 19882, + 19884, + 0, + 0, + 19885, + 19886, + 19888, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19890, + 19892, + 19893, + 0, + 0, + 19894, + 0, + 0, + 0, + 19895, + 0, + 19896, + 19902, + 
0, + 0, + 19903, + 0, + 0, + 19905, + 0, + 0, + 0, + 19906, + 0, + 19908, + 0, + 19909, + 19911, + 0, + 0, + 0, + 19913, + 19920, + 0, + 19938, + 19939, + 19940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19942, + 0, + 19943, + 0, + 19945, + 0, + 0, + 0, + 19951, + 19952, + 19954, + 19960, + 0, + 19965, + 0, + 19971, + 0, + 0, + 0, + 0, + 0, + 19975, + 0, + 19976, + 0, + 19990, + 0, + 0, + 19991, + 0, + 19993, + 0, + 19995, + 0, + 0, + 0, + 19998, + 19999, + 20001, + 0, + 20003, + 20005, + 0, + 20011, + 20012, + 0, + 0, + 0, + 0, + 0, + 0, + 20014, + 0, + 20020, + 0, + 0, + 0, + 0, + 20021, + 0, + 0, + 0, + 0, + 0, + 20023, + 20024, + 0, + 0, + 0, + 0, + 0, + 20025, + 0, + 0, + 20027, + 0, + 0, + 20029, + 0, + 0, + 20032, + 0, + 0, + 0, + 0, + 20044, + 20045, + 0, + 20048, + 20049, + 0, + 0, + 20050, + 0, + 20052, + 0, + 0, + 20054, + 20057, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20059, + 0, + 0, + 20061, + 0, + 20062, + 0, + 20064, + 0, + 0, + 20066, + 0, + 0, + 20067, + 0, + 0, + 0, + 0, + 20069, + 0, + 0, + 0, + 0, + 0, + 0, + 20070, + 20071, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20072, + 0, + 0, + 20073, + 20074, + 0, + 0, + 0, + 0, + 0, + 20075, + 0, + 20078, + 0, + 0, + 0, + 0, + 20080, + 0, + 20081, + 0, + 0, + 0, + 0, + 0, + 0, + 20095, + 0, + 20098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20107, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20112, + 0, + 0, + 0, + 20113, + 20114, + 0, + 0, + 0, + 20115, + 20123, + 20124, + 0, + 0, + 0, + 20131, + 20133, + 20134, + 0, + 0, + 0, + 0, + 20136, + 0, + 0, + 20137, + 20138, + 20150, + 0, + 20152, + 0, + 0, + 0, + 20153, + 0, + 0, + 20154, + 0, + 0, + 0, + 20158, + 0, + 20163, + 0, + 0, + 20164, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20166, + 0, + 20168, + 0, + 20170, + 0, + 20175, + 0, + 0, + 20178, + 0, + 0, + 0, + 0, + 20223, + 0, + 0, + 0, + 0, + 20224, + 0, + 20226, + 0, + 0, + 20230, + 0, + 20231, + 0, + 0, + 0, + 0, + 20232, + 0, + 0, + 20233, + 20234, + 0, + 20244, + 0, + 20247, + 0, + 0, + 0, + 0, + 0, + 0, + 20249, + 0, + 0, + 0, + 20250, + 0, + 0, + 0, + 0, + 20251, + 0, + 20253, + 0, + 20254, + 0, + 0, + 0, + 0, + 20256, + 0, + 0, + 20264, + 0, + 0, + 0, + 0, + 20266, + 0, + 0, + 0, + 20278, + 0, + 0, + 20279, + 20282, + 0, + 0, + 0, + 0, + 0, + 20283, + 0, + 20284, + 0, + 20285, + 0, + 20287, + 20290, + 0, + 0, + 0, + 0, + 20292, + 0, + 0, + 0, + 0, + 20293, + 20297, + 0, + 0, + 0, + 0, + 0, + 0, + 20299, + 0, + 20300, + 20303, + 0, + 0, + 0, + 0, + 0, + 0, + 20307, + 0, + 0, + 20308, + 0, + 20309, + 0, + 20310, + 0, + 0, + 0, + 0, + 0, + 0, + 20312, + 0, + 0, + 0, + 20314, + 0, + 0, + 0, + 0, + 20315, + 20316, + 0, + 20322, + 0, + 0, + 0, + 0, + 0, + 0, + 20339, + 0, + 0, + 0, + 20342, + 0, + 0, + 0, + 0, + 20352, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20362, + 0, + 0, + 20365, + 0, + 20375, + 20377, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20378, + 20379, + 0, + 20380, + 0, + 0, + 20381, + 0, + 20382, + 0, + 20383, + 0, + 20388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20390, + 20392, + 20393, + 0, + 0, + 20395, + 0, + 0, + 0, + 0, + 0, + 20396, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20398, + 20415, + 0, + 0, + 0, + 20417, + 0, + 0, + 20420, + 0, + 0, + 20426, + 20428, + 0, + 20431, + 0, + 0, + 20432, + 0, + 20433, + 20434, + 20435, + 0, + 0, + 0, + 0, + 20440, + 0, + 0, + 0, + 0, + 0, + 20442, + 0, + 20443, + 0, + 20446, + 0, + 0, + 0, + 0, + 20448, + 0, + 20451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20452, + 20453, + 0, + 0, + 20454, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 20457, + 0, + 20458, + 0, + 0, + 0, + 20465, + 0, + 0, + 0, + 0, + 0, + 20469, + 0, + 0, + 0, + 20473, + 0, + 20476, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20477, + 0, + 0, + 20485, + 0, + 0, + 20486, + 0, + 0, + 20487, + 0, + 20496, + 0, + 20497, + 0, + 0, + 20498, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20499, + 20500, + 0, + 20501, + 0, + 0, + 0, + 0, + 0, + 20520, + 20527, + 0, + 20529, + 0, + 0, + 0, + 0, + 20539, + 0, + 0, + 20540, + 0, + 0, + 0, + 20543, + 0, + 0, + 0, + 20546, + 0, + 0, + 0, + 0, + 0, + 20548, + 0, + 0, + 20563, + 0, + 0, + 20564, + 0, + 20566, + 0, + 0, + 0, + 0, + 0, + 20589, + 0, + 0, + 0, + 0, + 20590, + 0, + 0, + 20593, + 20594, + 0, + 0, + 0, + 0, + 20595, + 0, + 20597, + 20598, + 0, + 0, + 0, + 20618, + 20620, + 0, + 0, + 0, + 0, + 20621, + 0, + 0, + 0, + 0, + 20627, + 0, + 0, + 0, + 0, + 0, + 20628, + 0, + 0, + 0, + 20629, + 0, + 20630, + 0, + 0, + 20639, + 0, + 0, + 0, + 0, + 0, + 20707, + 0, + 0, + 20709, + 0, + 0, + 0, + 20713, + 20714, + 0, + 0, + 0, + 0, + 0, + 20724, + 20725, + 0, + 0, + 0, + 0, + 20726, + 20728, + 20729, + 0, + 20733, + 0, + 20734, + 0, + 20735, + 20736, + 0, + 20737, + 0, + 0, + 20744, + 0, + 20745, + 0, + 20748, + 0, + 0, + 20749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20750, + 0, + 0, + 0, + 0, + 20754, + 0, + 0, + 0, + 20761, + 0, + 0, + 20763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20766, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20767, + 0, + 0, + 0, + 0, + 20768, + 0, + 20769, + 20777, + 0, + 0, + 0, + 0, + 0, + 0, + 20785, + 0, + 0, + 0, + 20786, + 20795, + 20801, + 0, + 20802, + 0, + 20807, + 0, + 0, + 20808, + 0, + 0, + 20810, + 0, + 0, + 20811, + 0, + 20812, + 0, + 0, + 0, + 0, + 0, + 20813, + 0, + 0, + 20818, + 20820, + 20821, + 0, + 0, + 0, + 20822, + 0, + 20823, + 0, + 0, + 0, + 20826, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20829, + 20830, + 20831, + 0, + 20832, + 20836, + 0, + 0, + 20839, + 0, + 0, + 20840, + 20842, + 0, + 20843, + 0, + 20844, + 0, + 20854, + 0, + 0, + 0, + 20855, + 0, + 0, + 0, + 0, + 20856, + 0, + 0, + 0, + 20869, + 0, + 0, + 20871, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20873, + 0, + 0, + 0, + 0, + 0, + 20876, + 0, + 0, + 0, + 0, + 0, + 20880, + 0, + 0, + 20882, + 0, + 0, + 0, + 0, + 20883, + 20884, + 0, + 0, + 20890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20891, + 0, + 0, + 0, + 0, + 0, + 20905, + 0, + 20906, + 20910, + 0, + 0, + 20912, + 20915, + 0, + 0, + 0, + 0, + 0, + 20916, + 0, + 20917, + 0, + 20919, + 20920, + 20922, + 0, + 20927, + 0, + 20928, + 20929, + 20930, + 0, + 0, + 20935, + 0, + 0, + 20939, + 0, + 0, + 20941, + 0, + 0, + 0, + 20943, + 0, + 0, + 0, + 20946, + 20947, + 0, + 0, + 0, + 0, + 0, + 20950, + 0, + 20954, + 0, + 0, + 20955, + 20964, + 0, + 0, + 20967, + 0, + 0, + 0, + 0, + 0, + 20973, + 20975, + 0, + 0, + 0, + 20984, + 0, + 20987, + 20988, + 0, + 0, + 0, + 0, + 0, + 20989, + 0, + 0, + 0, + 20995, + 0, + 20998, + 0, + 20999, + 0, + 0, + 0, + 0, + 21000, + 21001, + 0, + 0, + 0, + 0, + 21008, + 0, + 21010, + 0, + 21016, + 0, + 0, + 0, + 21017, + 21018, + 0, + 0, + 0, + 0, + 0, + 21021, + 21026, + 21027, + 21028, + 0, + 0, + 21029, + 0, + 0, + 0, + 0, + 0, + 21030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21031, + 21032, + 0, + 0, + 0, + 0, + 0, + 21037, + 0, + 0, + 21038, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21039, + 0, + 21041, + 0, + 21046, + 21047, + 0, + 0, + 0, + 21049, + 21053, + 0, + 0, + 21057, + 21064, + 21065, + 0, + 0, + 21066, + 21067, + 0, + 0, + 0, + 
21069, + 0, + 0, + 0, + 21071, + 21072, + 0, + 0, + 21073, + 0, + 21074, + 0, + 0, + 21078, + 0, + 0, + 0, + 0, + 21079, + 0, + 0, + 21080, + 21081, + 0, + 0, + 21086, + 21087, + 0, + 21089, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21091, + 0, + 21093, + 0, + 21094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21095, + 0, + 0, + 0, + 0, + 0, + 21096, + 0, + 21098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21099, + 0, + 0, + 21100, + 21101, + 21102, + 0, + 0, + 0, + 0, + 0, + 21103, + 0, + 21104, + 0, + 0, + 0, + 0, + 0, + 21105, + 21108, + 21109, + 0, + 0, + 21112, + 21113, + 0, + 0, + 0, + 0, + 0, + 0, + 21115, + 21122, + 21123, + 0, + 0, + 0, + 0, + 0, + 21125, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21129, + 21131, + 0, + 0, + 21134, + 0, + 0, + 0, + 21137, + 21142, + 0, + 21143, + 0, + 0, + 21144, + 0, + 21145, + 21146, + 0, + 21152, + 21154, + 21155, + 21156, + 0, + 0, + 0, + 21160, + 0, + 0, + 0, + 0, + 0, + 0, + 21161, + 0, + 21164, + 0, + 21166, + 0, + 0, + 0, + 0, + 21170, + 0, + 0, + 0, + 0, + 21171, + 0, + 0, + 21172, + 0, + 21174, + 0, + 21175, + 0, + 0, + 0, + 0, + 0, + 21176, + 21179, + 21188, + 0, + 0, + 0, + 21189, + 0, + 0, + 21190, + 0, + 0, + 0, + 21192, + 0, + 0, + 21193, + 0, + 0, + 0, + 21198, + 0, + 21212, + 0, + 0, + 21213, + 0, + 0, + 0, + 0, + 0, + 0, + 21215, + 21216, + 0, + 0, + 21223, + 21225, + 0, + 21226, + 0, + 0, + 0, + 0, + 21227, + 21228, + 0, + 0, + 21229, + 0, + 0, + 0, + 0, + 21230, + 21236, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21237, + 0, + 0, + 21238, + 21239, + 0, + 0, + 0, + 0, + 21256, + 0, + 0, + 0, + 0, + 0, + 21257, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21259, + 0, + 0, + 0, + 21263, + 0, + 21272, + 0, + 21274, + 0, + 21282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21294, + 0, + 0, + 21297, + 0, + 0, + 0, + 0, + 21298, + 0, + 0, + 0, + 21299, + 0, + 21300, + 21302, + 0, + 21316, + 0, + 21318, + 21322, + 21323, + 0, + 21324, + 0, + 21326, + 0, + 0, + 0, + 21327, + 21328, + 0, + 0, + 0, + 21352, + 0, + 0, + 21354, + 21361, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21362, + 0, + 0, + 0, + 21363, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21366, + 0, + 0, + 21367, + 21372, + 21374, + 0, + 0, + 0, + 21375, + 21377, + 0, + 21378, + 0, + 0, + 0, + 21380, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21381, + 0, + 0, + 0, + 0, + 0, + 0, + 21382, + 0, + 21383, + 0, + 0, + 21384, + 0, + 0, + 21385, + 0, + 0, + 0, + 0, + 21389, + 21390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21397, + 21398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21399, + 0, + 21400, + 0, + 0, + 0, + 0, + 21402, + 0, + 0, + 0, + 21403, + 21404, + 0, + 21405, + 21406, + 0, + 0, + 0, + 21407, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21408, + 0, + 0, + 0, + 0, + 21409, + 0, + 21421, + 0, + 21422, + 0, + 0, + 0, + 21425, + 21428, + 0, + 0, + 0, + 0, + 21429, + 0, + 0, + 0, + 0, + 0, + 21433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21434, + 0, + 21443, + 0, + 21444, + 21449, + 0, + 21452, + 0, + 21453, + 21454, + 0, + 0, + 0, + 21457, + 0, + 0, + 21458, + 0, + 0, + 0, + 21460, + 21461, + 0, + 0, + 21464, + 0, + 0, + 0, + 21473, + 21478, + 0, + 0, + 21479, + 0, + 0, + 21481, + 21483, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21484, + 0, + 0, + 21485, + 21486, + 0, + 0, + 21488, + 0, + 0, + 0, + 0, + 0, + 0, + 21523, + 0, + 0, + 21525, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21526, + 0, + 0, + 0, + 0, + 0, + 0, + 21529, + 
21530, + 0, + 0, + 21531, + 0, + 0, + 21533, + 0, + 0, + 21539, + 21564, + 0, + 21567, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21575, + 0, + 0, + 0, + 0, + 21577, + 0, + 0, + 0, + 0, + 0, + 21591, + 0, + 0, + 21604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21605, + 0, + 21606, + 0, + 0, + 21617, + 21618, + 21619, + 21620, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21623, + 0, + 0, + 0, + 0, + 21631, + 0, + 21635, + 0, + 0, + 0, + 0, + 21639, + 21646, + 21653, + 21662, + 0, + 0, + 21663, + 21664, + 0, + 21666, + 0, + 0, + 21667, + 0, + 21670, + 21672, + 21673, + 0, + 21674, + 21683, + 0, + 0, + 0, + 0, + 0, + 21684, + 0, + 21694, + 0, + 0, + 0, + 0, + 21695, + 21700, + 0, + 21703, + 0, + 21704, + 0, + 0, + 21709, + 0, + 0, + 0, + 21710, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21711, + 0, + 0, + 0, + 21712, + 0, + 21717, + 0, + 21730, + 0, + 0, + 0, + 21731, + 21733, + 0, + 0, + 0, + 0, + 21737, + 21741, + 21742, + 0, + 21747, + 0, + 0, + 0, + 21749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21750, + 0, + 0, + 0, + 0, + 0, + 21752, + 0, + 0, + 0, + 0, + 21753, + 0, + 0, + 0, + 0, + 0, + 0, + 21755, + 21756, + 0, + 21757, + 0, + 0, + 0, + 0, + 0, + 0, + 21760, + 0, + 0, + 21763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21764, + 0, + 0, + 21766, + 0, + 0, + 21767, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21773, + 0, + 21774, + 0, + 0, + 21775, + 0, + 0, + 0, + 0, + 21776, + 0, + 0, + 21777, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21780, + 21787, + 21788, + 21791, + 0, + 0, + 0, + 21797, + 0, + 0, + 0, + 0, + 0, + 21805, + 0, + 0, + 0, + 0, + 21806, + 0, + 21807, + 21809, + 0, + 21810, + 21811, + 0, + 21817, + 21819, + 21820, + 0, + 21823, + 0, + 21824, + 0, + 0, + 21825, + 0, + 0, + 21826, + 21832, + 0, + 0, + 0, + 0, + 0, + 21833, + 21848, + 21849, + 0, + 0, + 21867, + 21870, + 21871, + 21873, + 0, + 0, + 0, + 21874, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21875, + 0, + 21878, + 0, + 0, + 0, + 21879, + 0, + 21881, + 21886, + 0, + 0, + 0, + 0, + 21887, + 0, + 0, + 21888, + 21894, + 21895, + 21897, + 0, + 21901, + 0, + 21904, + 0, + 0, + 21906, + 0, + 0, + 0, + 21909, + 21910, + 21911, + 0, + 0, + 21912, + 0, + 0, + 21913, + 21914, + 21915, + 0, + 21919, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21921, + 0, + 0, + 21922, + 21933, + 21939, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21944, + 0, + 0, + 0, + 0, + 0, + 21945, + 0, + 21947, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21949, + 0, + 0, + 0, + 21950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21951, + 0, + 21952, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21954, + 21957, + 0, + 0, + 0, + 0, + 21958, + 0, + 21959, + 0, + 0, + 0, + 0, + 0, + 0, + 21962, + 21963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21964, + 21965, + 0, + 0, + 21969, + 21970, + 0, + 0, + 0, + 21974, + 0, + 0, + 21980, + 21981, + 0, + 21982, + 0, + 0, + 0, + 0, + 0, + 21985, + 0, + 21988, + 0, + 21992, + 0, + 21999, + 0, + 0, + 0, + 0, + 0, + 0, + 22001, + 0, + 22002, + 0, + 0, + 0, + 0, + 0, + 0, + 22003, + 0, + 0, + 0, + 0, + 0, + 22004, + 0, + 0, + 0, + 22008, + 0, + 22009, + 22015, + 0, + 0, + 22016, + 0, + 0, + 0, + 22017, + 22019, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22020, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22021, + 22037, + 0, + 22039, + 0, + 0, + 0, + 22040, + 0, + 0, + 0, + 22048, + 22049, + 0, + 0, + 22053, + 22055, + 22056, + 22059, + 0, + 0, + 22060, + 22061, + 0, + 0, + 22064, + 0, + 0, + 0, + 0, + 
22066, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22073, + 0, + 0, + 0, + 22074, + 22075, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22076, + 0, + 0, + 0, + 0, + 22077, + 22084, + 22099, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22104, + 0, + 0, + 22107, + 0, + 22108, + 0, + 22109, + 0, + 22110, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22111, + 22119, + 0, + 22120, + 22122, + 0, + 0, + 0, + 0, + 22125, + 0, + 0, + 0, + 22128, + 22129, + 0, + 0, + 0, + 0, + 0, + 0, + 22141, + 0, + 0, + 0, + 22142, + 0, + 0, + 22144, + 22146, + 0, + 22148, + 22149, + 22151, + 22154, + 0, + 0, + 0, + 22162, + 0, + 0, + 0, + 0, + 22164, + 22177, + 0, + 0, + 0, + 0, + 22179, + 0, + 22182, + 22183, + 0, + 0, + 22184, + 22188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22190, + 0, + 22194, + 22201, + 0, + 0, + 22208, + 0, + 22209, + 0, + 22212, + 0, + 0, + 22215, + 0, + 22223, + 22231, + 0, + 0, + 22232, + 0, + 22234, + 0, + 0, + 22235, + 22236, + 0, + 22237, + 0, + 22240, + 0, + 0, + 0, + 0, + 0, + 22241, + 0, + 0, + 0, + 22242, + 22246, + 22247, + 0, + 0, + 0, + 22259, + 22268, + 0, + 22269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22270, + 0, + 0, + 0, + 0, + 22271, + 0, + 22272, + 0, + 22277, + 0, + 0, + 0, + 0, + 0, + 22278, + 22280, + 22283, + 22286, + 0, + 0, + 22287, + 22289, + 0, + 0, + 22290, + 0, + 22293, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22295, + 0, + 22301, + 22302, + 0, + 0, + 0, + 22305, + 0, + 22308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22315, + 0, + 0, + 0, + 22317, + 0, + 22334, + 0, + 0, + 0, + 22335, + 0, + 0, + 0, + 0, + 0, + 22336, + 0, + 22338, + 22344, + 0, + 22347, + 22349, + 0, + 22350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22357, + 0, + 0, + 0, + 0, + 0, + 22358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22359, + 22360, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22361, + 22366, + 0, + 0, + 22369, + 0, + 22370, + 22373, + 0, + 0, + 0, + 0, + 0, + 22375, + 0, + 22377, + 0, + 0, + 0, + 0, + 0, + 22378, + 0, + 0, + 0, + 0, + 22381, + 0, + 0, + 0, + 0, + 22382, + 0, + 22383, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22391, + 0, + 0, + 22392, + 22395, + 22396, + 22402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22405, + 0, + 0, + 22406, + 0, + 0, + 22408, + 0, + 0, + 22409, + 22410, + 0, + 0, + 0, + 0, + 0, + 0, + 22424, + 0, + 0, + 0, + 0, + 22426, + 0, + 0, + 0, + 22427, + 0, + 22428, + 0, + 22432, + 0, + 22435, + 22442, + 22443, + 0, + 0, + 0, + 0, + 22444, + 0, + 0, + 0, + 0, + 0, + 22446, + 0, + 22454, + 0, + 22455, + 0, + 0, + 0, + 22465, + 0, + 22470, + 0, + 22471, + 0, + 0, + 0, + 0, + 22472, + 22473, + 0, + 22487, + 0, + 0, + 0, + 22488, + 0, + 0, + 0, + 0, + 22489, + 0, + 0, + 22499, + 0, + 0, + 0, + 0, + 0, + 0, + 22514, + 0, + 0, + 22515, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22516, + 0, + 0, + 0, + 22517, + 22520, + 0, + 0, + 0, + 22534, + 0, + 0, + 22535, + 0, + 0, + 22536, + 0, + 22540, + 22553, + 0, + 22555, + 0, + 0, + 0, + 0, + 22561, + 0, + 0, + 22562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22566, + 0, + 0, + 0, + 0, + 22567, + 22568, + 0, + 0, + 22575, + 0, + 22579, + 0, + 22582, + 22583, + 22585, + 0, + 0, + 0, + 0, + 0, + 22586, + 0, + 0, + 22587, + 0, + 0, + 22590, + 0, + 0, + 0, + 0, + 0, + 22591, + 0, + 22592, + 0, + 0, + 0, + 0, + 0, + 22593, + 0, + 22602, + 0, + 0, + 22604, + 0, + 0, + 22609, + 0, + 0, + 22618, + 0, + 0, + 0, + 0, + 0, + 0, + 22619, + 0, + 22624, + 22625, + 0, + 0, + 22638, + 0, + 0, + 0, + 0, + 0, + 22639, + 0, + 0, + 22640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22644, + 0, + 22645, + 
22647, + 0, + 0, + 0, + 0, + 22652, + 22653, + 0, + 0, + 0, + 22654, + 0, + 22655, + 0, + 0, + 0, + 22656, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22673, + 22675, + 22676, + 0, + 0, + 22678, + 22679, + 0, + 22691, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22693, + 0, + 0, + 22696, + 0, + 22699, + 22707, + 22708, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22718, + 0, + 22719, + 0, + 0, + 0, + 0, + 22723, + 0, + 0, + 0, + 22724, + 22725, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22726, + 22728, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22729, + 0, + 0, + 22731, + 0, + 0, + 0, + 0, + 22732, + 22735, + 22736, + 0, + 0, + 0, + 0, + 22739, + 0, + 22749, + 0, + 0, + 22751, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22758, + 0, + 0, + 0, + 0, + 0, + 22760, + 0, + 0, + 0, + 0, + 0, + 22764, + 22765, + 22766, + 0, + 22768, + 0, + 0, + 0, + 0, + 0, + 22769, + 22770, + 0, + 0, + 0, + 0, + 0, + 0, + 22771, + 0, + 0, + 22772, + 22775, + 0, + 22776, + 22777, + 22780, + 0, + 0, + 22782, + 22784, + 0, + 22787, + 0, + 22789, + 22796, + 0, + 0, + 0, + 0, + 0, + 22798, + 0, + 0, + 0, + 0, + 0, + 0, + 22802, + 0, + 22803, + 22804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22805, + 0, + 0, + 22810, + 22811, + 22814, + 22816, + 0, + 22825, + 22826, + 0, + 22831, + 22833, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22834, + 0, + 22836, + 22838, + 0, + 22839, + 0, + 0, + 0, + 0, + 0, + 22840, + 0, + 22847, + 0, + 0, + 0, + 0, + 0, + 22856, + 22857, + 0, + 22858, + 22859, + 0, + 0, + 22862, + 0, + 0, + 22864, + 0, + 0, + 0, + 0, + 22865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22866, + 0, + 22867, + 22868, + 0, + 0, + 0, + 0, + 22869, + 0, + 22871, + 0, + 22872, + 0, + 22873, + 22881, + 22882, + 22884, + 22885, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22886, + 22887, + 0, + 22894, + 0, + 22895, + 0, + 0, + 0, + 22900, + 0, + 22901, + 0, + 0, + 0, + 0, + 22904, + 0, + 0, + 0, + 0, + 22905, + 22907, + 0, + 0, + 0, + 22915, + 22917, + 0, + 0, + 22918, + 0, + 0, + 0, + 22920, + 0, + 0, + 0, + 22929, + 22930, + 0, + 0, + 0, + 22941, + 22942, + 0, + 0, + 0, + 22943, + 0, + 0, + 0, + 22944, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22946, + 0, + 22947, + 0, + 0, + 22954, + 0, + 22956, + 0, + 0, + 22962, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22963, + 0, + 0, + 22964, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22965, + 0, + 22968, + 0, + 0, + 0, + 22969, + 0, + 0, + 0, + 0, + 0, + 22970, + 0, + 22971, + 0, + 0, + 0, + 0, + 0, + 22978, + 0, + 0, + 22979, + 0, + 22987, + 0, + 0, + 22989, + 0, + 0, + 0, + 0, + 0, + 0, + 22990, + 0, + 23005, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23006, + 23007, + 23008, + 0, + 0, + 23023, + 23024, + 23029, + 0, + 0, + 0, + 0, + 23030, + 0, + 0, + 0, + 0, + 0, + 23032, + 0, + 0, + 0, + 0, + 0, + 23035, + 0, + 0, + 0, + 0, + 23038, + 0, + 0, + 0, + 23048, + 0, + 23049, + 23052, + 23053, + 23060, + 23061, + 0, + 23063, + 0, + 0, + 0, + 0, + 23067, + 23068, + 0, + 0, + 0, + 23069, + 23073, + 0, + 0, + 0, + 23127, + 0, + 23128, + 0, + 0, + 0, + 0, + 0, + 23129, + 0, + 23138, + 23141, + 0, + 23149, + 0, + 0, + 23150, + 0, + 0, + 0, + 23152, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23154, + 0, + 0, + 0, + 0, + 23157, + 23159, + 23160, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23180, + 0, + 0, + 0, + 0, + 23181, + 0, + 0, + 23188, + 0, + 23189, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23195, + 0, + 0, + 23196, + 23199, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23202, + 0, + 23204, + 0, + 23207, 
+ 0, + 23209, + 23210, + 0, + 0, + 0, + 0, + 0, + 0, + 23227, + 23229, + 0, + 0, + 23230, + 23234, + 23238, + 0, + 0, + 0, + 23245, + 23246, + 23248, + 0, + 0, + 0, + 0, + 23249, + 23254, + 0, + 0, + 0, + 23265, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23268, + 0, + 23276, + 0, + 0, + 0, + 0, + 23277, + 0, + 23297, + 0, + 23298, + 0, + 0, + 0, + 0, + 23299, + 0, + 23302, + 0, + 0, + 23303, + 23312, + 0, + 0, + 23314, + 0, + 23320, + 0, + 0, + 0, + 0, + 23324, + 0, + 23325, + 0, + 23328, + 0, + 23334, + 0, + 0, + 0, + 23337, + 0, + 0, + 0, + 0, + 23343, + 23344, + 23346, + 0, + 23348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23353, + 0, + 0, + 0, + 0, + 23355, + 0, + 23356, + 23358, + 0, + 0, + 0, + 23359, + 23360, + 0, + 23361, + 0, + 23367, + 0, + 23369, + 0, + 0, + 23373, + 0, + 23378, + 23379, + 0, + 23382, + 23383, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23387, + 0, + 0, + 0, + 0, + 0, + 0, + 23388, + 23390, + 0, + 0, + 23393, + 23398, + 0, + 0, + 0, + 23399, + 0, + 0, + 0, + 23400, + 0, + 0, + 0, + 0, + 23401, + 0, + 0, + 0, + 23415, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23416, + 0, + 23422, + 0, + 23443, + 23444, + 0, + 0, + 0, + 0, + 23448, + 0, + 23454, + 0, + 0, + 0, + 0, + 0, + 0, + 23456, + 0, + 0, + 23458, + 23464, + 0, + 0, + 0, + 0, + 0, + 0, + 23465, + 0, + 0, + 0, + 23470, + 23471, + 0, + 0, + 23472, + 0, + 0, + 0, + 23473, + 23496, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23497, + 0, + 23499, + 0, + 0, + 23502, + 0, + 0, + 23503, + 0, + 0, + 23513, + 0, + 0, + 23515, + 0, + 0, + 0, + 23517, + 0, + 0, + 0, + 0, + 23518, + 23519, + 23521, + 23524, + 0, + 23525, + 23528, + 23539, + 0, + 0, + 0, + 0, + 0, + 23541, + 0, + 0, + 23544, + 0, + 0, + 23556, + 0, + 0, + 23557, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23559, + 0, + 23560, + 0, + 0, + 23561, + 0, + 0, + 23566, + 0, + 0, + 0, + 0, + 0, + 23568, + 23569, + 23570, + 0, + 0, + 0, + 0, + 23571, + 0, + 23574, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23575, + 0, + 23579, + 0, + 0, + 23581, + 0, + 0, + 0, + 0, + 0, + 0, + 23587, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23596, + 23598, + 0, + 0, + 0, + 0, + 23602, + 23606, + 0, + 0, + 23607, + 0, + 23608, + 0, + 0, + 0, + 23614, + 23616, + 0, + 0, + 0, + 0, + 0, + 23618, + 0, + 0, + 23619, + 0, + 0, + 0, + 0, + 23621, + 23626, + 0, + 23627, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23629, + 0, + 23630, + 0, + 0, + 0, + 0, + 23634, + 0, + 23636, + 0, + 0, + 0, + 0, + 0, + 0, + 23638, + 0, + 0, + 0, + 0, + 23640, + 23667, + 0, + 23669, + 0, + 0, + 0, + 23681, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23682, + 0, + 23683, + 0, + 0, + 0, + 0, + 0, + 23684, + 0, + 0, + 0, + 23685, + 23689, + 0, + 23693, + 23694, + 23700, + 0, + 23702, + 0, + 23709, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23712, + 0, + 0, + 0, + 0, + 0, + 23714, + 0, + 0, + 23715, + 0, + 0, + 0, + 0, + 23718, + 0, + 0, + 23720, + 0, + 0, + 0, + 0, + 23722, + 0, + 0, + 0, + 23726, + 23729, + 0, + 23741, + 23746, + 0, + 23748, + 0, + 0, + 0, + 0, + 23749, + 0, + 0, + 0, + 0, + 0, + 23750, + 0, + 0, + 0, + 0, + 23751, + 0, + 23753, + 0, + 0, + 0, + 0, + 23757, + 23765, + 0, + 0, + 0, + 23770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23771, + 0, + 23772, + 23781, + 0, + 0, + 23796, + 0, + 0, + 0, + 0, + 23798, + 0, + 23799, + 0, + 0, + 0, + 23802, + 0, + 0, + 23806, + 0, + 23807, + 0, + 0, + 23808, + 0, + 23809, + 0, + 23819, + 0, + 0, + 0, + 23821, + 0, + 23827, + 0, + 0, + 0, + 23829, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23830, + 0, + 0, + 0, + 0, + 0, + 0, + 
23832, + 23833, + 23834, + 23835, + 0, + 0, + 0, + 0, + 23837, + 23838, + 0, + 0, + 0, + 0, + 0, + 23846, + 0, + 0, + 0, + 0, + 0, + 0, + 23847, + 0, + 0, + 0, + 0, + 0, + 23879, + 23881, + 0, + 0, + 23882, + 23883, + 23895, + 0, + 23899, + 0, + 0, + 0, + 0, + 23901, + 0, + 0, + 0, + 0, + 0, + 0, + 23902, + 0, + 0, + 0, + 0, + 0, + 23903, + 23905, + 0, + 23906, + 0, + 23907, + 23918, + 23919, + 23920, + 0, + 23922, + 0, + 23924, + 0, + 23927, + 0, + 23934, + 0, + 23937, + 23941, + 0, + 23942, + 23946, + 0, + 0, + 0, + 0, + 0, + 23955, + 23956, + 23958, + 0, + 0, + 0, + 0, + 0, + 0, + 23959, + 0, + 23962, + 23965, + 0, + 23966, + 0, + 0, + 0, + 0, + 23967, + 23968, + 0, + 0, + 23973, + 0, + 0, + 23974, + 0, + 0, + 0, + 0, + 23975, + 0, + 23976, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23977, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23980, + 0, + 0, + 23984, + 0, + 23985, + 0, + 0, + 23987, + 0, + 0, + 23988, + 23990, + 23991, + 0, + 0, + 0, + 0, + 0, + 0, + 23992, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23994, + 0, + 0, + 0, + 23998, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23999, + 0, + 0, + 24003, + 0, + 24004, + 0, + 24006, + 0, + 0, + 0, + 24007, + 0, + 0, + 24008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24009, + 0, + 0, + 24010, + 0, + 0, + 24011, + 0, + 0, + 24013, + 24014, + 0, + 0, + 24015, + 24016, + 24027, + 0, + 24028, + 24029, + 0, + 24030, + 0, + 0, + 0, + 0, + 0, + 24033, + 24034, + 0, + 24035, + 0, + 0, + 24036, + 0, + 0, + 24044, + 0, + 24048, + 24049, + 24063, + 24067, + 0, + 24068, + 24070, + 0, + 0, + 24071, + 24078, + 24087, + 0, + 24090, + 0, + 0, + 0, + 24095, + 0, + 24098, + 24101, + 24104, + 24106, + 0, + 24107, + 0, + 0, + 0, + 24108, + 0, + 0, + 0, + 0, + 24110, + 24111, + 0, + 24113, + 0, + 0, + 24115, + 24120, + 0, + 0, + 0, + 0, + 0, + 0, + 24124, + 0, + 24125, + 0, + 24126, + 0, + 24127, + 0, + 0, + 0, + 0, + 0, + 24135, + 0, + 0, + 24136, + 0, + 24137, + 24142, + 0, + 0, + 0, + 24146, + 0, + 0, + 24147, + 24149, + 24154, + 0, + 24163, + 0, + 0, + 0, + 24165, + 24166, + 24167, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24169, + 24170, + 24175, + 0, + 0, + 0, + 24178, + 0, + 0, + 24179, + 0, + 0, + 24181, + 0, + 24184, + 24197, + 0, + 24201, + 24204, + 0, + 0, + 0, + 0, + 0, + 0, + 24206, + 24212, + 24220, + 0, + 0, + 0, + 24224, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24226, + 0, + 24234, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24235, + 0, + 24236, + 0, + 0, + 0, + 0, + 0, + 24239, + 24240, + 24241, + 0, + 0, + 24248, + 0, + 0, + 24249, + 0, + 24251, + 0, + 0, + 0, + 0, + 0, + 0, + 24253, + 0, + 24268, + 0, + 0, + 0, + 24269, + 0, + 24271, + 24272, + 0, + 0, + 0, + 0, + 24273, + 0, + 0, + 24274, + 0, + 0, + 24279, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24280, + 0, + 24293, + 24294, + 0, + 0, + 0, + 0, + 0, + 0, + 24296, + 0, + 0, + 24323, + 0, + 0, + 0, + 24329, + 24330, + 24331, + 24339, + 0, + 24351, + 0, + 0, + 24369, + 24370, + 0, + 0, + 0, + 24371, + 0, + 0, + 0, + 0, + 24372, + 24373, + 24374, + 0, + 0, + 0, + 0, + 0, + 24378, + 0, + 0, + 0, + 0, + 24379, + 0, + 24381, + 0, + 24383, + 24389, + 0, + 24390, + 0, + 0, + 24394, + 24395, + 24400, + 0, + 0, + 0, + 24401, + 24402, + 0, + 24406, + 0, + 0, + 0, + 24411, + 0, + 0, + 0, + 24415, + 0, + 24416, + 0, + 0, + 0, + 0, + 0, + 24417, + 0, + 24419, + 0, + 24422, + 0, + 24423, + 24428, + 0, + 24435, + 0, + 0, + 0, + 24439, + 0, + 0, + 0, + 24440, + 24442, + 24446, + 0, + 0, + 0, + 24447, + 24448, + 24449, + 24452, + 0, + 0, + 
0, + 0, + 24453, + 24457, + 0, + 0, + 24458, + 24459, + 24460, + 0, + 24465, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24470, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24471, + 0, + 24473, + 24474, + 24475, + 24476, + 0, + 24478, + 0, + 0, + 0, + 0, + 24480, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24481, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24482, + 24485, + 0, + 0, + 0, + 0, + 24486, + 0, + 0, + 0, + 24488, + 0, + 0, + 0, + 24494, + 0, + 0, + 0, + 0, + 24497, + 0, + 0, + 24498, + 0, + 0, + 0, + 24499, + 24506, + 0, + 0, + 0, + 24507, + 0, + 0, + 24511, + 0, + 0, + 24513, + 24514, + 0, + 0, + 0, + 0, + 0, + 24517, + 0, + 24518, + 0, + 24520, + 0, + 24521, + 24524, + 24525, + 0, + 0, + 0, + 0, + 0, + 24527, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24528, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24537, + 24539, + 0, + 24540, + 0, + 0, + 0, + 24548, + 0, + 0, + 0, + 0, + 0, + 24549, + 24550, + 0, + 0, + 0, + 24553, + 24554, + 0, + 24555, + 0, + 24556, + 0, + 24558, + 0, + 0, + 0, + 0, + 0, + 24560, + 0, + 0, + 0, + 24561, + 0, + 0, + 0, + 0, + 0, + 24562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24567, + 0, + 0, + 0, + 0, + 0, + 24569, + 0, + 0, + 0, + 24574, + 0, + 24575, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24577, + 24581, + 0, + 24584, + 0, + 0, + 0, + 0, + 0, + 24585, + 0, + 0, + 0, + 0, + 0, + 24586, + 0, + 0, + 24587, + 0, + 24588, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24590, + 24591, + 0, + 0, + 0, + 0, + 24592, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24594, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24596, + 24597, + 0, + 0, + 0, + 0, + 24602, + 24603, + 0, + 0, + 0, + 0, + 24604, + 0, + 0, + 24605, + 0, + 24610, + 0, + 0, + 24611, + 0, + 0, + 0, + 0, + 24612, + 24615, + 24616, + 24624, + 0, + 0, + 0, + 24627, + 0, + 24638, + 24639, + 0, + 0, + 0, + 0, + 24640, + 0, + 0, + 0, + 24655, + 24656, + 24657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24662, + 0, + 24663, + 24664, + 0, + 0, + 0, + 0, + 0, + 24665, + 0, + 0, + 0, + 0, + 24667, + 0, + 0, + 0, + 0, + 0, + 0, + 24668, + 24669, + 0, + 24670, + 24674, + 0, + 0, + 0, + 24675, + 0, + 24678, + 0, + 0, + 24679, + 0, + 0, + 0, + 24681, + 0, + 24683, + 0, + 0, + 0, + 0, + 24684, + 0, + 24685, + 0, + 0, + 24686, + 0, + 0, + 24688, + 24689, + 0, + 0, + 0, + 0, + 24690, + 24691, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24697, + 0, + 24698, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24709, + 0, + 0, + 0, + 0, + 0, + 24710, + 0, + 24712, + 0, + 0, + 0, + 0, + 0, + 0, + 24713, + 24714, + 0, + 24715, + 0, + 24716, + 24718, + 0, + 24719, + 0, + 0, + 0, + 0, + 24720, + 0, + 0, + 24725, + 0, + 0, + 24738, + 0, + 24749, + 24750, + 0, + 0, + 0, + 24752, + 0, + 0, + 0, + 24753, + 0, + 0, + 0, + 24758, + 0, + 0, + 0, + 0, + 0, + 24762, + 0, + 24763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24764, + 0, + 0, + 0, + 0, + 0, + 24765, + 24767, + 24768, + 0, + 24772, + 0, + 0, + 0, + 0, + 24773, + 0, + 0, + 0, + 0, + 24777, + 0, + 0, + 0, + 0, + 0, + 24785, + 0, + 24786, + 24788, + 0, + 0, + 0, + 24789, + 0, + 0, + 0, + 0, + 24794, + 24798, + 0, + 24799, + 24800, + 0, + 0, + 0, + 24803, + 0, + 24804, + 24806, + 0, + 24807, + 0, + 0, + 0, + 24810, + 0, + 0, + 0, + 0, + 0, + 0, + 24827, + 24828, + 0, + 24835, + 0, + 0, + 0, + 0, + 0, + 0, + 24836, + 0, + 0, + 0, + 0, + 0, + 24839, + 0, + 24843, + 24844, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24847, + 0, + 0, + 
24848, + 0, + 0, + 0, + 0, + 0, + 0, + 24849, + 0, + 24850, + 24851, + 0, + 0, + 0, + 24852, + 0, + 24853, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24854, + 0, + 24855, + 0, + 0, + 24868, + 0, + 0, + 0, + 24883, + 0, + 0, + 0, + 24884, + 0, + 24895, + 24897, + 0, + 0, + 0, + 0, + 0, + 24899, + 0, + 0, + 0, + 0, + 0, + 24900, + 0, + 24913, + 0, + 0, + 0, + 0, + 0, + 0, + 24914, + 0, + 0, + 24917, + 24930, + 24931, + 0, + 0, + 0, + 24932, + 0, + 0, + 24939, + 0, + 0, + 24942, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24945, + 24950, + 0, + 24951, + 0, + 0, + 24953, + 0, + 0, + 0, + 24954, + 0, + 24959, + 0, + 0, + 0, + 24961, + 0, + 0, + 24962, + 0, + 24964, + 24968, + 24970, + 24972, + 0, + 0, + 0, + 0, + 0, + 24976, + 0, + 0, + 0, + 24977, + 0, + 24982, + 0, + 0, + 24983, + 0, + 0, + 24984, + 0, + 0, + 0, + 24993, + 0, + 0, + 0, + 24994, + 0, + 0, + 25001, + 0, + 0, + 0, + 25003, + 0, + 0, + 25018, + 0, + 0, + 25023, + 0, + 0, + 0, + 25034, + 0, + 0, + 25035, + 25036, + 0, + 25037, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25039, + 0, + 0, + 0, + 0, + 0, + 25040, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25042, + 0, + 0, + 25043, + 25045, + 0, + 0, + 0, + 0, + 0, + 0, + 25049, + 0, + 0, + 25051, + 0, + 25052, + 25053, + 0, + 0, + 25054, + 0, + 0, + 0, + 25055, + 0, + 0, + 0, + 0, + 25057, + 25059, + 0, + 0, + 25060, + 25064, + 0, + 25065, + 25069, + 25070, + 0, + 0, + 0, + 0, + 25072, + 0, + 25073, + 0, + 25090, + 0, + 0, + 25092, + 25093, + 25101, + 0, + 0, + 0, + 0, + 0, + 0, + 25105, + 25108, + 0, + 0, + 25113, + 0, + 0, + 25115, + 25116, + 0, + 0, + 0, + 0, + 0, + 0, + 25117, + 0, + 0, + 0, + 25120, + 25121, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25125, + 0, + 0, + 0, + 25126, + 0, + 25130, + 25134, + 0, + 25139, + 0, + 25143, + 0, + 0, + 0, + 25151, + 0, + 25161, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25163, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25174, + 0, + 25175, + 0, + 25207, + 0, + 0, + 0, + 25209, + 0, + 0, + 0, + 0, + 25213, + 0, + 25219, + 0, + 25223, + 0, + 25225, + 0, + 0, + 0, + 25227, + 0, + 0, + 0, + 25228, + 0, + 0, + 0, + 25229, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25231, + 25233, + 0, + 0, + 0, + 0, + 25237, + 25239, + 0, + 0, + 0, + 25243, + 0, + 0, + 0, + 25252, + 0, + 25257, + 25258, + 0, + 0, + 0, + 0, + 25260, + 25265, + 0, + 25268, + 0, + 0, + 25273, + 25324, + 0, + 25325, + 0, + 25326, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25327, + 0, + 0, + 0, + 0, + 0, + 25328, + 0, + 0, + 0, + 0, + 0, + 0, + 25332, + 0, + 0, + 0, + 25333, + 0, + 0, + 0, + 25336, + 25337, + 25338, + 0, + 0, + 25343, + 0, + 25350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25352, + 0, + 25354, + 0, + 25375, + 0, + 25379, + 0, + 0, + 0, + 0, + 25384, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25386, + 0, + 25388, + 0, + 25390, + 0, + 0, + 25399, + 0, + 0, + 25401, + 0, + 0, + 0, + 25402, + 0, + 0, + 0, + 25407, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25413, + 25415, + 0, + 0, + 25417, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25419, + 0, + 0, + 0, + 25421, + 0, + 0, + 0, + 25424, + 0, + 0, + 0, + 0, + 25433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25435, + 0, + 0, + 0, + 0, + 0, + 0, + 25436, + 0, + 0, + 0, + 25437, + 0, + 0, + 25440, + 0, + 0, + 0, + 0, + 0, + 0, + 25442, + 0, + 0, + 25443, + 0, + 25446, + 0, + 0, + 25449, + 0, + 0, + 0, + 25450, + 0, + 0, + 0, + 0, + 25452, + 0, + 25453, + 25454, + 25455, + 0, + 0, + 0, + 25456, + 0, + 25457, + 0, + 0, + 0, + 25459, + 0, + 25461, + 0, + 25468, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 25469, + 0, + 0, + 0, + 0, + 0, + 25471, + 0, + 0, + 0, + 0, + 0, + 25474, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25475, + 0, + 0, + 0, + 0, + 25477, + 0, + 0, + 0, + 0, + 25483, + 0, + 0, + 0, + 0, + 0, + 25484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25485, + 0, + 25497, + 0, + 0, + 25498, + 0, + 25504, + 0, + 25510, + 0, + 25512, + 0, + 0, + 25513, + 25514, + 0, + 0, + 0, + 0, + 0, + 0, + 25517, + 25518, + 25519, + 0, + 25520, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25521, + 0, + 25522, + 25527, + 25534, + 0, + 25536, + 0, + 25537, + 0, + 0, + 25548, + 25550, + 0, + 0, + 25551, + 0, + 25552, + 0, + 0, + 0, + 0, + 0, + 25554, + 0, + 25555, + 0, + 25556, + 25557, + 25568, + 0, + 0, + 0, + 25570, + 25571, + 0, + 0, + 0, + 0, + 0, + 0, + 25574, + 0, + 0, + 0, + 0, + 25579, + 0, + 0, + 0, + 25581, + 0, + 0, + 0, + 25582, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25588, + 0, + 0, + 0, + 0, + 25589, + 0, + 0, + 0, + 0, + 25590, + 0, + 25591, + 25592, + 25593, + 0, + 25594, + 0, + 0, + 0, + 25596, + 0, + 25597, + 25615, + 0, + 0, + 0, + 0, + 0, + 25618, + 0, + 0, + 0, + 0, + 25619, + 25623, + 0, + 0, + 25629, + 0, + 0, + 25631, + 0, + 0, + 0, + 25635, + 25636, + 0, + 0, + 25649, + 0, + 0, + 0, + 0, + 25654, + 0, + 0, + 0, + 25661, + 25663, + 0, + 0, + 25671, + 0, + 0, + 25678, + 25698, + 0, + 25699, + 25702, + 25703, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25704, + 0, + 0, + 0, + 0, + 0, + 25706, + 0, + 0, + 25710, + 0, + 25711, + 0, + 25712, + 0, + 25715, + 25716, + 25717, + 0, + 0, + 25718, + 25728, + 25732, + 0, + 0, + 0, + 25734, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25737, + 0, + 0, + 25739, + 0, + 0, + 0, + 25740, + 0, + 25741, + 25745, + 0, + 25746, + 0, + 25748, + 25772, + 25778, + 0, + 0, + 0, + 0, + 0, + 25780, + 0, + 0, + 0, + 0, + 25781, + 0, + 25782, + 25784, + 25785, + 0, + 0, + 0, + 25789, + 0, + 0, + 0, + 0, + 0, + 0, + 25797, + 25801, + 0, + 0, + 0, + 25808, + 25809, + 0, + 0, + 25811, + 25814, + 25815, + 0, + 0, + 25817, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25820, + 0, + 0, + 0, + 0, + 25832, + 25833, + 0, + 0, + 0, + 25846, + 0, + 0, + 0, + 25847, + 25848, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25849, + 25850, + 0, + 0, + 25851, + 0, + 0, + 25852, + 0, + 25862, + 0, + 0, + 0, + 25863, + 25865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25867, + 25868, + 0, + 25869, + 25874, + 0, + 25875, + 0, + 25876, + 25877, + 0, + 0, + 0, + 0, + 25878, + 25902, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25903, + 25904, + 25905, + 0, + 0, + 0, + 25908, + 25909, + 0, + 0, + 0, + 0, + 25910, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25912, + 0, + 25913, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25914, + 0, + 0, + 25916, + 0, + 0, + 0, + 0, + 0, + 25917, + 25927, + 0, + 0, + 0, + 0, + 25928, + 0, + 0, + 25930, + 0, + 0, + 0, + 25933, + 0, + 0, + 25938, + 25942, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25945, + 0, + 25950, + 0, + 25956, + 0, + 0, + 25961, + 25962, + 0, + 0, + 25963, + 0, + 25964, + 25965, + 25966, + 0, + 0, + 0, + 0, + 0, + 25967, + 0, + 0, + 0, + 0, + 25968, + 0, + 0, + 0, + 25969, + 25971, + 0, + 0, + 0, + 0, + 0, + 25973, + 25975, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25978, + 0, + 25981, + 0, + 0, + 0, + 25982, + 0, + 0, + 0, + 25984, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25993, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26002, + 0, + 0, + 0, + 26005, + 0, + 0, + 0, + 26006, + 26007, + 0, + 0, + 26014, + 26015, + 26016, + 0, + 0, + 0, + 0, + 0, + 0, + 26017, + 26018, + 26020, + 0, + 26022, + 26023, + 0, + 
0, + 0, + 26024, + 26028, + 0, + 26029, + 26033, + 26034, + 26044, + 0, + 0, + 0, + 0, + 0, + 26046, + 0, + 0, + 26047, + 0, + 0, + 26049, + 0, + 26050, + 0, + 26051, + 0, + 0, + 0, + 0, + 0, + 26053, + 0, + 0, + 0, + 0, + 26054, + 26059, + 0, + 0, + 0, + 0, + 0, + 0, + 26060, + 0, + 26066, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26067, + 0, + 26069, + 0, + 0, + 26071, + 0, + 0, + 0, + 26073, + 0, + 26074, + 26077, + 0, + 0, + 0, + 0, + 26078, + 0, + 0, + 0, + 26079, + 0, + 26090, + 0, + 0, + 26094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26095, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26096, + 26101, + 0, + 26107, + 26122, + 0, + 26124, + 0, + 0, + 26125, + 0, + 0, + 0, + 0, + 0, + 0, + 26136, + 26141, + 26155, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26164, + 26166, + 0, + 0, + 0, + 26167, + 0, + 26170, + 26171, + 0, + 0, + 26172, + 0, + 0, + 26174, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26175, + 0, + 0, + 0, + 26176, + 26177, + 0, + 26321, + 26322, + 0, + 26323, + 0, + 0, + 26324, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26325, + 0, + 26331, + 0, + 0, + 0, + 0, + 0, + 0, + 26335, + 0, + 0, + 0, + 26350, + 0, + 0, + 0, + 26379, + 0, + 0, + 26382, + 26383, + 26385, + 0, + 0, + 26392, + 26406, + 0, + 0, + 0, + 0, + 26411, + 0, + 0, + 0, + 0, + 0, + 26412, + 0, + 0, + 26420, + 0, + 0, + 26423, + 0, + 26424, + 26426, + 26432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26435, + 0, + 26436, + 0, + 0, + 0, + 0, + 0, + 26441, + 0, + 26444, + 0, + 0, + 0, + 26446, + 0, + 0, + 0, + 0, + 26447, + 0, + 0, + 0, + 0, + 26449, + 0, + 26450, + 26452, + 0, + 26453, + 26454, + 0, + 0, + 0, + 26455, + 0, + 0, + 0, + 26456, + 0, + 0, + 26458, + 0, + 0, + 26460, + 0, + 26463, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26464, + 26470, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26473, + 0, + 0, + 26474, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26475, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26477, + 0, + 26485, + 0, + 0, + 26486, + 0, + 26487, + 0, + 0, + 26488, + 26493, + 26494, + 0, + 0, + 26495, + 0, + 26497, + 26504, + 26506, + 0, + 0, + 0, + 0, + 0, + 26507, + 0, + 0, + 0, + 0, + 0, + 26509, + 0, + 0, + 26510, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26512, + 0, + 26513, + 26515, + 0, + 0, + 0, + 26518, + 0, + 0, + 0, + 26519, + 0, + 26524, + 26526, + 0, + 0, + 0, + 26527, + 0, + 26532, + 0, + 26533, + 26537, + 26558, + 0, + 0, + 0, + 26559, + 0, + 0, + 0, + 26571, + 0, + 0, + 26573, + 0, + 26588, + 0, + 26593, + 0, + 0, + 0, + 0, + 0, + 0, + 26603, + 0, + 26604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26606, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26607, + 26609, + 26611, + 26614, + 0, + 0, + 0, + 26616, + 26620, + 0, + 26621, + 0, + 0, + 0, + 0, + 0, + 26627, + 0, + 26629, + 0, + 0, + 26630, + 0, + 0, + 26632, + 26643, + 0, + 0, + 0, + 26644, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26646, + 26647, + 0, + 0, + 0, + 26650, + 0, + 0, + 26656, + 0, + 0, + 0, + 0, + 26663, + 26670, + 26671, + 0, + 0, + 0, + 26685, + 26686, + 26687, + 0, + 26689, + 0, + 0, + 0, + 0, + 26744, + 0, + 26745, + 0, + 26747, + 26748, + 0, + 26749, + 26750, + 26751, + 0, + 0, + 0, + 0, + 26752, + 26755, + 0, + 0, + 0, + 26756, + 26769, + 0, + 0, + 0, + 26774, + 0, + 0, + 0, + 0, + 0, + 26775, + 0, + 26777, + 26778, + 0, + 26786, + 0, + 0, + 0, + 26787, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26788, + 0, + 0, + 26789, + 0, + 0, + 0, + 0, + 0, + 26791, + 0, + 
26792, + 26793, + 0, + 0, + 0, + 26794, + 0, + 26797, + 26798, + 0, + 0, + 0, + 26800, + 0, + 0, + 26803, + 0, + 26804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26805, + 0, + 0, + 26808, + 0, + 0, + 26809, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26812, + 0, + 26825, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26826, + 0, + 0, + 26827, + 26829, + 26834, + 0, + 0, + 0, + 0, + 26835, + 0, + 0, + 26849, + 0, + 26851, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26852, + 0, + 26853, + 26857, + 0, + 26858, + 0, + 26859, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26876, + 0, + 26878, + 26882, + 26883, + 0, + 0, + 0, + 0, + 26890, + 26894, + 0, + 0, + 0, + 0, + 26895, + 26896, + 0, + 0, + 0, + 0, + 0, + 26900, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26911, + 26913, + 26914, + 26915, + 26916, + 26919, + 0, + 0, + 0, + 26921, + 26922, + 0, + 0, + 26925, + 0, + 0, + 0, + 26928, + 0, + 0, + 26929, + 26930, + 0, + 0, + 0, + 26931, + 0, + 26932, + 0, + 0, + 0, + 0, + 0, + 26933, + 0, + 0, + 0, + 0, + 0, + 0, + 26937, + 0, + 0, + 26943, + 0, + 0, + 26944, + 0, + 0, + 0, + 26946, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26956, + 0, + 26958, + 0, + 0, + 26963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26965, + 0, + 26969, + 26970, + 26972, + 0, + 0, + 0, + 0, + 0, + 26973, + 0, + 26974, + 0, + 26978, + 0, + 26980, + 0, + 0, + 0, + 0, + 0, + 0, + 26982, + 0, + 26986, + 26987, + 0, + 26990, + 0, + 0, + 0, + 0, + 27003, + 27006, + 0, + 0, + 27007, + 27010, + 27012, + 27013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27014, + 27015, + 27018, + 0, + 27019, + 0, + 0, + 0, + 0, + 0, + 27025, + 0, + 0, + 0, + 27026, + 0, + 0, + 0, + 0, + 27029, + 27030, + 27031, + 27034, + 0, + 0, + 27036, + 27037, + 0, + 0, + 0, + 27038, + 27042, + 0, + 0, + 0, + 27044, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27045, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27046, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27047, + 27049, + 0, + 27050, + 0, + 0, + 0, + 27051, + 27052, + 0, + 27055, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27056, + 27058, + 27059, + 0, + 27061, + 0, + 27064, + 0, + 0, + 0, + 0, + 0, + 27069, + 0, + 0, + 27070, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27072, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27076, + 0, + 0, + 0, + 0, + 0, + 27078, + 0, + 27079, + 0, + 0, + 0, + 27081, + 0, + 0, + 0, + 0, + 0, + 0, + 27082, + 0, + 27083, + 27086, + 0, + 0, + 0, + 0, + 27087, + 0, + 0, + 0, + 0, + 0, + 27088, + 27090, + 0, + 27094, + 0, + 0, + 27095, + 0, + 27099, + 27102, + 0, + 0, + 0, + 27103, + 0, + 0, + 0, + 0, + 27105, + 0, + 0, + 0, + 27106, + 0, + 0, + 0, + 0, + 0, + 0, + 27107, + 0, + 0, + 0, + 0, + 27108, + 27117, + 0, + 0, + 0, + 0, + 27118, + 0, + 0, + 27124, + 0, + 27126, + 0, + 0, + 27130, + 27131, + 0, + 0, + 0, + 0, + 0, + 0, + 27147, + 0, + 0, + 0, + 0, + 27148, + 27149, + 0, + 0, + 0, + 0, + 27150, + 27151, + 0, + 27152, + 0, + 27159, + 0, + 0, + 0, + 27164, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27175, + 0, + 27189, + 0, + 0, + 27191, + 0, + 27193, + 0, + 27195, + 0, + 27198, + 0, + 0, + 0, + 0, + 0, + 27200, + 0, + 0, + 0, + 0, + 27202, + 0, + 0, + 0, + 0, + 27203, + 0, + 0, + 27204, + 0, + 0, + 27206, + 0, + 27207, + 0, + 0, + 0, + 0, + 27209, + 0, + 0, + 0, + 27213, + 0, + 0, + 27216, + 27219, + 27220, + 27222, + 27223, + 0, + 27224, + 0, + 27225, + 27226, + 0, + 0, + 27233, + 0, + 0, + 0, + 0, + 27235, + 0, + 27237, + 0, + 27238, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27239, + 0, + 27242, + 27243, + 0, + 27250, + 0, + 0, 
+ 0, + 27251, + 0, + 27253, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27254, + 27255, + 27258, + 0, + 0, + 0, + 27259, + 0, + 0, + 0, + 0, + 0, + 0, + 27267, + 0, + 27276, + 27278, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27296, + 27297, + 27301, + 0, + 0, + 0, + 0, + 0, + 0, + 27302, + 0, + 0, + 0, + 0, + 0, + 0, + 27312, + 27313, + 0, + 0, + 0, + 0, + 0, + 27318, + 0, + 27320, + 0, + 27329, + 0, + 27330, + 27331, + 0, + 27332, + 0, + 0, + 0, + 0, + 27340, + 0, + 0, + 0, + 27348, + 0, + 0, + 0, + 0, + 0, + 0, + 27350, + 0, + 27351, + 0, + 0, + 0, + 0, + 27355, + 0, + 0, + 27358, + 27359, + 27361, + 0, + 0, + 0, + 27365, + 0, + 27367, + 0, + 27376, + 27378, + 0, + 0, + 27379, + 0, + 0, + 0, + 0, + 0, + 0, + 27396, + 0, + 27397, + 27404, + 0, + 0, + 0, + 0, + 0, + 27408, + 0, + 0, + 0, + 0, + 27453, + 0, + 0, + 0, + 27456, + 0, + 0, + 0, + 27458, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27459, + 0, + 0, + 0, + 27460, + 0, + 0, + 27461, + 0, + 27465, + 27467, + 0, + 0, + 27469, + 0, + 27470, + 0, + 27471, + 0, + 27477, + 27482, + 0, + 0, + 0, + 0, + 0, + 0, + 27484, + 0, + 0, + 0, + 0, + 0, + 0, + 27485, + 0, + 0, + 0, + 0, + 0, + 27493, + 0, + 27494, + 27502, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27511, + 27532, + 0, + 0, + 0, + 27533, + 27545, + 0, + 0, + 0, + 27546, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27547, + 0, + 0, + 27549, + 27550, + 0, + 27551, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27555, + 0, + 0, + 27571, + 0, + 27573, + 27574, + 27575, + 27577, + 0, + 27578, + 0, + 0, + 27579, + 27585, + 0, + 0, + 0, + 0, + 0, + 27586, + 0, + 0, + 27588, + 27589, + 0, + 0, + 0, + 0, + 27596, + 0, + 0, + 27600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27608, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27610, + 0, + 0, + 0, + 27618, + 0, + 0, + 27620, + 0, + 0, + 0, + 27631, + 0, + 0, + 27632, + 27634, + 0, + 27636, + 27638, + 0, + 0, + 0, + 27643, + 0, + 27644, + 27649, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27651, + 27660, + 0, + 27661, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27662, + 0, + 0, + 27664, + 0, + 27665, + 0, + 0, + 0, + 27669, + 0, + 27671, + 0, + 0, + 0, + 27673, + 27674, + 0, + 0, + 0, + 27682, + 0, + 0, + 0, + 27711, + 0, + 27712, + 27713, + 27719, + 27720, + 0, + 0, + 27728, + 0, + 27729, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27731, + 0, + 0, + 27732, + 0, + 27733, + 0, + 27738, + 0, + 0, + 0, + 27742, + 0, + 0, + 0, + 27743, + 27744, + 0, + 0, + 0, + 0, + 0, + 0, + 27745, + 27746, + 0, + 0, + 0, + 27747, + 27748, + 27751, + 27752, + 0, + 0, + 0, + 27768, + 27770, + 0, + 0, + 0, + 27774, + 27775, + 0, + 27776, + 27777, + 0, + 0, + 27781, + 0, + 27784, + 0, + 27786, + 0, + 0, + 27791, + 0, + 27792, + 27793, + 27804, + 0, + 27812, + 27813, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27814, + 0, + 27825, + 0, + 27827, + 0, + 0, + 0, + 0, + 27828, + 27861, + 27862, + 0, + 0, + 0, + 27864, + 0, + 0, + 0, + 27865, + 27884, + 0, + 27889, + 0, + 0, + 0, + 0, + 0, + 27890, + 0, + 27891, + 0, + 0, + 0, + 27892, + 0, + 0, + 0, + 0, + 0, + 27897, + 27898, + 0, + 0, + 27899, + 0, + 0, + 0, + 27901, + 27905, + 0, + 0, + 27920, + 0, + 0, + 27921, + 0, + 27922, + 0, + 0, + 0, + 27931, + 27934, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27941, + 0, + 27942, + 0, + 27945, + 0, + 27947, + 27954, + 0, + 0, + 0, + 0, + 27960, + 27963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27964, + 27965, + 0, + 0, + 0, + 27967, + 0, + 27969, 
+ 27975, + 0, + 27976, + 27977, + 0, + 27981, + 0, + 27983, + 28051, + 28052, + 0, + 0, + 0, + 0, + 0, + 28056, + 0, + 0, + 0, + 0, + 0, + 0, + 28058, + 28059, + 0, + 0, + 28061, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28063, + 0, + 0, + 0, + 0, + 0, + 0, + 28066, + 0, + 0, + 0, + 0, + 0, + 0, + 28069, + 28070, + 28072, + 0, + 28073, + 0, + 0, + 28074, + 0, + 0, + 0, + 0, + 28075, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28078, + 0, + 0, + 0, + 0, + 28085, + 0, + 0, + 0, + 0, + 28086, + 0, + 0, + 0, + 0, + 0, + 0, + 28088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28090, + 0, + 28097, + 28114, + 28115, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28116, + 0, + 0, + 0, + 0, + 0, + 28118, + 0, + 28129, + 0, + 28131, + 0, + 0, + 28135, + 0, + 0, + 0, + 28140, + 28141, + 0, + 0, + 0, + 28146, + 0, + 0, + 0, + 0, + 28152, + 0, + 0, + 0, + 0, + 28155, + 28157, + 28161, + 0, + 0, + 0, + 0, + 28166, + 0, + 28167, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28172, + 0, + 0, + 0, + 0, + 0, + 0, + 28173, + 0, + 0, + 28175, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28178, + 28188, + 0, + 28190, + 0, + 0, + 0, + 0, + 0, + 28191, + 0, + 28193, + 28206, + 0, + 0, + 28207, + 28209, + 0, + 28211, + 0, + 28213, + 0, + 0, + 0, + 28215, + 28216, + 28217, + 0, + 28222, + 0, + 28223, + 28225, + 0, + 0, + 0, + 28226, + 0, + 28227, + 28229, + 28232, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28235, + 0, + 28241, + 0, + 0, + 28242, + 0, + 0, + 0, + 0, + 28243, + 0, + 0, + 0, + 28245, + 0, + 0, + 0, + 28248, + 28250, + 0, + 28251, + 28252, + 0, + 0, + 0, + 0, + 0, + 0, + 28253, + 0, + 0, + 28254, + 28255, + 0, + 0, + 28256, + 0, + 0, + 28258, + 0, + 0, + 0, + 0, + 0, + 28259, + 0, + 0, + 28260, + 0, + 0, + 28261, + 0, + 0, + 0, + 0, + 28262, + 28263, + 0, + 0, + 28264, + 0, + 0, + 0, + 28266, + 0, + 28268, + 28269, + 0, + 28270, + 28272, + 28274, + 0, + 28277, + 28278, + 0, + 0, + 0, + 28279, + 0, + 28280, + 28281, + 28283, + 0, + 28292, + 0, + 28294, + 0, + 28297, + 0, + 0, + 0, + 0, + 28299, + 0, + 0, + 0, + 0, + 0, + 28300, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28302, + 28303, + 0, + 0, + 0, + 0, + 28304, + 0, + 0, + 28305, + 0, + 28312, + 0, + 28313, + 28314, + 0, + 0, + 0, + 0, + 0, + 0, + 28315, + 0, + 0, + 0, + 28320, + 28321, + 0, + 0, + 28328, + 0, + 0, + 0, + 28329, + 28338, + 0, + 28339, + 0, + 0, + 28344, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28347, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28348, + 0, + 0, + 0, + 0, + 0, + 28411, + 0, + 28412, + 28413, + 0, + 28416, + 0, + 0, + 0, + 28420, + 0, + 0, + 0, + 0, + 0, + 28421, + 0, + 0, + 0, + 0, + 28423, + 0, + 0, + 0, + 28424, + 0, + 0, + 28428, + 0, + 0, + 0, + 0, + 0, + 28429, + 0, + 0, + 0, + 28431, + 28434, + 0, + 28458, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28464, + 0, + 0, + 0, + 0, + 28465, + 0, + 28467, + 0, + 0, + 0, + 0, + 0, + 0, + 28471, + 0, + 0, + 0, + 0, + 28474, + 0, + 28480, + 0, + 28481, + 0, + 0, + 28485, + 0, + 0, + 0, + 0, + 28486, + 28488, + 0, + 0, + 28489, + 0, + 0, + 0, + 0, + 28492, + 0, + 0, + 0, + 28495, + 0, + 28497, + 0, + 28499, + 0, + 0, + 0, + 0, + 28500, + 0, + 0, + 28502, + 28503, + 0, + 0, + 0, + 28508, + 0, + 0, + 0, + 28510, + 0, + 0, + 28512, + 28513, + 28514, + 28521, + 0, + 28526, + 0, + 28527, + 28528, + 0, + 0, + 0, + 0, + 28529, + 0, + 0, + 28532, + 0, + 0, + 28537, + 28538, + 0, + 0, + 0, + 28539, + 0, + 28548, + 0, + 28553, + 28554, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28560, + 
28563, + 0, + 0, + 28564, + 0, + 0, + 0, + 0, + 28565, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28566, + 28568, + 0, + 0, + 0, + 0, + 0, + 0, + 28569, + 0, + 0, + 0, + 28570, + 0, + 28572, + 28573, + 0, + 0, + 0, + 0, + 28575, + 0, + 0, + 0, + 0, + 28576, + 28581, + 28588, + 0, + 0, + 28589, + 0, + 0, + 0, + 28590, + 28595, + 0, + 28598, + 0, + 0, + 28601, + 0, + 0, + 28605, + 0, + 0, + 0, + 0, + 28614, + 28615, + 28619, + 0, + 0, + 0, + 0, + 0, + 0, + 28620, + 0, + 28626, + 0, + 0, + 28628, + 0, + 28631, + 0, + 28632, + 0, + 0, + 0, + 0, + 0, + 0, + 28635, + 0, + 0, + 0, + 28637, + 28638, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28639, + 0, + 28643, + 0, + 0, + 28652, + 0, + 0, + 0, + 28662, + 0, + 28670, + 28671, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28672, + 28673, + 28675, + 28676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28691, + 0, + 0, + 0, + 28695, + 0, + 0, + 0, + 28696, + 0, + 28697, + 28698, + 0, + 28705, + 0, + 28707, + 28708, + 28710, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28711, + 28728, + 0, + 0, + 0, + 28736, + 0, + 0, + 0, + 28737, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28738, + 0, + 28739, + 0, + 28741, + 0, + 0, + 28742, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28745, + 0, + 0, + 0, + 0, + 0, + 0, + 28749, + 28750, + 28752, + 28754, + 28756, + 0, + 28757, + 0, + 0, + 0, + 0, + 28759, + 28760, + 0, + 0, + 0, + 0, + 0, + 0, + 28762, + 0, + 0, + 0, + 28764, + 0, + 0, + 0, + 0, + 0, + 0, + 28766, + 0, + 28767, + 28768, + 0, + 0, + 0, + 0, + 28769, + 28770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28772, + 0, + 28773, + 0, + 28782, + 0, + 0, + 0, + 0, + 0, + 0, + 28784, + 0, + 28785, + 0, + 28786, + 0, + 0, + 0, + 28787, + 0, + 0, + 0, + 28797, + 0, + 0, + 0, + 0, + 0, + 0, + 28799, + 0, + 0, + 28801, + 0, + 0, + 0, + 0, + 28802, + 0, + 28805, + 0, + 0, + 28806, + 0, + 0, + 28807, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28808, + 0, + 0, + 0, + 0, + 0, + 28810, + 28812, + 0, + 0, + 28816, + 28819, + 0, + 0, + 28821, + 0, + 28826, + 0, + 0, + 0, + 28842, + 28852, + 0, + 0, + 28853, + 0, + 28854, + 28855, + 0, + 0, + 0, + 28857, + 0, + 0, + 0, + 28858, + 0, + 28867, + 28868, + 28869, + 0, + 0, + 0, + 28874, + 28880, + 28882, + 28890, + 28892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28895, + 0, + 0, + 0, + 28898, + 28899, + 0, + 0, + 0, + 28900, + 0, + 0, + 28904, + 0, + 28906, + 0, + 0, + 0, + 0, + 28907, + 0, + 0, + 0, + 0, + 0, + 0, + 28908, + 0, + 0, + 0, + 28910, + 0, + 28914, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28915, + 28916, + 28919, + 0, + 0, + 28920, + 0, + 28921, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28924, + 0, + 0, + 0, + 0, + 28926, + 28929, + 0, + 0, + 0, + 28930, + 0, + 28936, + 0, + 28939, + 0, + 0, + 0, + 0, + 28942, + 0, + 0, + 0, + 0, + 0, + 0, + 28956, + 0, + 0, + 0, + 28966, + 0, + 0, + 0, + 0, + 28967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28968, + 0, + 28971, + 0, + 28975, + 28976, + 0, + 28982, + 28983, + 0, + 0, + 28984, + 28989, + 28996, + 28997, + 28998, + 0, + 0, + 0, + 0, + 0, + 0, + 28999, + 0, + 0, + 0, + 0, + 0, + 29000, + 0, + 29001, + 0, + 0, + 0, + 29009, + 0, + 0, + 29011, + 0, + 0, + 29021, + 0, + 0, + 0, + 0, + 29024, + 0, + 29025, + 0, + 0, + 0, + 0, + 0, + 29026, + 0, + 0, + 0, + 29036, + 0, + 0, + 0, + 29037, + 0, + 0, + 0, + 0, + 29038, + 0, + 29045, + 0, + 29047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29051, + 0, + 0, + 0, + 
29054, + 29056, + 29062, + 0, + 29070, + 29082, + 0, + 0, + 0, + 29083, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29084, + 0, + 0, + 0, + 0, + 29085, + 29088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29090, + 29097, + 0, + 0, + 0, + 29103, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29105, + 0, + 0, + 0, + 0, + 0, + 29107, + 0, + 29109, + 0, + 0, + 0, + 29115, + 0, + 0, + 29120, + 0, + 0, + 29138, + 29140, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29152, + 0, + 29160, + 29174, + 0, + 29176, + 0, + 0, + 29180, + 0, + 29181, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29228, + 0, + 0, + 29229, + 0, + 0, + 29230, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29234, + 0, + 0, + 0, + 29241, + 0, + 29245, + 0, + 29248, + 0, + 29250, + 29256, + 29280, + 0, + 29282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29285, + 0, + 0, + 29286, + 29291, + 29292, + 0, + 0, + 0, + 0, + 29294, + 0, + 29295, + 0, + 0, + 0, + 0, + 0, + 29296, + 29297, + 29298, + 29300, + 0, + 29302, + 0, + 0, + 29304, + 29307, + 0, + 29312, + 0, + 0, + 0, + 29322, + 0, + 0, + 29323, + 0, + 0, + 29324, + 29326, + 29328, + 0, + 29335, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29338, + 29339, + 0, + 0, + 0, + 0, + 0, + 29341, + 29343, + 0, + 0, + 0, + 0, + 29344, + 0, + 0, + 0, + 0, + 0, + 29345, + 0, + 0, + 0, + 0, + 29346, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29347, + 29348, + 29349, + 0, + 0, + 29354, + 0, + 0, + 29355, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29357, + 0, + 0, + 0, + 0, + 29364, + 0, + 29365, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29366, + 0, + 0, + 29368, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29378, + 0, + 29381, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29386, + 0, + 0, + 0, + 0, + 0, + 0, + 29389, + 0, + 0, + 0, + 29390, + 0, + 0, + 29391, + 29397, + 0, + 29398, + 29412, + 29414, + 29418, + 29419, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29420, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29423, + 0, + 0, + 0, + 29435, + 0, + 0, + 0, + 29437, + 0, + 0, + 29439, + 0, + 29441, + 0, + 0, + 0, + 0, + 29443, + 0, + 29446, + 29450, + 29452, + 0, + 0, + 0, + 0, + 0, + 29456, + 0, + 0, + 0, + 0, + 0, + 29461, + 0, + 0, + 0, + 29464, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29468, + 0, + 29473, + 0, + 0, + 0, + 29486, + 0, + 0, + 0, + 29490, + 0, + 0, + 0, + 29491, + 29492, + 0, + 0, + 29497, + 0, + 0, + 0, + 29498, + 0, + 29499, + 0, + 29502, + 29505, + 0, + 29509, + 0, + 0, + 0, + 29510, + 0, + 0, + 0, + 29512, + 0, + 0, + 0, + 29516, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29518, + 0, + 29519, + 0, + 0, + 0, + 0, + 0, + 29520, + 29521, + 29529, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29530, + 0, + 0, + 29531, + 29538, + 0, + 29540, + 0, + 0, + 0, + 29542, + 0, + 29543, + 29544, + 29547, + 0, + 0, + 29548, + 0, + 0, + 0, + 29549, + 0, + 0, + 0, + 29550, + 0, + 0, + 29552, + 0, + 0, + 0, + 0, + 29558, + 29561, + 0, + 29562, + 29564, + 0, + 0, + 29565, + 0, + 0, + 29566, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29578, + 29584, + 29586, + 29591, + 0, + 0, + 0, + 0, + 29593, + 29594, + 0, + 0, + 29597, + 0, + 0, + 29613, + 0, + 29614, + 0, + 29615, + 0, + 0, + 0, + 0, + 29616, + 29617, + 0, + 0, + 29625, + 0, + 0, + 0, + 29632, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29633, + 0, + 0, + 0, + 0, + 0, + 29634, + 29635, + 29637, + 0, + 29638, + 0, + 29641, + 29643, + 0, + 0, + 0, + 0, + 0, + 0, + 29644, + 0, + 29645, + 0, + 29649, + 0, + 0, + 0, + 29650, + 0, + 29653, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 29656, + 29659, + 0, + 0, + 29660, + 0, + 0, + 0, + 29661, + 0, + 0, + 0, + 0, + 0, + 29664, + 0, + 0, + 0, + 29671, + 29673, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29675, + 0, + 29677, + 29679, + 0, + 0, + 29684, + 0, + 0, + 0, + 0, + 0, + 29685, + 0, + 0, + 0, + 29687, + 0, + 0, + 0, + 29688, + 0, + 29689, + 29690, + 29700, + 0, + 29701, + 0, + 0, + 0, + 29702, + 0, + 29706, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29720, + 0, + 29721, + 0, + 29727, + 0, + 29733, + 29734, + 0, + 29750, + 29761, + 0, + 29763, + 0, + 0, + 0, + 0, + 0, + 29764, + 0, + 0, + 29765, + 0, + 0, + 0, + 29771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29772, + 0, + 0, + 0, + 29773, + 29774, + 29775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29822, + 0, + 0, + 0, + 29824, + 0, + 29825, + 0, + 0, + 0, + 0, + 0, + 29827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29829, + 0, + 29832, + 29834, + 0, + 0, + 29835, + 0, + 0, + 29837, + 29838, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29843, + 0, + 0, + 0, + 0, + 29844, + 29845, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29849, + 0, + 0, + 29869, + 29872, + 29890, + 29905, + 0, + 0, + 0, + 0, + 0, + 29907, + 29921, + 0, + 29922, + 0, + 0, + 29923, + 29926, + 29944, + 29946, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29947, + 29948, + 0, + 0, + 0, + 29951, + 0, + 0, + 0, + 0, + 0, + 29953, + 0, + 0, + 29956, + 0, + 29957, + 0, + 0, + 29962, + 0, + 0, + 0, + 0, + 29971, + 0, + 0, + 0, + 29972, + 0, + 0, + 0, + 0, + 0, + 29978, + 0, + 29979, + 29992, + 30007, + 30008, + 30010, + 0, + 0, + 0, + 30013, + 0, + 0, + 0, + 0, + 30014, + 30016, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30017, + 0, + 0, + 0, + 0, + 0, + 30023, + 30031, + 0, + 0, + 30033, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30034, + 0, + 30038, + 0, + 30039, + 0, + 30040, + 0, + 0, + 0, + 0, + 0, + 0, + 30067, + 30068, + 0, + 0, + 0, + 30069, + 0, + 30072, + 0, + 0, + 0, + 30073, + 0, + 0, + 0, + 0, + 30075, + 0, + 0, + 0, + 0, + 0, + 0, + 30079, + 0, + 0, + 30080, + 0, + 0, + 0, + 0, + 0, + 30082, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30084, + 30090, + 0, + 0, + 30091, + 0, + 0, + 0, + 0, + 30098, + 30118, + 0, + 30119, + 0, + 30121, + 30130, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30131, + 30132, + 30133, + 0, + 0, + 0, + 0, + 0, + 0, + 30135, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30136, + 0, + 0, + 30137, + 30138, + 0, + 0, + 0, + 30139, + 30146, + 0, + 0, + 0, + 0, + 0, + 30147, + 0, + 0, + 30148, + 30151, + 0, + 0, + 0, + 30168, + 0, + 30172, + 30173, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30180, + 30181, + 0, + 30192, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30194, + 30196, + 0, + 0, + 30199, + 0, + 0, + 30202, + 0, + 0, + 0, + 0, + 30203, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30213, + 0, + 0, + 0, + 30216, + 0, + 0, + 30217, + 0, + 0, + 0, + 30218, + 0, + 0, + 0, + 0, + 30219, + 0, + 30220, + 0, + 30222, + 30227, + 0, + 0, + 0, + 0, + 0, + 30231, + 0, + 0, + 30233, + 30235, + 0, + 0, + 0, + 0, + 30238, + 0, + 30240, + 30243, + 30245, + 0, + 30250, + 30252, + 0, + 0, + 0, + 30269, + 0, + 0, + 30271, + 30272, + 0, + 0, + 0, + 30278, + 30280, + 0, + 0, + 30282, + 0, + 30284, + 0, + 30294, + 0, + 0, + 0, + 0, + 30295, + 30296, + 0, + 0, + 0, + 0, + 0, + 30298, + 30299, + 30302, + 30304, + 30306, + 0, + 0, + 0, + 0, + 0, + 0, + 30316, + 30317, + 0, + 0, + 0, + 30318, + 0, + 0, + 0, + 30319, + 0, + 
30320, + 30322, + 30326, + 0, + 0, + 0, + 0, + 0, + 30327, + 0, + 30332, + 30348, + 30349, + 0, + 0, + 30356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30357, + 0, + 30358, + 0, + 30359, + 30360, + 0, + 0, + 30365, + 30366, + 30378, + 0, + 0, + 0, + 0, + 30379, + 0, + 0, + 30381, + 0, + 30385, + 0, + 30388, + 30397, + 0, + 0, + 0, + 30401, + 0, + 0, + 0, + 0, + 30403, + 0, + 0, + 0, + 0, + 0, + 30404, + 0, + 0, + 30405, + 0, + 30406, + 30408, + 0, + 30409, + 0, + 30410, + 0, + 0, + 0, + 30417, + 0, + 0, + 30418, + 30419, + 0, + 30420, + 0, + 30424, + 0, + 0, + 0, + 30427, + 30430, + 30432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30436, + 0, + 30437, + 30438, + 0, + 30441, + 30442, + 0, + 0, + 0, + 30445, + 0, + 0, + 0, + 0, + 30452, + 30456, + 30457, + 0, + 0, + 0, + 30458, + 0, + 30464, + 0, + 0, + 0, + 0, + 0, + 0, + 30467, + 0, + 30469, + 0, + 0, + 0, + 0, + 0, + 30477, + 0, + 0, + 30484, + 0, + 0, + 0, + 0, + 0, + 30485, + 0, + 0, + 0, + 0, + 0, + 30486, + 30487, + 30497, + 30498, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30505, + 0, + 30508, + 0, + 0, + 0, + 30509, + 30510, + 0, + 30514, + 30516, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30523, + 0, + 30524, + 0, + 30525, + 0, + 0, + 0, + 0, + 30537, + 0, + 0, + 30538, + 0, + 0, + 0, + 0, + 0, + 30553, + 0, + 0, + 30555, + 30556, + 30558, + 30559, + 30560, + 0, + 0, + 30561, + 0, + 30562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30563, + 30570, + 30571, + 0, + 30586, + 30587, + 0, + 0, + 30590, + 0, + 0, + 30594, + 0, + 0, + 0, + 0, + 30611, + 30612, + 30623, + 30634, + 0, + 0, + 30636, + 30640, + 30655, + 30656, + 0, + 30657, + 0, + 0, + 30658, + 30669, + 0, + 30670, + 0, + 30676, + 30678, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30679, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30695, + 0, + 0, + 30698, + 0, + 0, + 0, + 0, + 30700, + 0, + 0, + 0, + 0, + 30701, + 0, + 30702, + 30703, + 0, + 0, + 0, + 0, + 30707, + 0, + 0, + 0, + 30709, + 0, + 0, + 30710, + 30719, + 30729, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30731, + 0, + 0, + 30733, + 0, + 0, + 0, + 30734, + 0, + 0, + 0, + 0, + 0, + 30736, + 30737, + 0, + 0, + 0, + 30740, + 0, + 0, + 0, + 30743, + 0, + 30746, + 0, + 30747, + 30748, + 0, + 0, + 30751, + 30752, + 30753, + 0, + 0, + 0, + 30754, + 0, + 0, + 30760, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30763, + 0, + 30764, + 0, + 0, + 30766, + 0, + 30769, + 30770, + 30771, + 30774, + 30777, + 0, + 0, + 30779, + 30780, + 30781, + 0, + 0, + 0, + 0, + 30790, + 0, + 0, + 0, + 30792, + 0, + 0, + 0, + 0, + 30810, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30812, + 30819, + 0, + 0, + 30823, + 30824, + 0, + 30825, + 0, + 30827, + 0, + 0, + 0, + 0, + 0, + 0, + 30828, + 0, + 0, + 30830, + 0, + 0, + 0, + 30834, + 0, + 30835, + 0, + 30837, + 30838, + 0, + 30845, + 0, + 0, + 0, + 0, + 0, + 30846, + 30847, + 0, + 0, + 30849, + 0, + 30851, + 0, + 0, + 0, + 0, + 0, + 30852, + 30858, + 0, + 0, + 30859, + 0, + 30865, + 0, + 0, + 30866, + 0, + 0, + 30868, + 0, + 0, + 30869, + 0, + 0, + 0, + 30881, + 30883, + 0, + 0, + 0, + 0, + 0, + 30889, + 0, + 30891, + 0, + 0, + 0, + 0, + 30894, + 0, + 30895, + 0, + 30897, + 0, + 30898, + 0, + 0, + 0, + 30904, + 30906, + 0, + 30909, + 0, + 0, + 0, + 0, + 0, + 0, + 30910, + 0, + 0, + 0, + 30915, + 30933, + 30942, + 0, + 0, + 0, + 0, + 30943, + 0, + 0, + 30945, + 0, + 0, + 0, + 0, + 0, + 0, + 30946, + 0, + 0, + 30947, + 0, + 0, + 30955, + 30956, + 0, + 0, + 30960, + 0, + 0, + 30961, + 
30962, + 30966, + 0, + 0, + 30969, + 30974, + 0, + 0, + 0, + 30976, + 0, + 0, + 30977, + 0, + 30978, + 30982, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30994, + 30995, + 30998, + 0, + 31000, + 0, + 0, + 31001, + 0, + 0, + 31003, + 31005, + 0, + 0, + 31006, + 31011, + 0, + 0, + 31014, + 0, + 31016, + 0, + 0, + 0, + 0, + 31018, + 0, + 0, + 31020, + 31023, + 31024, + 31025, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31027, + 31028, + 31029, + 0, + 0, + 0, + 0, + 0, + 0, + 31032, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31036, + 31037, + 31038, + 0, + 0, + 0, + 31041, + 31043, + 31045, + 0, + 31047, + 0, + 0, + 0, + 31048, + 0, + 31049, + 0, + 0, + 0, + 31053, + 31054, + 31055, + 0, + 0, + 31063, + 0, + 0, + 0, + 0, + 0, + 31066, + 0, + 31068, + 31071, + 0, + 0, + 0, + 31072, + 31073, + 0, + 0, + 0, + 0, + 31075, + 0, + 0, + 31076, + 0, + 0, + 0, + 31077, + 31079, + 0, + 31080, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31087, + 0, + 31142, + 0, + 31144, + 0, + 0, + 31145, + 31146, + 31147, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31149, + 0, + 31151, + 31152, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31162, + 31171, + 31174, + 31175, + 0, + 0, + 0, + 31176, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31179, + 0, + 0, + 0, + 31186, + 0, + 0, + 0, + 31192, + 31195, + 0, + 0, + 31196, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31198, + 0, + 0, + 0, + 0, + 0, + 31199, + 0, + 0, + 0, + 31205, + 0, + 0, + 0, + 0, + 31211, + 31215, + 0, + 0, + 0, + 0, + 31231, + 0, + 31232, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31233, + 31236, + 31253, + 0, + 31254, + 0, + 0, + 0, + 0, + 0, + 0, + 31255, + 0, + 0, + 31257, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31258, + 31259, + 0, + 0, + 31260, + 0, + 31261, + 0, + 0, + 0, + 0, + 0, + 31262, + 31263, + 0, + 0, + 31264, + 0, + 31266, + 0, + 31267, + 0, + 0, + 0, + 0, + 0, + 31281, + 0, + 31282, + 0, + 31284, + 0, + 0, + 31285, + 31287, + 31288, + 0, + 0, + 31290, + 0, + 0, + 0, + 31292, + 31295, + 0, + 31299, + 0, + 31300, + 0, + 0, + 0, + 0, + 0, + 31302, + 0, + 0, + 0, + 0, + 31303, + 0, + 0, + 0, + 0, + 0, + 0, + 31304, + 0, + 0, + 0, + 0, + 0, + 31305, + 31308, + 31309, + 31315, + 0, + 31317, + 0, + 0, + 0, + 0, + 0, + 31323, + 0, + 31324, + 0, + 0, + 0, + 0, + 0, + 31325, + 31327, + 0, + 0, + 31331, + 0, + 0, + 0, + 0, + 0, + 31333, + 0, + 0, + 0, + 0, + 0, + 31336, + 0, + 0, + 31337, + 0, + 0, + 0, + 0, + 0, + 0, + 31338, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31339, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31342, + 0, + 0, + 0, + 0, + 31345, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31347, + 0, + 0, + 0, + 0, + 0, + 0, + 31348, + 0, + 0, + 31350, + 31351, + 0, + 31352, + 0, + 0, + 31354, + 0, + 0, + 0, + 0, + 31355, + 0, + 0, + 31356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31363, + 0, + 31372, + 0, + 0, + 31373, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31376, + 0, + 31388, + 0, + 31389, + 0, + 31392, + 0, + 31401, + 0, + 31405, + 31407, + 31408, + 0, + 31409, + 0, + 0, + 0, + 0, + 0, + 0, + 31413, + 31415, + 0, + 0, + 0, + 31416, + 31418, + 0, + 0, + 0, + 0, + 0, + 0, + 31422, + 31423, + 0, + 0, + 31424, + 0, + 31425, + 31432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31434, + 0, + 0, + 0, + 0, + 0, + 0, + 31435, + 0, + 0, + 0, + 0, + 31438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31442, + 0, + 31444, + 0, + 31448, + 0, + 0, + 
31451, + 0, + 0, + 0, + 0, + 31452, + 0, + 31461, + 31465, + 0, + 0, + 31466, + 0, + 0, + 31467, + 0, + 0, + 31468, + 0, + 0, + 0, + 31469, + 31473, + 0, + 31476, + 0, + 0, + 0, + 0, + 31489, + 31490, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31492, + 31493, + 31494, + 0, + 0, + 0, + 0, + 31501, + 31504, + 31505, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31509, + 0, + 0, + 0, + 0, + 31510, + 0, + 0, + 31511, + 0, + 0, + 31513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31514, + 0, + 31522, + 31536, + 31539, + 31540, + 0, + 31541, + 0, + 0, + 0, + 0, + 0, + 0, + 31546, + 31553, + 31559, + 0, + 0, + 0, + 31560, + 31561, + 31562, + 0, + 0, + 31564, + 31567, + 0, + 31569, + 0, + 0, + 0, + 31570, + 0, + 0, + 0, + 0, + 31571, + 0, + 0, + 0, + 0, + 0, + 0, + 31572, + 31574, + 31580, + 31581, + 0, + 0, + 31582, + 31584, + 31585, + 31586, + 31595, + 0, + 31596, + 0, + 0, + 0, + 0, + 31597, + 0, + 31599, + 0, + 31600, + 31601, + 0, + 0, + 31603, + 31604, + 0, + 0, + 31608, + 31610, + 0, + 0, + 0, + 31611, + 0, + 31615, + 0, + 0, + 0, + 0, + 31616, + 0, + 0, + 0, + 0, + 0, + 0, + 31617, + 0, + 0, + 0, + 0, + 0, + 31618, + 0, + 0, + 0, + 0, + 0, + 0, + 31621, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31622, + 31625, + 0, + 0, + 0, + 0, + 31627, + 0, + 31641, + 0, + 0, + 31642, + 0, + 0, + 31643, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31644, + 0, + 31646, + 0, + 0, + 0, + 0, + 31648, + 0, + 0, + 0, + 31652, + 0, + 0, + 0, + 31657, + 0, + 0, + 31676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31689, + 31691, + 31692, + 0, + 31694, + 0, + 0, + 0, + 31696, + 0, + 31702, + 0, + 31703, + 0, +} + +var kStaticDictionaryWords = [31705]dictWord{ + dictWord{0, 0, 0}, + dictWord{8, 0, 1002}, + dictWord{136, 0, 1015}, + dictWord{4, 0, 683}, + dictWord{4, 10, 325}, + dictWord{138, 10, 125}, + dictWord{7, 11, 572}, + dictWord{ + 9, + 11, + 592, + }, + dictWord{11, 11, 680}, + dictWord{11, 11, 842}, + dictWord{11, 11, 924}, + dictWord{12, 11, 356}, + dictWord{12, 11, 550}, + dictWord{13, 11, 317}, + dictWord{13, 11, 370}, + dictWord{13, 11, 469}, + dictWord{13, 11, 471}, + dictWord{14, 11, 397}, + dictWord{18, 11, 69}, + dictWord{146, 11, 145}, + dictWord{ + 134, + 0, + 1265, + }, + dictWord{136, 11, 534}, + dictWord{134, 0, 1431}, + dictWord{11, 0, 138}, + dictWord{140, 0, 40}, + dictWord{4, 0, 155}, + dictWord{7, 0, 1689}, + dictWord{ + 4, + 10, + 718, + }, + dictWord{135, 10, 1216}, + dictWord{4, 0, 245}, + dictWord{5, 0, 151}, + dictWord{5, 0, 741}, + dictWord{6, 0, 1147}, + dictWord{7, 0, 498}, + dictWord{7, 0, 870}, + dictWord{7, 0, 1542}, + dictWord{12, 0, 213}, + dictWord{14, 0, 36}, + dictWord{14, 0, 391}, + dictWord{17, 0, 111}, + dictWord{18, 0, 6}, + dictWord{18, 0, 46}, + dictWord{ + 18, + 0, + 151, + }, + dictWord{19, 0, 36}, + dictWord{20, 0, 32}, + dictWord{20, 0, 56}, + dictWord{20, 0, 69}, + dictWord{20, 0, 102}, + dictWord{21, 0, 4}, + dictWord{22, 0, 8}, + dictWord{ + 22, + 0, + 10, + }, + dictWord{22, 0, 14}, + dictWord{150, 0, 31}, + dictWord{4, 0, 624}, + dictWord{135, 0, 1752}, + dictWord{5, 10, 124}, + dictWord{5, 10, 144}, + dictWord{6, 10, 548}, + dictWord{7, 10, 15}, + dictWord{7, 10, 153}, + dictWord{137, 10, 629}, + dictWord{6, 0, 503}, + dictWord{9, 0, 586}, + dictWord{13, 0, 468}, + dictWord{14, 0, 66}, + dictWord{ + 16, + 0, + 58, + }, + dictWord{7, 10, 1531}, + dictWord{8, 10, 416}, + dictWord{9, 10, 275}, + dictWord{10, 10, 100}, + dictWord{11, 10, 658}, + dictWord{11, 10, 979}, + dictWord{ + 12, + 10, + 86, + }, + dictWord{14, 10, 207}, + dictWord{15, 10, 20}, + 
dictWord{143, 10, 25}, + dictWord{5, 0, 603}, + dictWord{7, 0, 1212}, + dictWord{9, 0, 565}, + dictWord{ + 14, + 0, + 301, + }, + dictWord{5, 10, 915}, + dictWord{6, 10, 1783}, + dictWord{7, 10, 211}, + dictWord{7, 10, 1353}, + dictWord{9, 10, 83}, + dictWord{10, 10, 376}, + dictWord{ + 10, + 10, + 431, + }, + dictWord{11, 10, 543}, + dictWord{12, 10, 664}, + dictWord{13, 10, 280}, + dictWord{13, 10, 428}, + dictWord{14, 10, 128}, + dictWord{17, 10, 52}, + dictWord{ + 145, + 10, + 81, + }, + dictWord{4, 0, 492}, + dictWord{133, 0, 451}, + dictWord{135, 0, 835}, + dictWord{141, 0, 70}, + dictWord{132, 0, 539}, + dictWord{7, 11, 748}, + dictWord{ + 139, + 11, + 700, + }, + dictWord{7, 11, 1517}, + dictWord{11, 11, 597}, + dictWord{14, 11, 76}, + dictWord{14, 11, 335}, + dictWord{148, 11, 33}, + dictWord{6, 0, 113}, + dictWord{135, 0, 436}, + dictWord{4, 10, 338}, + dictWord{133, 10, 400}, + dictWord{136, 0, 718}, + dictWord{133, 11, 127}, + dictWord{133, 11, 418}, + dictWord{ + 6, + 0, + 1505, + }, + dictWord{7, 0, 520}, + dictWord{6, 11, 198}, + dictWord{11, 10, 892}, + dictWord{140, 11, 83}, + dictWord{4, 10, 221}, + dictWord{5, 10, 659}, + dictWord{ + 5, + 10, + 989, + }, + dictWord{7, 10, 697}, + dictWord{7, 10, 1211}, + dictWord{138, 10, 284}, + dictWord{135, 0, 1070}, + dictWord{5, 11, 276}, + dictWord{6, 11, 55}, + dictWord{ + 135, + 11, + 1369, + }, + dictWord{134, 0, 1515}, + dictWord{6, 11, 1752}, + dictWord{136, 11, 726}, + dictWord{138, 10, 507}, + dictWord{15, 0, 78}, + dictWord{4, 10, 188}, + dictWord{135, 10, 805}, + dictWord{5, 10, 884}, + dictWord{139, 10, 991}, + dictWord{133, 11, 764}, + dictWord{134, 10, 1653}, + dictWord{6, 11, 309}, + dictWord{ + 7, + 11, + 331, + }, + dictWord{138, 11, 550}, + dictWord{135, 11, 1861}, + dictWord{132, 11, 348}, + dictWord{135, 11, 986}, + dictWord{135, 11, 1573}, + dictWord{ + 12, + 0, + 610, + }, + dictWord{13, 0, 431}, + dictWord{144, 0, 59}, + dictWord{9, 11, 799}, + dictWord{140, 10, 166}, + dictWord{134, 0, 1530}, + dictWord{132, 0, 750}, + dictWord{132, 0, 307}, + dictWord{133, 0, 964}, + dictWord{6, 11, 194}, + dictWord{7, 11, 133}, + dictWord{10, 11, 493}, + dictWord{10, 11, 570}, + dictWord{139, 11, 664}, + dictWord{5, 11, 24}, + dictWord{5, 11, 569}, + dictWord{6, 11, 3}, + dictWord{6, 11, 119}, + dictWord{6, 11, 143}, + dictWord{6, 11, 440}, + dictWord{7, 11, 295}, + dictWord{ + 7, + 11, + 599, + }, + dictWord{7, 11, 1686}, + dictWord{7, 11, 1854}, + dictWord{8, 11, 424}, + dictWord{9, 11, 43}, + dictWord{9, 11, 584}, + dictWord{9, 11, 760}, + dictWord{ + 10, + 11, + 148, + }, + dictWord{10, 11, 328}, + dictWord{11, 11, 159}, + dictWord{11, 11, 253}, + dictWord{11, 11, 506}, + dictWord{12, 11, 487}, + dictWord{12, 11, 531}, + dictWord{144, 11, 33}, + dictWord{136, 10, 760}, + dictWord{5, 11, 14}, + dictWord{5, 11, 892}, + dictWord{6, 11, 283}, + dictWord{7, 11, 234}, + dictWord{136, 11, 537}, + dictWord{135, 11, 1251}, + dictWord{4, 11, 126}, + dictWord{8, 11, 635}, + dictWord{147, 11, 34}, + dictWord{4, 11, 316}, + dictWord{135, 11, 1561}, + dictWord{ + 6, + 0, + 999, + }, + dictWord{6, 0, 1310}, + dictWord{137, 11, 861}, + dictWord{4, 11, 64}, + dictWord{5, 11, 352}, + dictWord{5, 11, 720}, + dictWord{6, 11, 368}, + dictWord{ + 139, + 11, + 359, + }, + dictWord{4, 0, 75}, + dictWord{5, 0, 180}, + dictWord{6, 0, 500}, + dictWord{7, 0, 58}, + dictWord{7, 0, 710}, + dictWord{10, 0, 645}, + dictWord{136, 10, 770}, + dictWord{133, 0, 649}, + dictWord{6, 0, 276}, + dictWord{7, 0, 282}, + dictWord{7, 0, 879}, + dictWord{7, 0, 
924}, + dictWord{8, 0, 459}, + dictWord{9, 0, 599}, + dictWord{9, 0, 754}, + dictWord{11, 0, 574}, + dictWord{12, 0, 128}, + dictWord{12, 0, 494}, + dictWord{13, 0, 52}, + dictWord{13, 0, 301}, + dictWord{15, 0, 30}, + dictWord{143, 0, 132}, + dictWord{132, 0, 200}, + dictWord{4, 10, 89}, + dictWord{5, 10, 489}, + dictWord{6, 10, 315}, + dictWord{7, 10, 553}, + dictWord{7, 10, 1745}, + dictWord{138, 10, 243}, + dictWord{135, 11, 1050}, + dictWord{7, 0, 1621}, + dictWord{6, 10, 1658}, + dictWord{9, 10, 3}, + dictWord{10, 10, 154}, + dictWord{11, 10, 641}, + dictWord{13, 10, 85}, + dictWord{13, 10, 201}, + dictWord{141, 10, 346}, + dictWord{6, 11, 175}, + dictWord{137, 11, 289}, + dictWord{5, 11, 432}, + dictWord{133, 11, 913}, + dictWord{ + 6, + 0, + 225, + }, + dictWord{137, 0, 211}, + dictWord{7, 0, 718}, + dictWord{8, 0, 687}, + dictWord{139, 0, 374}, + dictWord{4, 10, 166}, + dictWord{133, 10, 505}, + dictWord{ + 9, + 0, + 110, + }, + dictWord{134, 10, 1670}, + dictWord{8, 0, 58}, + dictWord{9, 0, 724}, + dictWord{11, 0, 809}, + dictWord{13, 0, 113}, + dictWord{145, 0, 72}, + dictWord{6, 0, 345}, + dictWord{7, 0, 1247}, + dictWord{144, 11, 82}, + dictWord{5, 11, 931}, + dictWord{134, 11, 1698}, + dictWord{8, 0, 767}, + dictWord{8, 0, 803}, + dictWord{9, 0, 301}, + dictWord{137, 0, 903}, + dictWord{139, 0, 203}, + dictWord{134, 0, 1154}, + dictWord{7, 0, 1949}, + dictWord{136, 0, 674}, + dictWord{134, 0, 259}, + dictWord{ + 135, + 0, + 1275, + }, + dictWord{5, 11, 774}, + dictWord{6, 11, 1637}, + dictWord{6, 11, 1686}, + dictWord{134, 11, 1751}, + dictWord{134, 0, 1231}, + dictWord{7, 10, 445}, + dictWord{8, 10, 307}, + dictWord{8, 10, 704}, + dictWord{10, 10, 41}, + dictWord{10, 10, 439}, + dictWord{11, 10, 237}, + dictWord{11, 10, 622}, + dictWord{140, 10, 201}, + dictWord{136, 0, 254}, + dictWord{6, 11, 260}, + dictWord{135, 11, 1484}, + dictWord{139, 0, 277}, + dictWord{135, 10, 1977}, + dictWord{4, 10, 189}, + dictWord{ + 5, + 10, + 713, + }, + dictWord{6, 11, 573}, + dictWord{136, 10, 57}, + dictWord{138, 10, 371}, + dictWord{132, 10, 552}, + dictWord{134, 11, 344}, + dictWord{133, 0, 248}, + dictWord{9, 0, 800}, + dictWord{10, 0, 693}, + dictWord{11, 0, 482}, + dictWord{11, 0, 734}, + dictWord{11, 0, 789}, + dictWord{134, 11, 240}, + dictWord{4, 0, 116}, + dictWord{ + 5, + 0, + 95, + }, + dictWord{5, 0, 445}, + dictWord{7, 0, 1688}, + dictWord{8, 0, 29}, + dictWord{9, 0, 272}, + dictWord{11, 0, 509}, + dictWord{11, 0, 915}, + dictWord{4, 11, 292}, + dictWord{4, 11, 736}, + dictWord{5, 11, 871}, + dictWord{6, 11, 171}, + dictWord{6, 11, 1689}, + dictWord{7, 11, 1324}, + dictWord{7, 11, 1944}, + dictWord{9, 11, 415}, + dictWord{9, 11, 580}, + dictWord{14, 11, 230}, + dictWord{146, 11, 68}, + dictWord{7, 0, 490}, + dictWord{13, 0, 100}, + dictWord{143, 0, 75}, + dictWord{135, 0, 1641}, + dictWord{133, 0, 543}, + dictWord{7, 11, 209}, + dictWord{8, 11, 661}, + dictWord{10, 11, 42}, + dictWord{11, 11, 58}, + dictWord{12, 11, 58}, + dictWord{12, 11, 118}, + dictWord{141, 11, 32}, + dictWord{5, 0, 181}, + dictWord{8, 0, 41}, + dictWord{6, 11, 63}, + dictWord{135, 11, 920}, + dictWord{133, 0, 657}, + dictWord{133, 11, 793}, + dictWord{138, 0, 709}, + dictWord{7, 0, 25}, + dictWord{8, 0, 202}, + dictWord{138, 0, 536}, + dictWord{5, 11, 665}, + dictWord{135, 10, 1788}, + dictWord{145, 10, 49}, + dictWord{9, 0, 423}, + dictWord{140, 0, 89}, + dictWord{5, 11, 67}, + dictWord{6, 11, 62}, + dictWord{6, 11, 374}, + dictWord{135, 11, 1391}, + dictWord{8, 0, 113}, + dictWord{ + 9, + 0, + 877, + }, 
+ dictWord{10, 0, 554}, + dictWord{11, 0, 83}, + dictWord{12, 0, 136}, + dictWord{19, 0, 109}, + dictWord{9, 11, 790}, + dictWord{140, 11, 47}, + dictWord{ + 138, + 10, + 661, + }, + dictWord{4, 0, 963}, + dictWord{10, 0, 927}, + dictWord{14, 0, 442}, + dictWord{135, 10, 1945}, + dictWord{133, 0, 976}, + dictWord{132, 0, 206}, + dictWord{ + 4, + 11, + 391, + }, + dictWord{135, 11, 1169}, + dictWord{134, 0, 2002}, + dictWord{6, 0, 696}, + dictWord{134, 0, 1008}, + dictWord{134, 0, 1170}, + dictWord{132, 11, 271}, + dictWord{7, 0, 13}, + dictWord{8, 0, 226}, + dictWord{10, 0, 537}, + dictWord{11, 0, 570}, + dictWord{11, 0, 605}, + dictWord{11, 0, 799}, + dictWord{11, 0, 804}, + dictWord{ + 12, + 0, + 85, + }, + dictWord{12, 0, 516}, + dictWord{12, 0, 623}, + dictWord{13, 0, 112}, + dictWord{13, 0, 361}, + dictWord{14, 0, 77}, + dictWord{14, 0, 78}, + dictWord{17, 0, 28}, + dictWord{19, 0, 110}, + dictWord{140, 11, 314}, + dictWord{132, 0, 769}, + dictWord{134, 0, 1544}, + dictWord{4, 0, 551}, + dictWord{137, 0, 678}, + dictWord{5, 10, 84}, + dictWord{134, 10, 163}, + dictWord{9, 0, 57}, + dictWord{9, 0, 459}, + dictWord{10, 0, 425}, + dictWord{11, 0, 119}, + dictWord{12, 0, 184}, + dictWord{12, 0, 371}, + dictWord{ + 13, + 0, + 358, + }, + dictWord{145, 0, 51}, + dictWord{5, 0, 188}, + dictWord{5, 0, 814}, + dictWord{8, 0, 10}, + dictWord{9, 0, 421}, + dictWord{9, 0, 729}, + dictWord{10, 0, 609}, + dictWord{11, 0, 689}, + dictWord{4, 11, 253}, + dictWord{5, 10, 410}, + dictWord{5, 11, 544}, + dictWord{7, 11, 300}, + dictWord{137, 11, 340}, + dictWord{134, 0, 624}, + dictWord{138, 11, 321}, + dictWord{135, 0, 1941}, + dictWord{18, 0, 130}, + dictWord{5, 10, 322}, + dictWord{8, 10, 186}, + dictWord{9, 10, 262}, + dictWord{10, 10, 187}, + dictWord{142, 10, 208}, + dictWord{5, 11, 53}, + dictWord{5, 11, 541}, + dictWord{6, 11, 94}, + dictWord{6, 11, 499}, + dictWord{7, 11, 230}, + dictWord{139, 11, 321}, + dictWord{133, 10, 227}, + dictWord{4, 0, 378}, + dictWord{4, 11, 920}, + dictWord{5, 11, 25}, + dictWord{5, 11, 790}, + dictWord{6, 11, 457}, + dictWord{135, 11, 853}, + dictWord{137, 0, 269}, + dictWord{132, 0, 528}, + dictWord{134, 0, 1146}, + dictWord{7, 10, 1395}, + dictWord{8, 10, 486}, + dictWord{9, 10, 236}, + dictWord{9, 10, 878}, + dictWord{10, 10, 218}, + dictWord{11, 10, 95}, + dictWord{19, 10, 17}, + dictWord{147, 10, 31}, + dictWord{7, 10, 2043}, + dictWord{8, 10, 672}, + dictWord{ + 141, + 10, + 448, + }, + dictWord{134, 0, 1105}, + dictWord{134, 0, 1616}, + dictWord{134, 11, 1765}, + dictWord{140, 11, 163}, + dictWord{5, 10, 412}, + dictWord{133, 11, 822}, + dictWord{132, 11, 634}, + dictWord{6, 0, 656}, + dictWord{134, 11, 1730}, + dictWord{134, 0, 1940}, + dictWord{5, 0, 104}, + dictWord{6, 0, 173}, + dictWord{ + 135, + 0, + 1631, + }, + dictWord{136, 10, 562}, + dictWord{6, 11, 36}, + dictWord{7, 11, 658}, + dictWord{8, 11, 454}, + dictWord{147, 11, 86}, + dictWord{5, 0, 457}, + dictWord{ + 134, + 10, + 1771, + }, + dictWord{7, 0, 810}, + dictWord{8, 0, 138}, + dictWord{8, 0, 342}, + dictWord{9, 0, 84}, + dictWord{10, 0, 193}, + dictWord{11, 0, 883}, + dictWord{140, 0, 359}, + dictWord{9, 0, 620}, + dictWord{135, 10, 1190}, + dictWord{137, 10, 132}, + dictWord{7, 11, 975}, + dictWord{137, 11, 789}, + dictWord{6, 0, 95}, + dictWord{6, 0, 1934}, + dictWord{136, 0, 967}, + dictWord{141, 11, 335}, + dictWord{6, 0, 406}, + dictWord{10, 0, 409}, + dictWord{10, 0, 447}, + dictWord{11, 0, 44}, + dictWord{140, 0, 100}, + dictWord{4, 10, 317}, + dictWord{135, 10, 1279}, + dictWord{132, 
0, 477}, + dictWord{134, 0, 1268}, + dictWord{6, 0, 1941}, + dictWord{8, 0, 944}, + dictWord{5, 10, 63}, + dictWord{133, 10, 509}, + dictWord{132, 0, 629}, + dictWord{132, 11, 104}, + dictWord{4, 0, 246}, + dictWord{133, 0, 375}, + dictWord{6, 0, 1636}, + dictWord{ + 132, + 10, + 288, + }, + dictWord{135, 11, 1614}, + dictWord{9, 0, 49}, + dictWord{10, 0, 774}, + dictWord{8, 10, 89}, + dictWord{8, 10, 620}, + dictWord{11, 10, 628}, + dictWord{ + 12, + 10, + 322, + }, + dictWord{143, 10, 124}, + dictWord{4, 0, 282}, + dictWord{7, 0, 1034}, + dictWord{11, 0, 398}, + dictWord{11, 0, 634}, + dictWord{12, 0, 1}, + dictWord{12, 0, 79}, + dictWord{12, 0, 544}, + dictWord{14, 0, 237}, + dictWord{17, 0, 10}, + dictWord{146, 0, 20}, + dictWord{132, 0, 824}, + dictWord{7, 11, 45}, + dictWord{9, 11, 542}, + dictWord{ + 9, + 11, + 566, + }, + dictWord{138, 11, 728}, + dictWord{5, 0, 118}, + dictWord{5, 0, 499}, + dictWord{6, 0, 476}, + dictWord{6, 0, 665}, + dictWord{6, 0, 1176}, + dictWord{ + 6, + 0, + 1196, + }, + dictWord{7, 0, 600}, + dictWord{7, 0, 888}, + dictWord{135, 0, 1096}, + dictWord{7, 0, 296}, + dictWord{7, 0, 596}, + dictWord{8, 0, 560}, + dictWord{8, 0, 586}, + dictWord{9, 0, 612}, + dictWord{11, 0, 304}, + dictWord{12, 0, 46}, + dictWord{13, 0, 89}, + dictWord{14, 0, 112}, + dictWord{145, 0, 122}, + dictWord{5, 0, 894}, + dictWord{ + 6, + 0, + 1772, + }, + dictWord{9, 0, 1009}, + dictWord{138, 10, 120}, + dictWord{5, 11, 533}, + dictWord{7, 11, 755}, + dictWord{138, 11, 780}, + dictWord{151, 10, 1}, + dictWord{ + 6, + 0, + 1474, + }, + dictWord{7, 11, 87}, + dictWord{142, 11, 288}, + dictWord{139, 0, 366}, + dictWord{137, 10, 461}, + dictWord{7, 11, 988}, + dictWord{7, 11, 1939}, + dictWord{ + 9, + 11, + 64, + }, + dictWord{9, 11, 502}, + dictWord{12, 11, 7}, + dictWord{12, 11, 34}, + dictWord{13, 11, 12}, + dictWord{13, 11, 234}, + dictWord{147, 11, 77}, + dictWord{ + 7, + 0, + 1599, + }, + dictWord{7, 0, 1723}, + dictWord{8, 0, 79}, + dictWord{8, 0, 106}, + dictWord{8, 0, 190}, + dictWord{8, 0, 302}, + dictWord{8, 0, 383}, + dictWord{8, 0, 713}, + dictWord{ + 9, + 0, + 119, + }, + dictWord{9, 0, 233}, + dictWord{9, 0, 419}, + dictWord{9, 0, 471}, + dictWord{10, 0, 181}, + dictWord{10, 0, 406}, + dictWord{11, 0, 57}, + dictWord{11, 0, 85}, + dictWord{11, 0, 120}, + dictWord{11, 0, 177}, + dictWord{11, 0, 296}, + dictWord{11, 0, 382}, + dictWord{11, 0, 454}, + dictWord{11, 0, 758}, + dictWord{11, 0, 999}, + dictWord{ + 12, + 0, + 27, + }, + dictWord{12, 0, 98}, + dictWord{12, 0, 131}, + dictWord{12, 0, 245}, + dictWord{12, 0, 312}, + dictWord{12, 0, 446}, + dictWord{12, 0, 454}, + dictWord{13, 0, 25}, + dictWord{13, 0, 98}, + dictWord{13, 0, 426}, + dictWord{13, 0, 508}, + dictWord{14, 0, 70}, + dictWord{14, 0, 163}, + dictWord{14, 0, 272}, + dictWord{14, 0, 277}, + dictWord{ + 14, + 0, + 370, + }, + dictWord{15, 0, 95}, + dictWord{15, 0, 138}, + dictWord{15, 0, 167}, + dictWord{17, 0, 38}, + dictWord{148, 0, 96}, + dictWord{135, 10, 1346}, + dictWord{ + 10, + 0, + 200, + }, + dictWord{19, 0, 2}, + dictWord{151, 0, 22}, + dictWord{135, 11, 141}, + dictWord{134, 10, 85}, + dictWord{134, 0, 1759}, + dictWord{138, 0, 372}, + dictWord{ + 145, + 0, + 16, + }, + dictWord{8, 0, 943}, + dictWord{132, 11, 619}, + dictWord{139, 11, 88}, + dictWord{5, 11, 246}, + dictWord{8, 11, 189}, + dictWord{9, 11, 355}, + dictWord{ + 9, + 11, + 512, + }, + dictWord{10, 11, 124}, + dictWord{10, 11, 453}, + dictWord{11, 11, 143}, + dictWord{11, 11, 416}, + dictWord{11, 11, 859}, + dictWord{141, 11, 341}, + 
dictWord{ + 5, + 0, + 258, + }, + dictWord{134, 0, 719}, + dictWord{6, 0, 1798}, + dictWord{6, 0, 1839}, + dictWord{8, 0, 900}, + dictWord{10, 0, 874}, + dictWord{10, 0, 886}, + dictWord{ + 12, + 0, + 698, + }, + dictWord{12, 0, 732}, + dictWord{12, 0, 770}, + dictWord{16, 0, 106}, + dictWord{18, 0, 163}, + dictWord{18, 0, 170}, + dictWord{18, 0, 171}, + dictWord{152, 0, 20}, + dictWord{9, 0, 707}, + dictWord{11, 0, 326}, + dictWord{11, 0, 339}, + dictWord{12, 0, 423}, + dictWord{12, 0, 502}, + dictWord{20, 0, 62}, + dictWord{9, 11, 707}, + dictWord{ + 11, + 11, + 326, + }, + dictWord{11, 11, 339}, + dictWord{12, 11, 423}, + dictWord{12, 11, 502}, + dictWord{148, 11, 62}, + dictWord{5, 0, 30}, + dictWord{7, 0, 495}, + dictWord{ + 8, + 0, + 134, + }, + dictWord{9, 0, 788}, + dictWord{140, 0, 438}, + dictWord{133, 11, 678}, + dictWord{5, 10, 279}, + dictWord{6, 10, 235}, + dictWord{7, 10, 468}, + dictWord{ + 8, + 10, + 446, + }, + dictWord{9, 10, 637}, + dictWord{10, 10, 717}, + dictWord{11, 10, 738}, + dictWord{140, 10, 514}, + dictWord{5, 11, 35}, + dictWord{6, 11, 287}, + dictWord{ + 7, + 11, + 862, + }, + dictWord{7, 11, 1886}, + dictWord{138, 11, 179}, + dictWord{7, 0, 1948}, + dictWord{7, 0, 2004}, + dictWord{132, 11, 517}, + dictWord{5, 10, 17}, + dictWord{ + 6, + 10, + 371, + }, + dictWord{137, 10, 528}, + dictWord{4, 0, 115}, + dictWord{5, 0, 669}, + dictWord{6, 0, 407}, + dictWord{8, 0, 311}, + dictWord{11, 0, 10}, + dictWord{141, 0, 5}, + dictWord{137, 0, 381}, + dictWord{5, 0, 50}, + dictWord{6, 0, 439}, + dictWord{7, 0, 780}, + dictWord{135, 0, 1040}, + dictWord{136, 11, 667}, + dictWord{11, 11, 403}, + dictWord{146, 11, 83}, + dictWord{5, 0, 1}, + dictWord{6, 0, 81}, + dictWord{138, 0, 520}, + dictWord{134, 0, 738}, + dictWord{5, 0, 482}, + dictWord{8, 0, 98}, + dictWord{9, 0, 172}, + dictWord{10, 0, 360}, + dictWord{10, 0, 700}, + dictWord{10, 0, 822}, + dictWord{11, 0, 302}, + dictWord{11, 0, 778}, + dictWord{12, 0, 50}, + dictWord{12, 0, 127}, + dictWord{ + 12, + 0, + 396, + }, + dictWord{13, 0, 62}, + dictWord{13, 0, 328}, + dictWord{14, 0, 122}, + dictWord{147, 0, 72}, + dictWord{9, 11, 157}, + dictWord{10, 11, 131}, + dictWord{ + 140, + 11, + 72, + }, + dictWord{135, 11, 714}, + dictWord{135, 11, 539}, + dictWord{5, 0, 2}, + dictWord{6, 0, 512}, + dictWord{7, 0, 797}, + dictWord{7, 0, 1494}, + dictWord{8, 0, 253}, + dictWord{8, 0, 589}, + dictWord{9, 0, 77}, + dictWord{10, 0, 1}, + dictWord{10, 0, 129}, + dictWord{10, 0, 225}, + dictWord{11, 0, 118}, + dictWord{11, 0, 226}, + dictWord{ + 11, + 0, + 251, + }, + dictWord{11, 0, 430}, + dictWord{11, 0, 701}, + dictWord{11, 0, 974}, + dictWord{11, 0, 982}, + dictWord{12, 0, 64}, + dictWord{12, 0, 260}, + dictWord{12, 0, 488}, + dictWord{140, 0, 690}, + dictWord{5, 11, 394}, + dictWord{7, 11, 367}, + dictWord{7, 11, 487}, + dictWord{7, 11, 857}, + dictWord{7, 11, 1713}, + dictWord{8, 11, 246}, + dictWord{9, 11, 537}, + dictWord{10, 11, 165}, + dictWord{12, 11, 219}, + dictWord{140, 11, 561}, + dictWord{136, 0, 557}, + dictWord{5, 10, 779}, + dictWord{5, 10, 807}, + dictWord{6, 10, 1655}, + dictWord{134, 10, 1676}, + dictWord{4, 10, 196}, + dictWord{5, 10, 558}, + dictWord{133, 10, 949}, + dictWord{11, 11, 827}, + dictWord{ + 12, + 11, + 56, + }, + dictWord{14, 11, 34}, + dictWord{143, 11, 148}, + dictWord{137, 0, 347}, + dictWord{133, 0, 572}, + dictWord{134, 0, 832}, + dictWord{4, 0, 12}, + dictWord{ + 7, + 0, + 504, + }, + dictWord{7, 0, 522}, + dictWord{7, 0, 809}, + dictWord{8, 0, 797}, + dictWord{141, 0, 88}, + 
dictWord{4, 10, 752}, + dictWord{133, 11, 449}, + dictWord{7, 11, 86}, + dictWord{8, 11, 103}, + dictWord{145, 11, 69}, + dictWord{7, 11, 2028}, + dictWord{138, 11, 641}, + dictWord{5, 0, 528}, + dictWord{6, 11, 1}, + dictWord{142, 11, 2}, + dictWord{134, 0, 861}, + dictWord{10, 0, 294}, + dictWord{4, 10, 227}, + dictWord{5, 10, 159}, + dictWord{5, 10, 409}, + dictWord{7, 10, 80}, + dictWord{10, 10, 479}, + dictWord{ + 12, + 10, + 418, + }, + dictWord{14, 10, 50}, + dictWord{14, 10, 249}, + dictWord{142, 10, 295}, + dictWord{7, 10, 1470}, + dictWord{8, 10, 66}, + dictWord{8, 10, 137}, + dictWord{ + 8, + 10, + 761, + }, + dictWord{9, 10, 638}, + dictWord{11, 10, 80}, + dictWord{11, 10, 212}, + dictWord{11, 10, 368}, + dictWord{11, 10, 418}, + dictWord{12, 10, 8}, + dictWord{ + 13, + 10, + 15, + }, + dictWord{16, 10, 61}, + dictWord{17, 10, 59}, + dictWord{19, 10, 28}, + dictWord{148, 10, 84}, + dictWord{20, 0, 109}, + dictWord{135, 11, 1148}, + dictWord{ + 6, + 11, + 277, + }, + dictWord{7, 11, 1274}, + dictWord{7, 11, 1386}, + dictWord{7, 11, 1392}, + dictWord{12, 11, 129}, + dictWord{146, 11, 87}, + dictWord{6, 11, 187}, + dictWord{7, 11, 39}, + dictWord{7, 11, 1203}, + dictWord{8, 11, 380}, + dictWord{8, 11, 542}, + dictWord{14, 11, 117}, + dictWord{149, 11, 28}, + dictWord{134, 0, 1187}, + dictWord{5, 0, 266}, + dictWord{9, 0, 290}, + dictWord{9, 0, 364}, + dictWord{10, 0, 293}, + dictWord{11, 0, 606}, + dictWord{142, 0, 45}, + dictWord{6, 11, 297}, + dictWord{ + 7, + 11, + 793, + }, + dictWord{139, 11, 938}, + dictWord{4, 0, 50}, + dictWord{6, 0, 594}, + dictWord{9, 0, 121}, + dictWord{10, 0, 49}, + dictWord{10, 0, 412}, + dictWord{139, 0, 834}, + dictWord{136, 0, 748}, + dictWord{7, 11, 464}, + dictWord{8, 11, 438}, + dictWord{11, 11, 105}, + dictWord{11, 11, 363}, + dictWord{12, 11, 231}, + dictWord{ + 14, + 11, + 386, + }, + dictWord{15, 11, 102}, + dictWord{148, 11, 75}, + dictWord{132, 0, 466}, + dictWord{13, 0, 399}, + dictWord{14, 0, 337}, + dictWord{6, 10, 38}, + dictWord{ + 7, + 10, + 1220, + }, + dictWord{8, 10, 185}, + dictWord{8, 10, 256}, + dictWord{9, 10, 22}, + dictWord{9, 10, 331}, + dictWord{10, 10, 738}, + dictWord{11, 10, 205}, + dictWord{ + 11, + 10, + 540, + }, + dictWord{11, 10, 746}, + dictWord{13, 10, 465}, + dictWord{142, 10, 194}, + dictWord{9, 0, 378}, + dictWord{141, 0, 162}, + dictWord{137, 0, 519}, + dictWord{ + 4, + 10, + 159, + }, + dictWord{6, 10, 115}, + dictWord{7, 10, 252}, + dictWord{7, 10, 257}, + dictWord{7, 10, 1928}, + dictWord{8, 10, 69}, + dictWord{9, 10, 384}, + dictWord{ + 10, + 10, + 91, + }, + dictWord{10, 10, 615}, + dictWord{12, 10, 375}, + dictWord{14, 10, 235}, + dictWord{18, 10, 117}, + dictWord{147, 10, 123}, + dictWord{5, 11, 604}, + dictWord{ + 5, + 10, + 911, + }, + dictWord{136, 10, 278}, + dictWord{132, 0, 667}, + dictWord{8, 0, 351}, + dictWord{9, 0, 322}, + dictWord{4, 10, 151}, + dictWord{135, 10, 1567}, + dictWord{134, 0, 902}, + dictWord{133, 10, 990}, + dictWord{12, 0, 180}, + dictWord{5, 10, 194}, + dictWord{7, 10, 1662}, + dictWord{137, 10, 90}, + dictWord{4, 0, 869}, + dictWord{134, 0, 1996}, + dictWord{134, 0, 813}, + dictWord{133, 10, 425}, + dictWord{137, 11, 761}, + dictWord{132, 0, 260}, + dictWord{133, 10, 971}, + dictWord{ + 5, + 11, + 20, + }, + dictWord{6, 11, 298}, + dictWord{7, 11, 659}, + dictWord{7, 11, 1366}, + dictWord{137, 11, 219}, + dictWord{4, 0, 39}, + dictWord{5, 0, 36}, + dictWord{ + 7, + 0, + 1843, + }, + dictWord{8, 0, 407}, + dictWord{11, 0, 144}, + dictWord{140, 0, 523}, + dictWord{4, 0, 510}, 
+ dictWord{10, 0, 587}, + dictWord{139, 10, 752}, + dictWord{7, 0, 29}, + dictWord{7, 0, 66}, + dictWord{7, 0, 1980}, + dictWord{10, 0, 487}, + dictWord{138, 0, 809}, + dictWord{13, 0, 260}, + dictWord{14, 0, 82}, + dictWord{18, 0, 63}, + dictWord{ + 137, + 10, + 662, + }, + dictWord{5, 10, 72}, + dictWord{6, 10, 264}, + dictWord{7, 10, 21}, + dictWord{7, 10, 46}, + dictWord{7, 10, 2013}, + dictWord{8, 10, 215}, + dictWord{ + 8, + 10, + 513, + }, + dictWord{10, 10, 266}, + dictWord{139, 10, 22}, + dictWord{134, 0, 570}, + dictWord{6, 0, 565}, + dictWord{7, 0, 1667}, + dictWord{4, 11, 439}, + dictWord{ + 10, + 10, + 95, + }, + dictWord{11, 10, 603}, + dictWord{12, 11, 242}, + dictWord{13, 10, 443}, + dictWord{14, 10, 160}, + dictWord{143, 10, 4}, + dictWord{134, 0, 1464}, + dictWord{ + 134, + 10, + 431, + }, + dictWord{9, 0, 372}, + dictWord{15, 0, 2}, + dictWord{19, 0, 10}, + dictWord{19, 0, 18}, + dictWord{5, 10, 874}, + dictWord{6, 10, 1677}, + dictWord{143, 10, 0}, + dictWord{132, 0, 787}, + dictWord{6, 0, 380}, + dictWord{12, 0, 399}, + dictWord{21, 0, 19}, + dictWord{7, 10, 939}, + dictWord{7, 10, 1172}, + dictWord{7, 10, 1671}, + dictWord{9, 10, 540}, + dictWord{10, 10, 696}, + dictWord{11, 10, 265}, + dictWord{11, 10, 732}, + dictWord{11, 10, 928}, + dictWord{11, 10, 937}, + dictWord{ + 141, + 10, + 438, + }, + dictWord{137, 0, 200}, + dictWord{132, 11, 233}, + dictWord{132, 0, 516}, + dictWord{134, 11, 577}, + dictWord{132, 0, 844}, + dictWord{11, 0, 887}, + dictWord{14, 0, 365}, + dictWord{142, 0, 375}, + dictWord{132, 11, 482}, + dictWord{8, 0, 821}, + dictWord{140, 0, 44}, + dictWord{7, 0, 1655}, + dictWord{136, 0, 305}, + dictWord{5, 10, 682}, + dictWord{135, 10, 1887}, + dictWord{135, 11, 346}, + dictWord{132, 10, 696}, + dictWord{4, 0, 10}, + dictWord{7, 0, 917}, + dictWord{139, 0, 786}, + dictWord{5, 11, 795}, + dictWord{6, 11, 1741}, + dictWord{8, 11, 417}, + dictWord{137, 11, 782}, + dictWord{4, 0, 1016}, + dictWord{134, 0, 2031}, + dictWord{5, 0, 684}, + dictWord{4, 10, 726}, + dictWord{133, 10, 630}, + dictWord{6, 0, 1021}, + dictWord{134, 0, 1480}, + dictWord{8, 10, 802}, + dictWord{136, 10, 838}, + dictWord{ + 134, + 0, + 27, + }, + dictWord{134, 0, 395}, + dictWord{135, 11, 622}, + dictWord{7, 11, 625}, + dictWord{135, 11, 1750}, + dictWord{4, 11, 203}, + dictWord{135, 11, 1936}, + dictWord{6, 10, 118}, + dictWord{7, 10, 215}, + dictWord{7, 10, 1521}, + dictWord{140, 10, 11}, + dictWord{132, 0, 813}, + dictWord{136, 0, 511}, + dictWord{7, 10, 615}, + dictWord{138, 10, 251}, + dictWord{135, 10, 1044}, + dictWord{145, 0, 56}, + dictWord{133, 10, 225}, + dictWord{6, 0, 342}, + dictWord{6, 0, 496}, + dictWord{8, 0, 275}, + dictWord{137, 0, 206}, + dictWord{4, 0, 909}, + dictWord{133, 0, 940}, + dictWord{132, 0, 891}, + dictWord{7, 11, 311}, + dictWord{9, 11, 308}, + dictWord{ + 140, + 11, + 255, + }, + dictWord{4, 10, 370}, + dictWord{5, 10, 756}, + dictWord{135, 10, 1326}, + dictWord{4, 0, 687}, + dictWord{134, 0, 1596}, + dictWord{134, 0, 1342}, + dictWord{ + 6, + 10, + 1662, + }, + dictWord{7, 10, 48}, + dictWord{8, 10, 771}, + dictWord{10, 10, 116}, + dictWord{13, 10, 104}, + dictWord{14, 10, 105}, + dictWord{14, 10, 184}, + dictWord{15, 10, 168}, + dictWord{19, 10, 92}, + dictWord{148, 10, 68}, + dictWord{138, 10, 209}, + dictWord{4, 11, 400}, + dictWord{5, 11, 267}, + dictWord{135, 11, 232}, + dictWord{151, 11, 12}, + dictWord{6, 0, 41}, + dictWord{141, 0, 160}, + dictWord{141, 11, 314}, + dictWord{134, 0, 1718}, + dictWord{136, 0, 778}, + dictWord{ + 142, + 11, + 
261, + }, + dictWord{134, 0, 1610}, + dictWord{133, 0, 115}, + dictWord{132, 0, 294}, + dictWord{14, 0, 314}, + dictWord{132, 10, 120}, + dictWord{132, 0, 983}, + dictWord{5, 0, 193}, + dictWord{140, 0, 178}, + dictWord{138, 10, 429}, + dictWord{5, 10, 820}, + dictWord{135, 10, 931}, + dictWord{6, 0, 994}, + dictWord{6, 0, 1051}, + dictWord{6, 0, 1439}, + dictWord{7, 0, 174}, + dictWord{133, 11, 732}, + dictWord{4, 11, 100}, + dictWord{7, 11, 679}, + dictWord{8, 11, 313}, + dictWord{138, 10, 199}, + dictWord{6, 10, 151}, + dictWord{6, 10, 1675}, + dictWord{7, 10, 383}, + dictWord{151, 10, 10}, + dictWord{6, 0, 1796}, + dictWord{8, 0, 848}, + dictWord{8, 0, 867}, + dictWord{ + 8, + 0, + 907, + }, + dictWord{10, 0, 855}, + dictWord{140, 0, 703}, + dictWord{140, 0, 221}, + dictWord{4, 0, 122}, + dictWord{5, 0, 796}, + dictWord{5, 0, 952}, + dictWord{6, 0, 1660}, + dictWord{6, 0, 1671}, + dictWord{8, 0, 567}, + dictWord{9, 0, 687}, + dictWord{9, 0, 742}, + dictWord{10, 0, 686}, + dictWord{11, 0, 682}, + dictWord{11, 0, 909}, + dictWord{ + 140, + 0, + 281, + }, + dictWord{5, 11, 362}, + dictWord{5, 11, 443}, + dictWord{6, 11, 318}, + dictWord{7, 11, 1019}, + dictWord{139, 11, 623}, + dictWord{5, 11, 463}, + dictWord{136, 11, 296}, + dictWord{11, 0, 583}, + dictWord{13, 0, 262}, + dictWord{6, 10, 1624}, + dictWord{12, 10, 422}, + dictWord{142, 10, 360}, + dictWord{5, 0, 179}, + dictWord{7, 0, 1095}, + dictWord{135, 0, 1213}, + dictWord{4, 10, 43}, + dictWord{4, 11, 454}, + dictWord{5, 10, 344}, + dictWord{133, 10, 357}, + dictWord{4, 0, 66}, + dictWord{7, 0, 722}, + dictWord{135, 0, 904}, + dictWord{134, 0, 773}, + dictWord{7, 0, 352}, + dictWord{133, 10, 888}, + dictWord{5, 11, 48}, + dictWord{5, 11, 404}, + dictWord{ + 6, + 11, + 557, + }, + dictWord{7, 11, 458}, + dictWord{8, 11, 597}, + dictWord{10, 11, 455}, + dictWord{10, 11, 606}, + dictWord{11, 11, 49}, + dictWord{11, 11, 548}, + dictWord{ + 12, + 11, + 476, + }, + dictWord{13, 11, 18}, + dictWord{141, 11, 450}, + dictWord{134, 11, 418}, + dictWord{132, 10, 711}, + dictWord{5, 11, 442}, + dictWord{ + 135, + 11, + 1984, + }, + dictWord{141, 0, 35}, + dictWord{137, 0, 152}, + dictWord{134, 0, 1197}, + dictWord{135, 11, 1093}, + dictWord{137, 11, 203}, + dictWord{137, 10, 440}, + dictWord{10, 0, 592}, + dictWord{10, 0, 753}, + dictWord{12, 0, 317}, + dictWord{12, 0, 355}, + dictWord{12, 0, 465}, + dictWord{12, 0, 469}, + dictWord{12, 0, 560}, + dictWord{12, 0, 578}, + dictWord{141, 0, 243}, + dictWord{133, 0, 564}, + dictWord{134, 0, 797}, + dictWord{5, 10, 958}, + dictWord{133, 10, 987}, + dictWord{5, 11, 55}, + dictWord{7, 11, 376}, + dictWord{140, 11, 161}, + dictWord{133, 11, 450}, + dictWord{134, 0, 556}, + dictWord{134, 0, 819}, + dictWord{11, 10, 276}, + dictWord{ + 142, + 10, + 293, + }, + dictWord{7, 0, 544}, + dictWord{138, 0, 61}, + dictWord{8, 0, 719}, + dictWord{4, 10, 65}, + dictWord{5, 10, 479}, + dictWord{5, 10, 1004}, + dictWord{7, 10, 1913}, + dictWord{8, 10, 317}, + dictWord{9, 10, 302}, + dictWord{10, 10, 612}, + dictWord{141, 10, 22}, + dictWord{4, 0, 5}, + dictWord{5, 0, 498}, + dictWord{8, 0, 637}, + dictWord{ + 9, + 0, + 521, + }, + dictWord{4, 11, 213}, + dictWord{4, 10, 261}, + dictWord{7, 11, 223}, + dictWord{7, 10, 510}, + dictWord{136, 11, 80}, + dictWord{5, 0, 927}, + dictWord{7, 0, 101}, + dictWord{4, 10, 291}, + dictWord{7, 11, 381}, + dictWord{7, 11, 806}, + dictWord{7, 11, 820}, + dictWord{8, 11, 354}, + dictWord{8, 11, 437}, + dictWord{8, 11, 787}, + dictWord{9, 10, 515}, + dictWord{9, 11, 657}, + 
dictWord{10, 11, 58}, + dictWord{10, 11, 339}, + dictWord{10, 11, 749}, + dictWord{11, 11, 914}, + dictWord{12, 10, 152}, + dictWord{12, 11, 162}, + dictWord{12, 10, 443}, + dictWord{13, 11, 75}, + dictWord{13, 10, 392}, + dictWord{14, 11, 106}, + dictWord{14, 11, 198}, + dictWord{ + 14, + 11, + 320, + }, + dictWord{14, 10, 357}, + dictWord{14, 11, 413}, + dictWord{146, 11, 43}, + dictWord{6, 0, 1153}, + dictWord{7, 0, 1441}, + dictWord{136, 11, 747}, + dictWord{ + 4, + 0, + 893, + }, + dictWord{5, 0, 780}, + dictWord{133, 0, 893}, + dictWord{138, 11, 654}, + dictWord{133, 11, 692}, + dictWord{133, 0, 238}, + dictWord{134, 11, 191}, + dictWord{4, 10, 130}, + dictWord{135, 10, 843}, + dictWord{6, 0, 1296}, + dictWord{5, 10, 42}, + dictWord{5, 10, 879}, + dictWord{7, 10, 245}, + dictWord{7, 10, 324}, + dictWord{ + 7, + 10, + 1532, + }, + dictWord{11, 10, 463}, + dictWord{11, 10, 472}, + dictWord{13, 10, 363}, + dictWord{144, 10, 52}, + dictWord{134, 0, 1729}, + dictWord{6, 0, 1999}, + dictWord{136, 0, 969}, + dictWord{4, 10, 134}, + dictWord{133, 10, 372}, + dictWord{4, 0, 60}, + dictWord{7, 0, 941}, + dictWord{7, 0, 1800}, + dictWord{8, 0, 314}, + dictWord{ + 9, + 0, + 700, + }, + dictWord{139, 0, 487}, + dictWord{134, 0, 1144}, + dictWord{6, 11, 162}, + dictWord{7, 11, 1960}, + dictWord{136, 11, 831}, + dictWord{132, 11, 706}, + dictWord{135, 0, 1147}, + dictWord{138, 11, 426}, + dictWord{138, 11, 89}, + dictWord{7, 0, 1853}, + dictWord{138, 0, 437}, + dictWord{136, 0, 419}, + dictWord{ + 135, + 10, + 1634, + }, + dictWord{133, 0, 828}, + dictWord{5, 0, 806}, + dictWord{7, 0, 176}, + dictWord{7, 0, 178}, + dictWord{7, 0, 1240}, + dictWord{7, 0, 1976}, + dictWord{ + 132, + 10, + 644, + }, + dictWord{135, 11, 1877}, + dictWord{5, 11, 420}, + dictWord{135, 11, 1449}, + dictWord{4, 0, 51}, + dictWord{5, 0, 39}, + dictWord{6, 0, 4}, + dictWord{7, 0, 591}, + dictWord{7, 0, 849}, + dictWord{7, 0, 951}, + dictWord{7, 0, 1613}, + dictWord{7, 0, 1760}, + dictWord{7, 0, 1988}, + dictWord{9, 0, 434}, + dictWord{10, 0, 754}, + dictWord{ + 11, + 0, + 25, + }, + dictWord{139, 0, 37}, + dictWord{10, 11, 57}, + dictWord{138, 11, 277}, + dictWord{135, 10, 540}, + dictWord{132, 11, 204}, + dictWord{135, 0, 159}, + dictWord{139, 11, 231}, + dictWord{133, 0, 902}, + dictWord{7, 0, 928}, + dictWord{7, 11, 366}, + dictWord{9, 11, 287}, + dictWord{12, 11, 199}, + dictWord{12, 11, 556}, + dictWord{140, 11, 577}, + dictWord{6, 10, 623}, + dictWord{136, 10, 789}, + dictWord{4, 10, 908}, + dictWord{5, 10, 359}, + dictWord{5, 10, 508}, + dictWord{6, 10, 1723}, + dictWord{7, 10, 343}, + dictWord{7, 10, 1996}, + dictWord{135, 10, 2026}, + dictWord{134, 0, 270}, + dictWord{4, 10, 341}, + dictWord{135, 10, 480}, + dictWord{ + 5, + 11, + 356, + }, + dictWord{135, 11, 224}, + dictWord{11, 11, 588}, + dictWord{11, 11, 864}, + dictWord{11, 11, 968}, + dictWord{143, 11, 160}, + dictWord{132, 0, 556}, + dictWord{137, 0, 801}, + dictWord{132, 0, 416}, + dictWord{142, 0, 372}, + dictWord{5, 0, 152}, + dictWord{5, 0, 197}, + dictWord{7, 0, 340}, + dictWord{7, 0, 867}, + dictWord{ + 10, + 0, + 548, + }, + dictWord{10, 0, 581}, + dictWord{11, 0, 6}, + dictWord{12, 0, 3}, + dictWord{12, 0, 19}, + dictWord{14, 0, 110}, + dictWord{142, 0, 289}, + dictWord{139, 0, 369}, + dictWord{7, 11, 630}, + dictWord{9, 11, 567}, + dictWord{11, 11, 150}, + dictWord{11, 11, 444}, + dictWord{141, 11, 119}, + dictWord{134, 11, 539}, + dictWord{ + 7, + 10, + 1995, + }, + dictWord{8, 10, 299}, + dictWord{11, 10, 890}, + dictWord{140, 10, 674}, + 
dictWord{7, 0, 34}, + dictWord{7, 0, 190}, + dictWord{8, 0, 28}, + dictWord{8, 0, 141}, + dictWord{8, 0, 444}, + dictWord{8, 0, 811}, + dictWord{9, 0, 468}, + dictWord{11, 0, 334}, + dictWord{12, 0, 24}, + dictWord{12, 0, 386}, + dictWord{140, 0, 576}, + dictWord{ + 133, + 0, + 757, + }, + dictWord{7, 0, 1553}, + dictWord{136, 0, 898}, + dictWord{133, 0, 721}, + dictWord{136, 0, 1012}, + dictWord{4, 0, 789}, + dictWord{5, 0, 647}, + dictWord{ + 135, + 0, + 1102, + }, + dictWord{132, 0, 898}, + dictWord{10, 0, 183}, + dictWord{4, 10, 238}, + dictWord{5, 10, 503}, + dictWord{6, 10, 179}, + dictWord{7, 10, 2003}, + dictWord{ + 8, + 10, + 381, + }, + dictWord{8, 10, 473}, + dictWord{9, 10, 149}, + dictWord{10, 10, 788}, + dictWord{15, 10, 45}, + dictWord{15, 10, 86}, + dictWord{20, 10, 110}, + dictWord{ + 150, + 10, + 57, + }, + dictWord{9, 0, 136}, + dictWord{19, 0, 107}, + dictWord{4, 10, 121}, + dictWord{5, 10, 156}, + dictWord{5, 10, 349}, + dictWord{10, 10, 605}, + dictWord{ + 142, + 10, + 342, + }, + dictWord{4, 11, 235}, + dictWord{135, 11, 255}, + dictWord{4, 11, 194}, + dictWord{5, 11, 584}, + dictWord{6, 11, 384}, + dictWord{7, 11, 583}, + dictWord{ + 10, + 11, + 761, + }, + dictWord{11, 11, 760}, + dictWord{139, 11, 851}, + dictWord{6, 10, 80}, + dictWord{6, 10, 1694}, + dictWord{7, 10, 173}, + dictWord{7, 10, 1974}, + dictWord{ + 9, + 10, + 547, + }, + dictWord{10, 10, 730}, + dictWord{14, 10, 18}, + dictWord{150, 10, 39}, + dictWord{4, 10, 923}, + dictWord{134, 10, 1711}, + dictWord{5, 0, 277}, + dictWord{141, 0, 247}, + dictWord{132, 0, 435}, + dictWord{133, 11, 562}, + dictWord{134, 0, 1311}, + dictWord{5, 11, 191}, + dictWord{137, 11, 271}, + dictWord{ + 132, + 10, + 595, + }, + dictWord{7, 11, 1537}, + dictWord{14, 11, 96}, + dictWord{143, 11, 73}, + dictWord{5, 0, 437}, + dictWord{7, 0, 502}, + dictWord{7, 0, 519}, + dictWord{7, 0, 1122}, + dictWord{7, 0, 1751}, + dictWord{14, 0, 211}, + dictWord{6, 10, 459}, + dictWord{7, 10, 1753}, + dictWord{7, 10, 1805}, + dictWord{8, 10, 658}, + dictWord{9, 10, 1}, + dictWord{11, 10, 959}, + dictWord{141, 10, 446}, + dictWord{6, 0, 814}, + dictWord{4, 11, 470}, + dictWord{5, 11, 473}, + dictWord{6, 11, 153}, + dictWord{7, 11, 1503}, + dictWord{7, 11, 1923}, + dictWord{10, 11, 701}, + dictWord{11, 11, 132}, + dictWord{11, 11, 168}, + dictWord{11, 11, 227}, + dictWord{11, 11, 320}, + dictWord{ + 11, + 11, + 436, + }, + dictWord{11, 11, 525}, + dictWord{11, 11, 855}, + dictWord{12, 11, 41}, + dictWord{12, 11, 286}, + dictWord{13, 11, 103}, + dictWord{13, 11, 284}, + dictWord{ + 14, + 11, + 255, + }, + dictWord{14, 11, 262}, + dictWord{15, 11, 117}, + dictWord{143, 11, 127}, + dictWord{5, 0, 265}, + dictWord{6, 0, 212}, + dictWord{135, 0, 28}, + dictWord{ + 138, + 0, + 750, + }, + dictWord{133, 11, 327}, + dictWord{6, 11, 552}, + dictWord{7, 11, 1754}, + dictWord{137, 11, 604}, + dictWord{134, 0, 2012}, + dictWord{132, 0, 702}, + dictWord{5, 11, 80}, + dictWord{6, 11, 405}, + dictWord{7, 11, 403}, + dictWord{7, 11, 1502}, + dictWord{7, 11, 1626}, + dictWord{8, 11, 456}, + dictWord{9, 11, 487}, + dictWord{9, 11, 853}, + dictWord{9, 11, 889}, + dictWord{10, 11, 309}, + dictWord{11, 11, 721}, + dictWord{11, 11, 994}, + dictWord{12, 11, 430}, + dictWord{ + 141, + 11, + 165, + }, + dictWord{5, 0, 808}, + dictWord{135, 0, 2045}, + dictWord{5, 0, 166}, + dictWord{8, 0, 739}, + dictWord{140, 0, 511}, + dictWord{134, 10, 490}, + dictWord{ + 4, + 11, + 453, + }, + dictWord{5, 11, 887}, + dictWord{6, 11, 535}, + dictWord{8, 11, 6}, + dictWord{136, 11, 
543}, + dictWord{4, 0, 119}, + dictWord{5, 0, 170}, + dictWord{5, 0, 447}, + dictWord{7, 0, 1708}, + dictWord{7, 0, 1889}, + dictWord{9, 0, 357}, + dictWord{9, 0, 719}, + dictWord{12, 0, 486}, + dictWord{140, 0, 596}, + dictWord{137, 0, 500}, + dictWord{ + 7, + 10, + 250, + }, + dictWord{136, 10, 507}, + dictWord{132, 10, 158}, + dictWord{6, 0, 809}, + dictWord{134, 0, 1500}, + dictWord{9, 0, 327}, + dictWord{11, 0, 350}, + dictWord{11, 0, 831}, + dictWord{13, 0, 352}, + dictWord{4, 10, 140}, + dictWord{7, 10, 362}, + dictWord{8, 10, 209}, + dictWord{9, 10, 10}, + dictWord{9, 10, 503}, + dictWord{ + 9, + 10, + 614, + }, + dictWord{10, 10, 689}, + dictWord{11, 10, 327}, + dictWord{11, 10, 725}, + dictWord{12, 10, 252}, + dictWord{12, 10, 583}, + dictWord{13, 10, 192}, + dictWord{14, 10, 269}, + dictWord{14, 10, 356}, + dictWord{148, 10, 50}, + dictWord{135, 11, 741}, + dictWord{4, 0, 450}, + dictWord{7, 0, 1158}, + dictWord{19, 10, 1}, + dictWord{19, 10, 26}, + dictWord{150, 10, 9}, + dictWord{6, 0, 597}, + dictWord{135, 0, 1318}, + dictWord{134, 0, 1602}, + dictWord{6, 10, 228}, + dictWord{7, 10, 1341}, + dictWord{9, 10, 408}, + dictWord{138, 10, 343}, + dictWord{7, 0, 1375}, + dictWord{7, 0, 1466}, + dictWord{138, 0, 331}, + dictWord{132, 0, 754}, + dictWord{ + 132, + 10, + 557, + }, + dictWord{5, 11, 101}, + dictWord{6, 11, 88}, + dictWord{6, 11, 543}, + dictWord{7, 11, 1677}, + dictWord{9, 11, 100}, + dictWord{10, 11, 677}, + dictWord{ + 14, + 11, + 169, + }, + dictWord{14, 11, 302}, + dictWord{14, 11, 313}, + dictWord{15, 11, 48}, + dictWord{143, 11, 84}, + dictWord{134, 0, 1368}, + dictWord{4, 11, 310}, + dictWord{ + 9, + 11, + 795, + }, + dictWord{10, 11, 733}, + dictWord{11, 11, 451}, + dictWord{12, 11, 249}, + dictWord{14, 11, 115}, + dictWord{14, 11, 286}, + dictWord{143, 11, 100}, + dictWord{132, 10, 548}, + dictWord{10, 0, 557}, + dictWord{7, 10, 197}, + dictWord{8, 10, 142}, + dictWord{8, 10, 325}, + dictWord{9, 10, 150}, + dictWord{9, 10, 596}, + dictWord{10, 10, 353}, + dictWord{11, 10, 74}, + dictWord{11, 10, 315}, + dictWord{12, 10, 662}, + dictWord{12, 10, 681}, + dictWord{14, 10, 423}, + dictWord{ + 143, + 10, + 141, + }, + dictWord{133, 11, 587}, + dictWord{5, 0, 850}, + dictWord{136, 0, 799}, + dictWord{10, 0, 908}, + dictWord{12, 0, 701}, + dictWord{12, 0, 757}, + dictWord{ + 142, + 0, + 466, + }, + dictWord{4, 0, 62}, + dictWord{5, 0, 275}, + dictWord{18, 0, 19}, + dictWord{6, 10, 399}, + dictWord{6, 10, 579}, + dictWord{7, 10, 692}, + dictWord{7, 10, 846}, + dictWord{ + 7, + 10, + 1015, + }, + dictWord{7, 10, 1799}, + dictWord{8, 10, 403}, + dictWord{9, 10, 394}, + dictWord{10, 10, 133}, + dictWord{12, 10, 4}, + dictWord{12, 10, 297}, + dictWord{12, 10, 452}, + dictWord{16, 10, 81}, + dictWord{18, 10, 25}, + dictWord{21, 10, 14}, + dictWord{22, 10, 12}, + dictWord{151, 10, 18}, + dictWord{12, 0, 459}, + dictWord{ + 7, + 10, + 1546, + }, + dictWord{11, 10, 299}, + dictWord{142, 10, 407}, + dictWord{132, 10, 177}, + dictWord{132, 11, 498}, + dictWord{7, 11, 217}, + dictWord{ + 8, + 11, + 140, + }, + dictWord{138, 11, 610}, + dictWord{5, 10, 411}, + dictWord{135, 10, 653}, + dictWord{134, 0, 1802}, + dictWord{7, 10, 439}, + dictWord{10, 10, 727}, + dictWord{11, 10, 260}, + dictWord{139, 10, 684}, + dictWord{133, 11, 905}, + dictWord{11, 11, 580}, + dictWord{142, 11, 201}, + dictWord{134, 0, 1397}, + dictWord{ + 5, + 10, + 208, + }, + dictWord{7, 10, 753}, + dictWord{135, 10, 1528}, + dictWord{7, 0, 238}, + dictWord{7, 0, 2033}, + dictWord{8, 0, 120}, + dictWord{8, 0, 
188}, + dictWord{8, 0, 659}, + dictWord{9, 0, 598}, + dictWord{10, 0, 466}, + dictWord{12, 0, 342}, + dictWord{12, 0, 588}, + dictWord{13, 0, 503}, + dictWord{14, 0, 246}, + dictWord{143, 0, 92}, + dictWord{135, 11, 1041}, + dictWord{4, 11, 456}, + dictWord{7, 11, 105}, + dictWord{7, 11, 358}, + dictWord{7, 11, 1637}, + dictWord{8, 11, 643}, + dictWord{139, 11, 483}, + dictWord{6, 0, 1318}, + dictWord{134, 0, 1324}, + dictWord{4, 0, 201}, + dictWord{7, 0, 1744}, + dictWord{8, 0, 602}, + dictWord{11, 0, 247}, + dictWord{11, 0, 826}, + dictWord{17, 0, 65}, + dictWord{133, 10, 242}, + dictWord{8, 0, 164}, + dictWord{146, 0, 62}, + dictWord{133, 10, 953}, + dictWord{139, 10, 802}, + dictWord{133, 0, 615}, + dictWord{7, 11, 1566}, + dictWord{8, 11, 269}, + dictWord{9, 11, 212}, + dictWord{9, 11, 718}, + dictWord{14, 11, 15}, + dictWord{14, 11, 132}, + dictWord{142, 11, 227}, + dictWord{133, 10, 290}, + dictWord{132, 10, 380}, + dictWord{5, 10, 52}, + dictWord{7, 10, 277}, + dictWord{9, 10, 368}, + dictWord{139, 10, 791}, + dictWord{ + 135, + 0, + 1243, + }, + dictWord{133, 11, 539}, + dictWord{11, 11, 919}, + dictWord{141, 11, 409}, + dictWord{136, 0, 968}, + dictWord{133, 11, 470}, + dictWord{134, 0, 882}, + dictWord{132, 0, 907}, + dictWord{5, 0, 100}, + dictWord{10, 0, 329}, + dictWord{12, 0, 416}, + dictWord{149, 0, 29}, + dictWord{10, 10, 138}, + dictWord{139, 10, 476}, + dictWord{5, 10, 725}, + dictWord{5, 10, 727}, + dictWord{6, 11, 91}, + dictWord{7, 11, 435}, + dictWord{135, 10, 1811}, + dictWord{4, 11, 16}, + dictWord{5, 11, 316}, + dictWord{5, 11, 842}, + dictWord{6, 11, 370}, + dictWord{6, 11, 1778}, + dictWord{8, 11, 166}, + dictWord{11, 11, 812}, + dictWord{12, 11, 206}, + dictWord{12, 11, 351}, + dictWord{14, 11, 418}, + dictWord{16, 11, 15}, + dictWord{16, 11, 34}, + dictWord{18, 11, 3}, + dictWord{19, 11, 3}, + dictWord{19, 11, 7}, + dictWord{20, 11, 4}, + dictWord{ + 149, + 11, + 21, + }, + dictWord{132, 0, 176}, + dictWord{5, 0, 636}, + dictWord{5, 0, 998}, + dictWord{7, 0, 9}, + dictWord{7, 0, 1508}, + dictWord{8, 0, 26}, + dictWord{9, 0, 317}, + dictWord{ + 9, + 0, + 358, + }, + dictWord{10, 0, 210}, + dictWord{10, 0, 292}, + dictWord{10, 0, 533}, + dictWord{11, 0, 555}, + dictWord{12, 0, 526}, + dictWord{12, 0, 607}, + dictWord{ + 13, + 0, + 263, + }, + dictWord{13, 0, 459}, + dictWord{142, 0, 271}, + dictWord{6, 0, 256}, + dictWord{8, 0, 265}, + dictWord{4, 10, 38}, + dictWord{7, 10, 307}, + dictWord{7, 10, 999}, + dictWord{7, 10, 1481}, + dictWord{7, 10, 1732}, + dictWord{7, 10, 1738}, + dictWord{9, 10, 414}, + dictWord{11, 10, 316}, + dictWord{12, 10, 52}, + dictWord{13, 10, 420}, + dictWord{147, 10, 100}, + dictWord{135, 10, 1296}, + dictWord{4, 11, 611}, + dictWord{133, 11, 606}, + dictWord{4, 0, 643}, + dictWord{142, 11, 21}, + dictWord{ + 133, + 11, + 715, + }, + dictWord{133, 10, 723}, + dictWord{6, 0, 610}, + dictWord{135, 11, 597}, + dictWord{10, 0, 127}, + dictWord{141, 0, 27}, + dictWord{6, 0, 1995}, + dictWord{ + 6, + 0, + 2001, + }, + dictWord{8, 0, 119}, + dictWord{136, 0, 973}, + dictWord{4, 11, 149}, + dictWord{138, 11, 368}, + dictWord{12, 0, 522}, + dictWord{4, 11, 154}, + dictWord{ + 5, + 10, + 109, + }, + dictWord{6, 10, 1784}, + dictWord{7, 11, 1134}, + dictWord{7, 10, 1895}, + dictWord{8, 11, 105}, + dictWord{12, 10, 296}, + dictWord{140, 10, 302}, + dictWord{4, 11, 31}, + dictWord{6, 11, 429}, + dictWord{7, 11, 962}, + dictWord{9, 11, 458}, + dictWord{139, 11, 691}, + dictWord{10, 0, 553}, + dictWord{11, 0, 876}, + dictWord{13, 0, 193}, + 
dictWord{13, 0, 423}, + dictWord{14, 0, 166}, + dictWord{19, 0, 84}, + dictWord{4, 11, 312}, + dictWord{5, 10, 216}, + dictWord{7, 10, 1879}, + dictWord{ + 9, + 10, + 141, + }, + dictWord{9, 10, 270}, + dictWord{9, 10, 679}, + dictWord{10, 10, 159}, + dictWord{11, 10, 197}, + dictWord{12, 10, 538}, + dictWord{12, 10, 559}, + dictWord{14, 10, 144}, + dictWord{14, 10, 167}, + dictWord{143, 10, 67}, + dictWord{134, 0, 1582}, + dictWord{7, 0, 1578}, + dictWord{135, 11, 1578}, + dictWord{ + 137, + 10, + 81, + }, + dictWord{132, 11, 236}, + dictWord{134, 10, 391}, + dictWord{134, 0, 795}, + dictWord{7, 10, 322}, + dictWord{136, 10, 249}, + dictWord{5, 11, 836}, + dictWord{ + 5, + 11, + 857, + }, + dictWord{6, 11, 1680}, + dictWord{7, 11, 59}, + dictWord{147, 11, 53}, + dictWord{135, 0, 432}, + dictWord{10, 11, 68}, + dictWord{139, 11, 494}, + dictWord{4, 11, 81}, + dictWord{139, 11, 867}, + dictWord{7, 0, 126}, + dictWord{136, 0, 84}, + dictWord{142, 11, 280}, + dictWord{5, 11, 282}, + dictWord{8, 11, 650}, + dictWord{ + 9, + 11, + 295, + }, + dictWord{9, 11, 907}, + dictWord{138, 11, 443}, + dictWord{136, 0, 790}, + dictWord{5, 10, 632}, + dictWord{138, 10, 526}, + dictWord{6, 0, 64}, + dictWord{12, 0, 377}, + dictWord{13, 0, 309}, + dictWord{14, 0, 141}, + dictWord{14, 0, 429}, + dictWord{14, 11, 141}, + dictWord{142, 11, 429}, + dictWord{134, 0, 1529}, + dictWord{6, 0, 321}, + dictWord{7, 0, 1857}, + dictWord{9, 0, 530}, + dictWord{19, 0, 99}, + dictWord{7, 10, 948}, + dictWord{7, 10, 1042}, + dictWord{8, 10, 235}, + dictWord{ + 8, + 10, + 461, + }, + dictWord{9, 10, 453}, + dictWord{10, 10, 354}, + dictWord{145, 10, 77}, + dictWord{7, 0, 1104}, + dictWord{11, 0, 269}, + dictWord{11, 0, 539}, + dictWord{ + 11, + 0, + 627, + }, + dictWord{11, 0, 706}, + dictWord{11, 0, 975}, + dictWord{12, 0, 248}, + dictWord{12, 0, 434}, + dictWord{12, 0, 600}, + dictWord{12, 0, 622}, + dictWord{ + 13, + 0, + 297, + }, + dictWord{13, 0, 485}, + dictWord{14, 0, 69}, + dictWord{14, 0, 409}, + dictWord{143, 0, 108}, + dictWord{4, 10, 362}, + dictWord{7, 10, 52}, + dictWord{7, 10, 303}, + dictWord{10, 11, 70}, + dictWord{12, 11, 26}, + dictWord{14, 11, 17}, + dictWord{14, 11, 178}, + dictWord{15, 11, 34}, + dictWord{149, 11, 12}, + dictWord{11, 0, 977}, + dictWord{141, 0, 507}, + dictWord{9, 0, 34}, + dictWord{139, 0, 484}, + dictWord{5, 10, 196}, + dictWord{6, 10, 486}, + dictWord{7, 10, 212}, + dictWord{8, 10, 309}, + dictWord{136, 10, 346}, + dictWord{6, 0, 1700}, + dictWord{7, 0, 26}, + dictWord{7, 0, 293}, + dictWord{7, 0, 382}, + dictWord{7, 0, 1026}, + dictWord{7, 0, 1087}, + dictWord{ + 7, + 0, + 2027, + }, + dictWord{8, 0, 24}, + dictWord{8, 0, 114}, + dictWord{8, 0, 252}, + dictWord{8, 0, 727}, + dictWord{8, 0, 729}, + dictWord{9, 0, 30}, + dictWord{9, 0, 199}, + dictWord{ + 9, + 0, + 231, + }, + dictWord{9, 0, 251}, + dictWord{9, 0, 334}, + dictWord{9, 0, 361}, + dictWord{9, 0, 712}, + dictWord{10, 0, 55}, + dictWord{10, 0, 60}, + dictWord{10, 0, 232}, + dictWord{ + 10, + 0, + 332, + }, + dictWord{10, 0, 384}, + dictWord{10, 0, 396}, + dictWord{10, 0, 504}, + dictWord{10, 0, 542}, + dictWord{10, 0, 652}, + dictWord{11, 0, 20}, + dictWord{11, 0, 48}, + dictWord{11, 0, 207}, + dictWord{11, 0, 291}, + dictWord{11, 0, 298}, + dictWord{11, 0, 342}, + dictWord{11, 0, 365}, + dictWord{11, 0, 394}, + dictWord{11, 0, 620}, + dictWord{11, 0, 705}, + dictWord{11, 0, 1017}, + dictWord{12, 0, 123}, + dictWord{12, 0, 340}, + dictWord{12, 0, 406}, + dictWord{12, 0, 643}, + dictWord{13, 0, 61}, + dictWord{ + 13, + 0, 
+ 269, + }, + dictWord{13, 0, 311}, + dictWord{13, 0, 319}, + dictWord{13, 0, 486}, + dictWord{14, 0, 234}, + dictWord{15, 0, 62}, + dictWord{15, 0, 85}, + dictWord{16, 0, 71}, + dictWord{18, 0, 119}, + dictWord{20, 0, 105}, + dictWord{135, 10, 1912}, + dictWord{4, 11, 71}, + dictWord{5, 11, 376}, + dictWord{7, 11, 119}, + dictWord{138, 11, 665}, + dictWord{10, 0, 918}, + dictWord{10, 0, 926}, + dictWord{4, 10, 686}, + dictWord{136, 11, 55}, + dictWord{138, 10, 625}, + dictWord{136, 10, 706}, + dictWord{ + 132, + 11, + 479, + }, + dictWord{4, 10, 30}, + dictWord{133, 10, 43}, + dictWord{6, 0, 379}, + dictWord{7, 0, 270}, + dictWord{8, 0, 176}, + dictWord{8, 0, 183}, + dictWord{9, 0, 432}, + dictWord{ + 9, + 0, + 661, + }, + dictWord{12, 0, 247}, + dictWord{12, 0, 617}, + dictWord{18, 0, 125}, + dictWord{7, 11, 607}, + dictWord{8, 11, 99}, + dictWord{152, 11, 4}, + dictWord{ + 5, + 0, + 792, + }, + dictWord{133, 0, 900}, + dictWord{4, 11, 612}, + dictWord{133, 11, 561}, + dictWord{4, 11, 41}, + dictWord{4, 10, 220}, + dictWord{5, 11, 74}, + dictWord{ + 7, + 10, + 1535, + }, + dictWord{7, 11, 1627}, + dictWord{11, 11, 871}, + dictWord{140, 11, 619}, + dictWord{135, 0, 1920}, + dictWord{7, 11, 94}, + dictWord{11, 11, 329}, + dictWord{11, 11, 965}, + dictWord{12, 11, 241}, + dictWord{14, 11, 354}, + dictWord{15, 11, 22}, + dictWord{148, 11, 63}, + dictWord{9, 11, 209}, + dictWord{137, 11, 300}, + dictWord{134, 0, 771}, + dictWord{135, 0, 1979}, + dictWord{4, 0, 901}, + dictWord{133, 0, 776}, + dictWord{142, 0, 254}, + dictWord{133, 11, 98}, + dictWord{ + 9, + 11, + 16, + }, + dictWord{141, 11, 386}, + dictWord{133, 11, 984}, + dictWord{4, 11, 182}, + dictWord{6, 11, 205}, + dictWord{135, 11, 220}, + dictWord{7, 10, 1725}, + dictWord{ + 7, + 10, + 1774, + }, + dictWord{138, 10, 393}, + dictWord{5, 10, 263}, + dictWord{134, 10, 414}, + dictWord{4, 11, 42}, + dictWord{9, 11, 205}, + dictWord{9, 11, 786}, + dictWord{138, 11, 659}, + dictWord{14, 0, 140}, + dictWord{148, 0, 41}, + dictWord{8, 0, 440}, + dictWord{10, 0, 359}, + dictWord{6, 10, 178}, + dictWord{6, 11, 289}, + dictWord{ + 6, + 10, + 1750, + }, + dictWord{7, 11, 1670}, + dictWord{9, 10, 690}, + dictWord{10, 10, 155}, + dictWord{10, 10, 373}, + dictWord{11, 10, 698}, + dictWord{12, 11, 57}, + dictWord{13, 10, 155}, + dictWord{20, 10, 93}, + dictWord{151, 11, 4}, + dictWord{4, 0, 37}, + dictWord{5, 0, 334}, + dictWord{7, 0, 1253}, + dictWord{151, 11, 25}, + dictWord{ + 4, + 0, + 508, + }, + dictWord{4, 11, 635}, + dictWord{5, 10, 97}, + dictWord{137, 10, 393}, + dictWord{139, 11, 533}, + dictWord{4, 0, 640}, + dictWord{133, 0, 513}, + dictWord{ + 134, + 10, + 1639, + }, + dictWord{132, 11, 371}, + dictWord{4, 11, 272}, + dictWord{7, 11, 836}, + dictWord{7, 11, 1651}, + dictWord{145, 11, 89}, + dictWord{5, 11, 825}, + dictWord{6, 11, 444}, + dictWord{6, 11, 1640}, + dictWord{136, 11, 308}, + dictWord{4, 10, 191}, + dictWord{7, 10, 934}, + dictWord{8, 10, 647}, + dictWord{145, 10, 97}, + dictWord{12, 0, 246}, + dictWord{15, 0, 162}, + dictWord{19, 0, 64}, + dictWord{20, 0, 8}, + dictWord{20, 0, 95}, + dictWord{22, 0, 24}, + dictWord{152, 0, 17}, + dictWord{4, 0, 533}, + dictWord{5, 10, 165}, + dictWord{9, 10, 346}, + dictWord{138, 10, 655}, + dictWord{5, 11, 737}, + dictWord{139, 10, 885}, + dictWord{133, 10, 877}, + dictWord{ + 8, + 10, + 128, + }, + dictWord{139, 10, 179}, + dictWord{137, 11, 307}, + dictWord{140, 0, 752}, + dictWord{133, 0, 920}, + dictWord{135, 0, 1048}, + dictWord{5, 0, 153}, + dictWord{ + 6, + 0, + 580, + }, + 
dictWord{6, 10, 1663}, + dictWord{7, 10, 132}, + dictWord{7, 10, 1154}, + dictWord{7, 10, 1415}, + dictWord{7, 10, 1507}, + dictWord{12, 10, 493}, + dictWord{15, 10, 105}, + dictWord{151, 10, 15}, + dictWord{5, 10, 459}, + dictWord{7, 10, 1073}, + dictWord{8, 10, 241}, + dictWord{136, 10, 334}, + dictWord{138, 0, 391}, + dictWord{135, 0, 1952}, + dictWord{133, 11, 525}, + dictWord{8, 11, 641}, + dictWord{11, 11, 388}, + dictWord{140, 11, 580}, + dictWord{142, 0, 126}, + dictWord{ + 134, + 0, + 640, + }, + dictWord{132, 0, 483}, + dictWord{7, 0, 1616}, + dictWord{9, 0, 69}, + dictWord{6, 10, 324}, + dictWord{6, 10, 520}, + dictWord{7, 10, 338}, + dictWord{ + 7, + 10, + 1729, + }, + dictWord{8, 10, 228}, + dictWord{139, 10, 750}, + dictWord{5, 11, 493}, + dictWord{134, 11, 528}, + dictWord{135, 0, 734}, + dictWord{4, 11, 174}, + dictWord{135, 11, 911}, + dictWord{138, 0, 480}, + dictWord{9, 0, 495}, + dictWord{146, 0, 104}, + dictWord{135, 10, 705}, + dictWord{9, 0, 472}, + dictWord{4, 10, 73}, + dictWord{6, 10, 612}, + dictWord{7, 10, 927}, + dictWord{7, 10, 1330}, + dictWord{7, 10, 1822}, + dictWord{8, 10, 217}, + dictWord{9, 10, 765}, + dictWord{9, 10, 766}, + dictWord{10, 10, 408}, + dictWord{11, 10, 51}, + dictWord{11, 10, 793}, + dictWord{12, 10, 266}, + dictWord{15, 10, 158}, + dictWord{20, 10, 89}, + dictWord{150, 10, 32}, + dictWord{7, 11, 548}, + dictWord{137, 11, 58}, + dictWord{4, 11, 32}, + dictWord{5, 11, 215}, + dictWord{6, 11, 269}, + dictWord{7, 11, 1782}, + dictWord{7, 11, 1892}, + dictWord{10, 11, 16}, + dictWord{11, 11, 822}, + dictWord{11, 11, 954}, + dictWord{141, 11, 481}, + dictWord{132, 0, 874}, + dictWord{9, 0, 229}, + dictWord{5, 10, 389}, + dictWord{136, 10, 636}, + dictWord{7, 11, 1749}, + dictWord{136, 11, 477}, + dictWord{134, 0, 948}, + dictWord{5, 11, 308}, + dictWord{135, 11, 1088}, + dictWord{ + 4, + 0, + 748, + }, + dictWord{139, 0, 1009}, + dictWord{136, 10, 21}, + dictWord{6, 0, 555}, + dictWord{135, 0, 485}, + dictWord{5, 11, 126}, + dictWord{8, 11, 297}, + dictWord{ + 9, + 11, + 366, + }, + dictWord{9, 11, 445}, + dictWord{12, 11, 53}, + dictWord{12, 11, 374}, + dictWord{141, 11, 492}, + dictWord{7, 11, 1551}, + dictWord{139, 11, 361}, + dictWord{136, 0, 193}, + dictWord{136, 0, 472}, + dictWord{8, 0, 653}, + dictWord{13, 0, 93}, + dictWord{147, 0, 14}, + dictWord{132, 0, 984}, + dictWord{132, 11, 175}, + dictWord{5, 0, 172}, + dictWord{6, 0, 1971}, + dictWord{132, 11, 685}, + dictWord{149, 11, 8}, + dictWord{133, 11, 797}, + dictWord{13, 0, 83}, + dictWord{5, 10, 189}, + dictWord{ + 7, + 10, + 442, + }, + dictWord{7, 10, 443}, + dictWord{8, 10, 281}, + dictWord{12, 10, 174}, + dictWord{141, 10, 261}, + dictWord{134, 0, 1568}, + dictWord{133, 11, 565}, + dictWord{139, 0, 384}, + dictWord{133, 0, 260}, + dictWord{7, 0, 758}, + dictWord{7, 0, 880}, + dictWord{7, 0, 1359}, + dictWord{9, 0, 164}, + dictWord{9, 0, 167}, + dictWord{ + 10, + 0, + 156, + }, + dictWord{10, 0, 588}, + dictWord{12, 0, 101}, + dictWord{14, 0, 48}, + dictWord{15, 0, 70}, + dictWord{6, 10, 2}, + dictWord{7, 10, 1262}, + dictWord{ + 7, + 10, + 1737, + }, + dictWord{8, 10, 22}, + dictWord{8, 10, 270}, + dictWord{8, 10, 612}, + dictWord{9, 10, 312}, + dictWord{9, 10, 436}, + dictWord{10, 10, 311}, + dictWord{ + 10, + 10, + 623, + }, + dictWord{11, 10, 72}, + dictWord{11, 10, 330}, + dictWord{11, 10, 455}, + dictWord{12, 10, 321}, + dictWord{12, 10, 504}, + dictWord{12, 10, 530}, + dictWord{ + 12, + 10, + 543, + }, + dictWord{13, 10, 17}, + dictWord{13, 10, 156}, + dictWord{13, 10, 
334}, + dictWord{17, 10, 60}, + dictWord{148, 10, 64}, + dictWord{4, 11, 252}, + dictWord{ + 7, + 11, + 1068, + }, + dictWord{10, 11, 434}, + dictWord{11, 11, 228}, + dictWord{11, 11, 426}, + dictWord{13, 11, 231}, + dictWord{18, 11, 106}, + dictWord{148, 11, 87}, + dictWord{7, 10, 354}, + dictWord{10, 10, 410}, + dictWord{139, 10, 815}, + dictWord{6, 0, 367}, + dictWord{7, 10, 670}, + dictWord{7, 10, 1327}, + dictWord{8, 10, 411}, + dictWord{8, 10, 435}, + dictWord{9, 10, 653}, + dictWord{9, 10, 740}, + dictWord{10, 10, 385}, + dictWord{11, 10, 222}, + dictWord{11, 10, 324}, + dictWord{11, 10, 829}, + dictWord{140, 10, 611}, + dictWord{7, 0, 1174}, + dictWord{6, 10, 166}, + dictWord{135, 10, 374}, + dictWord{146, 0, 121}, + dictWord{132, 0, 828}, + dictWord{ + 5, + 11, + 231, + }, + dictWord{138, 11, 509}, + dictWord{7, 11, 601}, + dictWord{9, 11, 277}, + dictWord{9, 11, 674}, + dictWord{10, 11, 178}, + dictWord{10, 11, 257}, + dictWord{ + 10, + 11, + 418, + }, + dictWord{11, 11, 531}, + dictWord{11, 11, 544}, + dictWord{11, 11, 585}, + dictWord{12, 11, 113}, + dictWord{12, 11, 475}, + dictWord{13, 11, 99}, + dictWord{142, 11, 428}, + dictWord{134, 0, 1541}, + dictWord{135, 11, 1779}, + dictWord{5, 0, 343}, + dictWord{134, 10, 398}, + dictWord{135, 10, 50}, + dictWord{ + 135, + 11, + 1683, + }, + dictWord{4, 0, 440}, + dictWord{7, 0, 57}, + dictWord{8, 0, 167}, + dictWord{8, 0, 375}, + dictWord{9, 0, 82}, + dictWord{9, 0, 561}, + dictWord{9, 0, 744}, + dictWord{ + 10, + 0, + 620, + }, + dictWord{137, 11, 744}, + dictWord{134, 0, 926}, + dictWord{6, 10, 517}, + dictWord{7, 10, 1159}, + dictWord{10, 10, 621}, + dictWord{139, 10, 192}, + dictWord{137, 0, 827}, + dictWord{8, 0, 194}, + dictWord{136, 0, 756}, + dictWord{10, 10, 223}, + dictWord{139, 10, 645}, + dictWord{7, 10, 64}, + dictWord{ + 136, + 10, + 245, + }, + dictWord{4, 11, 399}, + dictWord{5, 11, 119}, + dictWord{5, 11, 494}, + dictWord{7, 11, 751}, + dictWord{137, 11, 556}, + dictWord{132, 0, 808}, + dictWord{ + 135, + 0, + 22, + }, + dictWord{7, 10, 1763}, + dictWord{140, 10, 310}, + dictWord{5, 0, 639}, + dictWord{7, 0, 1249}, + dictWord{11, 0, 896}, + dictWord{134, 11, 584}, + dictWord{ + 134, + 0, + 1614, + }, + dictWord{135, 0, 860}, + dictWord{135, 11, 1121}, + dictWord{5, 10, 129}, + dictWord{6, 10, 61}, + dictWord{135, 10, 947}, + dictWord{4, 0, 102}, + dictWord{ + 7, + 0, + 815, + }, + dictWord{7, 0, 1699}, + dictWord{139, 0, 964}, + dictWord{13, 10, 505}, + dictWord{141, 10, 506}, + dictWord{139, 10, 1000}, + dictWord{ + 132, + 11, + 679, + }, + dictWord{132, 0, 899}, + dictWord{132, 0, 569}, + dictWord{5, 11, 694}, + dictWord{137, 11, 714}, + dictWord{136, 0, 795}, + dictWord{6, 0, 2045}, + dictWord{ + 139, + 11, + 7, + }, + dictWord{6, 0, 52}, + dictWord{9, 0, 104}, + dictWord{9, 0, 559}, + dictWord{12, 0, 308}, + dictWord{147, 0, 87}, + dictWord{4, 0, 301}, + dictWord{132, 0, 604}, + dictWord{133, 10, 637}, + dictWord{136, 0, 779}, + dictWord{5, 11, 143}, + dictWord{5, 11, 769}, + dictWord{6, 11, 1760}, + dictWord{7, 11, 682}, + dictWord{7, 11, 1992}, + dictWord{136, 11, 736}, + dictWord{137, 10, 590}, + dictWord{147, 0, 32}, + dictWord{137, 11, 527}, + dictWord{5, 10, 280}, + dictWord{135, 10, 1226}, + dictWord{134, 0, 494}, + dictWord{6, 0, 677}, + dictWord{6, 0, 682}, + dictWord{134, 0, 1044}, + dictWord{133, 10, 281}, + dictWord{135, 10, 1064}, + dictWord{7, 0, 508}, + dictWord{133, 11, 860}, + dictWord{6, 11, 422}, + dictWord{7, 11, 0}, + dictWord{7, 11, 1544}, + dictWord{9, 11, 577}, + dictWord{11, 11, 990}, + 
dictWord{12, 11, 141}, + dictWord{12, 11, 453}, + dictWord{13, 11, 47}, + dictWord{141, 11, 266}, + dictWord{134, 0, 1014}, + dictWord{5, 11, 515}, + dictWord{137, 11, 131}, + dictWord{ + 134, + 0, + 957, + }, + dictWord{132, 11, 646}, + dictWord{6, 0, 310}, + dictWord{7, 0, 1849}, + dictWord{8, 0, 72}, + dictWord{8, 0, 272}, + dictWord{8, 0, 431}, + dictWord{9, 0, 12}, + dictWord{ + 9, + 0, + 376, + }, + dictWord{10, 0, 563}, + dictWord{10, 0, 630}, + dictWord{10, 0, 796}, + dictWord{10, 0, 810}, + dictWord{11, 0, 367}, + dictWord{11, 0, 599}, + dictWord{ + 11, + 0, + 686, + }, + dictWord{140, 0, 672}, + dictWord{7, 0, 570}, + dictWord{4, 11, 396}, + dictWord{7, 10, 120}, + dictWord{7, 11, 728}, + dictWord{8, 10, 489}, + dictWord{9, 11, 117}, + dictWord{9, 10, 319}, + dictWord{10, 10, 820}, + dictWord{11, 10, 1004}, + dictWord{12, 10, 379}, + dictWord{12, 10, 679}, + dictWord{13, 10, 117}, + dictWord{ + 13, + 11, + 202, + }, + dictWord{13, 10, 412}, + dictWord{14, 10, 25}, + dictWord{15, 10, 52}, + dictWord{15, 10, 161}, + dictWord{16, 10, 47}, + dictWord{20, 11, 51}, + dictWord{ + 149, + 10, + 2, + }, + dictWord{6, 11, 121}, + dictWord{6, 11, 124}, + dictWord{6, 11, 357}, + dictWord{7, 11, 1138}, + dictWord{7, 11, 1295}, + dictWord{8, 11, 162}, + dictWord{ + 139, + 11, + 655, + }, + dictWord{8, 0, 449}, + dictWord{4, 10, 937}, + dictWord{5, 10, 801}, + dictWord{136, 11, 449}, + dictWord{139, 11, 958}, + dictWord{6, 0, 181}, + dictWord{ + 7, + 0, + 537, + }, + dictWord{8, 0, 64}, + dictWord{9, 0, 127}, + dictWord{10, 0, 496}, + dictWord{12, 0, 510}, + dictWord{141, 0, 384}, + dictWord{138, 11, 253}, + dictWord{4, 0, 244}, + dictWord{135, 0, 233}, + dictWord{133, 11, 237}, + dictWord{132, 10, 365}, + dictWord{6, 0, 1650}, + dictWord{10, 0, 702}, + dictWord{139, 0, 245}, + dictWord{ + 5, + 10, + 7, + }, + dictWord{139, 10, 774}, + dictWord{13, 0, 463}, + dictWord{20, 0, 49}, + dictWord{13, 11, 463}, + dictWord{148, 11, 49}, + dictWord{4, 10, 734}, + dictWord{ + 5, + 10, + 662, + }, + dictWord{134, 10, 430}, + dictWord{4, 10, 746}, + dictWord{135, 10, 1090}, + dictWord{5, 10, 360}, + dictWord{136, 10, 237}, + dictWord{137, 0, 338}, + dictWord{143, 11, 10}, + dictWord{7, 11, 571}, + dictWord{138, 11, 366}, + dictWord{134, 0, 1279}, + dictWord{9, 11, 513}, + dictWord{10, 11, 22}, + dictWord{10, 11, 39}, + dictWord{12, 11, 122}, + dictWord{140, 11, 187}, + dictWord{133, 0, 896}, + dictWord{146, 0, 178}, + dictWord{134, 0, 695}, + dictWord{137, 0, 808}, + dictWord{ + 134, + 11, + 587, + }, + dictWord{7, 11, 107}, + dictWord{7, 11, 838}, + dictWord{8, 11, 550}, + dictWord{138, 11, 401}, + dictWord{7, 0, 1117}, + dictWord{136, 0, 539}, + dictWord{ + 4, + 10, + 277, + }, + dictWord{5, 10, 608}, + dictWord{6, 10, 493}, + dictWord{7, 10, 457}, + dictWord{140, 10, 384}, + dictWord{133, 11, 768}, + dictWord{12, 0, 257}, + dictWord{ + 7, + 10, + 27, + }, + dictWord{135, 10, 316}, + dictWord{140, 0, 1003}, + dictWord{4, 0, 207}, + dictWord{5, 0, 586}, + dictWord{5, 0, 676}, + dictWord{6, 0, 448}, + dictWord{ + 8, + 0, + 244, + }, + dictWord{11, 0, 1}, + dictWord{13, 0, 3}, + dictWord{16, 0, 54}, + dictWord{17, 0, 4}, + dictWord{18, 0, 13}, + dictWord{133, 10, 552}, + dictWord{4, 10, 401}, + dictWord{ + 137, + 10, + 264, + }, + dictWord{5, 0, 516}, + dictWord{7, 0, 1883}, + dictWord{135, 11, 1883}, + dictWord{12, 0, 960}, + dictWord{132, 11, 894}, + dictWord{5, 0, 4}, + dictWord{ + 5, + 0, + 810, + }, + dictWord{6, 0, 13}, + dictWord{6, 0, 538}, + dictWord{6, 0, 1690}, + dictWord{6, 0, 1726}, + 
dictWord{7, 0, 499}, + dictWord{7, 0, 1819}, + dictWord{8, 0, 148}, + dictWord{ + 8, + 0, + 696, + }, + dictWord{8, 0, 791}, + dictWord{12, 0, 125}, + dictWord{143, 0, 9}, + dictWord{135, 0, 1268}, + dictWord{11, 0, 30}, + dictWord{14, 0, 315}, + dictWord{ + 9, + 10, + 543, + }, + dictWord{10, 10, 524}, + dictWord{12, 10, 524}, + dictWord{16, 10, 18}, + dictWord{20, 10, 26}, + dictWord{148, 10, 65}, + dictWord{6, 0, 748}, + dictWord{ + 4, + 10, + 205, + }, + dictWord{5, 10, 623}, + dictWord{7, 10, 104}, + dictWord{136, 10, 519}, + dictWord{11, 0, 542}, + dictWord{139, 0, 852}, + dictWord{140, 0, 6}, + dictWord{ + 132, + 0, + 848, + }, + dictWord{7, 0, 1385}, + dictWord{11, 0, 582}, + dictWord{11, 0, 650}, + dictWord{11, 0, 901}, + dictWord{11, 0, 949}, + dictWord{12, 0, 232}, + dictWord{12, 0, 236}, + dictWord{13, 0, 413}, + dictWord{13, 0, 501}, + dictWord{18, 0, 116}, + dictWord{7, 10, 579}, + dictWord{9, 10, 41}, + dictWord{9, 10, 244}, + dictWord{9, 10, 669}, + dictWord{10, 10, 5}, + dictWord{11, 10, 861}, + dictWord{11, 10, 951}, + dictWord{139, 10, 980}, + dictWord{4, 0, 945}, + dictWord{6, 0, 1811}, + dictWord{6, 0, 1845}, + dictWord{ + 6, + 0, + 1853, + }, + dictWord{6, 0, 1858}, + dictWord{8, 0, 862}, + dictWord{12, 0, 782}, + dictWord{12, 0, 788}, + dictWord{18, 0, 160}, + dictWord{148, 0, 117}, + dictWord{ + 132, + 10, + 717, + }, + dictWord{4, 0, 925}, + dictWord{5, 0, 803}, + dictWord{8, 0, 698}, + dictWord{138, 0, 828}, + dictWord{134, 0, 1416}, + dictWord{132, 0, 610}, + dictWord{ + 139, + 0, + 992, + }, + dictWord{6, 0, 878}, + dictWord{134, 0, 1477}, + dictWord{135, 0, 1847}, + dictWord{138, 11, 531}, + dictWord{137, 11, 539}, + dictWord{134, 11, 272}, + dictWord{133, 0, 383}, + dictWord{134, 0, 1404}, + dictWord{132, 10, 489}, + dictWord{4, 11, 9}, + dictWord{5, 11, 128}, + dictWord{7, 11, 368}, + dictWord{ + 11, + 11, + 480, + }, + dictWord{148, 11, 3}, + dictWord{136, 0, 986}, + dictWord{9, 0, 660}, + dictWord{138, 0, 347}, + dictWord{135, 10, 892}, + dictWord{136, 11, 682}, + dictWord{ + 7, + 0, + 572, + }, + dictWord{9, 0, 592}, + dictWord{11, 0, 680}, + dictWord{12, 0, 356}, + dictWord{140, 0, 550}, + dictWord{7, 0, 1411}, + dictWord{138, 11, 527}, + dictWord{ + 4, + 11, + 2, + }, + dictWord{7, 11, 545}, + dictWord{135, 11, 894}, + dictWord{137, 10, 473}, + dictWord{11, 0, 64}, + dictWord{7, 11, 481}, + dictWord{7, 10, 819}, + dictWord{9, 10, 26}, + dictWord{9, 10, 392}, + dictWord{9, 11, 792}, + dictWord{10, 10, 152}, + dictWord{10, 10, 226}, + dictWord{12, 10, 276}, + dictWord{12, 10, 426}, + dictWord{ + 12, + 10, + 589, + }, + dictWord{13, 10, 460}, + dictWord{15, 10, 97}, + dictWord{19, 10, 48}, + dictWord{148, 10, 104}, + dictWord{135, 10, 51}, + dictWord{136, 11, 445}, + dictWord{136, 11, 646}, + dictWord{135, 0, 606}, + dictWord{132, 10, 674}, + dictWord{6, 0, 1829}, + dictWord{134, 0, 1830}, + dictWord{132, 10, 770}, + dictWord{ + 5, + 10, + 79, + }, + dictWord{7, 10, 1027}, + dictWord{7, 10, 1477}, + dictWord{139, 10, 52}, + dictWord{5, 11, 530}, + dictWord{142, 11, 113}, + dictWord{134, 10, 1666}, + dictWord{ + 7, + 0, + 748, + }, + dictWord{139, 0, 700}, + dictWord{134, 10, 195}, + dictWord{133, 10, 789}, + dictWord{9, 0, 87}, + dictWord{10, 0, 365}, + dictWord{4, 10, 251}, + dictWord{ + 4, + 10, + 688, + }, + dictWord{7, 10, 513}, + dictWord{135, 10, 1284}, + dictWord{136, 11, 111}, + dictWord{133, 0, 127}, + dictWord{6, 0, 198}, + dictWord{140, 0, 83}, + dictWord{133, 11, 556}, + dictWord{133, 10, 889}, + dictWord{4, 10, 160}, + dictWord{5, 10, 330}, + 
dictWord{7, 10, 1434}, + dictWord{136, 10, 174}, + dictWord{5, 0, 276}, + dictWord{6, 0, 55}, + dictWord{7, 0, 1369}, + dictWord{138, 0, 864}, + dictWord{8, 11, 16}, + dictWord{140, 11, 568}, + dictWord{6, 0, 1752}, + dictWord{136, 0, 726}, + dictWord{135, 0, 1066}, + dictWord{133, 0, 764}, + dictWord{6, 11, 186}, + dictWord{137, 11, 426}, + dictWord{11, 0, 683}, + dictWord{139, 11, 683}, + dictWord{ + 6, + 0, + 309, + }, + dictWord{7, 0, 331}, + dictWord{138, 0, 550}, + dictWord{133, 10, 374}, + dictWord{6, 0, 1212}, + dictWord{6, 0, 1852}, + dictWord{7, 0, 1062}, + dictWord{ + 8, + 0, + 874, + }, + dictWord{8, 0, 882}, + dictWord{138, 0, 936}, + dictWord{132, 11, 585}, + dictWord{134, 0, 1364}, + dictWord{7, 0, 986}, + dictWord{133, 10, 731}, + dictWord{ + 6, + 0, + 723, + }, + dictWord{6, 0, 1408}, + dictWord{138, 0, 381}, + dictWord{135, 0, 1573}, + dictWord{134, 0, 1025}, + dictWord{4, 10, 626}, + dictWord{5, 10, 642}, + dictWord{ + 6, + 10, + 425, + }, + dictWord{10, 10, 202}, + dictWord{139, 10, 141}, + dictWord{4, 11, 93}, + dictWord{5, 11, 252}, + dictWord{6, 11, 229}, + dictWord{7, 11, 291}, + dictWord{ + 9, + 11, + 550, + }, + dictWord{139, 11, 644}, + dictWord{137, 11, 749}, + dictWord{137, 11, 162}, + dictWord{132, 11, 381}, + dictWord{135, 0, 1559}, + dictWord{ + 6, + 0, + 194, + }, + dictWord{7, 0, 133}, + dictWord{10, 0, 493}, + dictWord{10, 0, 570}, + dictWord{139, 0, 664}, + dictWord{5, 0, 24}, + dictWord{5, 0, 569}, + dictWord{6, 0, 3}, + dictWord{ + 6, + 0, + 119, + }, + dictWord{6, 0, 143}, + dictWord{6, 0, 440}, + dictWord{7, 0, 295}, + dictWord{7, 0, 599}, + dictWord{7, 0, 1686}, + dictWord{7, 0, 1854}, + dictWord{8, 0, 424}, + dictWord{ + 9, + 0, + 43, + }, + dictWord{9, 0, 584}, + dictWord{9, 0, 760}, + dictWord{10, 0, 148}, + dictWord{10, 0, 328}, + dictWord{11, 0, 159}, + dictWord{11, 0, 253}, + dictWord{11, 0, 506}, + dictWord{12, 0, 487}, + dictWord{140, 0, 531}, + dictWord{6, 0, 661}, + dictWord{134, 0, 1517}, + dictWord{136, 10, 835}, + dictWord{151, 10, 17}, + dictWord{5, 0, 14}, + dictWord{5, 0, 892}, + dictWord{6, 0, 283}, + dictWord{7, 0, 234}, + dictWord{136, 0, 537}, + dictWord{139, 0, 541}, + dictWord{4, 0, 126}, + dictWord{8, 0, 635}, + dictWord{ + 147, + 0, + 34, + }, + dictWord{4, 0, 316}, + dictWord{4, 0, 495}, + dictWord{135, 0, 1561}, + dictWord{4, 11, 187}, + dictWord{5, 11, 184}, + dictWord{5, 11, 690}, + dictWord{ + 7, + 11, + 1869, + }, + dictWord{138, 11, 756}, + dictWord{139, 11, 783}, + dictWord{4, 0, 998}, + dictWord{137, 0, 861}, + dictWord{136, 0, 1009}, + dictWord{139, 11, 292}, + dictWord{5, 11, 21}, + dictWord{6, 11, 77}, + dictWord{6, 11, 157}, + dictWord{7, 11, 974}, + dictWord{7, 11, 1301}, + dictWord{7, 11, 1339}, + dictWord{7, 11, 1490}, + dictWord{ + 7, + 11, + 1873, + }, + dictWord{137, 11, 628}, + dictWord{7, 11, 1283}, + dictWord{9, 11, 227}, + dictWord{9, 11, 499}, + dictWord{10, 11, 341}, + dictWord{11, 11, 325}, + dictWord{11, 11, 408}, + dictWord{14, 11, 180}, + dictWord{15, 11, 144}, + dictWord{18, 11, 47}, + dictWord{147, 11, 49}, + dictWord{4, 0, 64}, + dictWord{5, 0, 352}, + dictWord{5, 0, 720}, + dictWord{6, 0, 368}, + dictWord{139, 0, 359}, + dictWord{5, 10, 384}, + dictWord{8, 10, 455}, + dictWord{140, 10, 48}, + dictWord{5, 10, 264}, + dictWord{ + 134, + 10, + 184, + }, + dictWord{7, 0, 1577}, + dictWord{10, 0, 304}, + dictWord{10, 0, 549}, + dictWord{12, 0, 365}, + dictWord{13, 0, 220}, + dictWord{13, 0, 240}, + dictWord{ + 142, + 0, + 33, + }, + dictWord{134, 0, 1107}, + dictWord{134, 0, 929}, + dictWord{135, 
0, 1142}, + dictWord{6, 0, 175}, + dictWord{137, 0, 289}, + dictWord{5, 0, 432}, + dictWord{ + 133, + 0, + 913, + }, + dictWord{6, 0, 279}, + dictWord{7, 0, 219}, + dictWord{5, 10, 633}, + dictWord{135, 10, 1323}, + dictWord{7, 0, 785}, + dictWord{7, 10, 359}, + dictWord{ + 8, + 10, + 243, + }, + dictWord{140, 10, 175}, + dictWord{139, 0, 595}, + dictWord{132, 10, 105}, + dictWord{8, 11, 398}, + dictWord{9, 11, 681}, + dictWord{139, 11, 632}, + dictWord{140, 0, 80}, + dictWord{5, 0, 931}, + dictWord{134, 0, 1698}, + dictWord{142, 11, 241}, + dictWord{134, 11, 20}, + dictWord{134, 0, 1323}, + dictWord{11, 0, 526}, + dictWord{11, 0, 939}, + dictWord{141, 0, 290}, + dictWord{5, 0, 774}, + dictWord{6, 0, 780}, + dictWord{6, 0, 1637}, + dictWord{6, 0, 1686}, + dictWord{6, 0, 1751}, + dictWord{ + 8, + 0, + 559, + }, + dictWord{141, 0, 109}, + dictWord{141, 0, 127}, + dictWord{7, 0, 1167}, + dictWord{11, 0, 934}, + dictWord{13, 0, 391}, + dictWord{17, 0, 76}, + dictWord{ + 135, + 11, + 709, + }, + dictWord{135, 0, 963}, + dictWord{6, 0, 260}, + dictWord{135, 0, 1484}, + dictWord{134, 0, 573}, + dictWord{4, 10, 758}, + dictWord{139, 11, 941}, + dictWord{135, 10, 1649}, + dictWord{145, 11, 36}, + dictWord{4, 0, 292}, + dictWord{137, 0, 580}, + dictWord{4, 0, 736}, + dictWord{5, 0, 871}, + dictWord{6, 0, 1689}, + dictWord{135, 0, 1944}, + dictWord{7, 11, 945}, + dictWord{11, 11, 713}, + dictWord{139, 11, 744}, + dictWord{134, 0, 1164}, + dictWord{135, 11, 937}, + dictWord{ + 6, + 0, + 1922, + }, + dictWord{9, 0, 982}, + dictWord{15, 0, 173}, + dictWord{15, 0, 178}, + dictWord{15, 0, 200}, + dictWord{18, 0, 189}, + dictWord{18, 0, 207}, + dictWord{21, 0, 47}, + dictWord{135, 11, 1652}, + dictWord{7, 0, 1695}, + dictWord{139, 10, 128}, + dictWord{6, 0, 63}, + dictWord{135, 0, 920}, + dictWord{133, 0, 793}, + dictWord{ + 143, + 11, + 134, + }, + dictWord{133, 10, 918}, + dictWord{5, 0, 67}, + dictWord{6, 0, 62}, + dictWord{6, 0, 374}, + dictWord{135, 0, 1391}, + dictWord{9, 0, 790}, + dictWord{12, 0, 47}, + dictWord{4, 11, 579}, + dictWord{5, 11, 226}, + dictWord{5, 11, 323}, + dictWord{135, 11, 960}, + dictWord{10, 11, 784}, + dictWord{141, 11, 191}, + dictWord{4, 0, 391}, + dictWord{135, 0, 1169}, + dictWord{137, 0, 443}, + dictWord{13, 11, 232}, + dictWord{146, 11, 35}, + dictWord{132, 10, 340}, + dictWord{132, 0, 271}, + dictWord{ + 137, + 11, + 313, + }, + dictWord{5, 11, 973}, + dictWord{137, 11, 659}, + dictWord{134, 0, 1140}, + dictWord{6, 11, 135}, + dictWord{135, 11, 1176}, + dictWord{4, 0, 253}, + dictWord{5, 0, 544}, + dictWord{7, 0, 300}, + dictWord{137, 0, 340}, + dictWord{7, 0, 897}, + dictWord{5, 10, 985}, + dictWord{7, 10, 509}, + dictWord{145, 10, 96}, + dictWord{ + 138, + 11, + 735, + }, + dictWord{135, 10, 1919}, + dictWord{138, 0, 890}, + dictWord{5, 0, 818}, + dictWord{134, 0, 1122}, + dictWord{5, 0, 53}, + dictWord{5, 0, 541}, + dictWord{ + 6, + 0, + 94, + }, + dictWord{6, 0, 499}, + dictWord{7, 0, 230}, + dictWord{139, 0, 321}, + dictWord{4, 0, 920}, + dictWord{5, 0, 25}, + dictWord{5, 0, 790}, + dictWord{6, 0, 457}, + dictWord{ + 7, + 0, + 853, + }, + dictWord{8, 0, 788}, + dictWord{142, 11, 31}, + dictWord{132, 10, 247}, + dictWord{135, 11, 314}, + dictWord{132, 0, 468}, + dictWord{7, 0, 243}, + dictWord{ + 6, + 10, + 337, + }, + dictWord{7, 10, 494}, + dictWord{8, 10, 27}, + dictWord{8, 10, 599}, + dictWord{138, 10, 153}, + dictWord{4, 10, 184}, + dictWord{5, 10, 390}, + dictWord{ + 7, + 10, + 618, + }, + dictWord{7, 10, 1456}, + dictWord{139, 10, 710}, + dictWord{134, 0, 
870}, + dictWord{134, 0, 1238}, + dictWord{134, 0, 1765}, + dictWord{10, 0, 853}, + dictWord{10, 0, 943}, + dictWord{14, 0, 437}, + dictWord{14, 0, 439}, + dictWord{14, 0, 443}, + dictWord{14, 0, 446}, + dictWord{14, 0, 452}, + dictWord{14, 0, 469}, + dictWord{ + 14, + 0, + 471, + }, + dictWord{14, 0, 473}, + dictWord{16, 0, 93}, + dictWord{16, 0, 102}, + dictWord{16, 0, 110}, + dictWord{148, 0, 121}, + dictWord{4, 0, 605}, + dictWord{ + 7, + 0, + 518, + }, + dictWord{7, 0, 1282}, + dictWord{7, 0, 1918}, + dictWord{10, 0, 180}, + dictWord{139, 0, 218}, + dictWord{133, 0, 822}, + dictWord{4, 0, 634}, + dictWord{ + 11, + 0, + 916, + }, + dictWord{142, 0, 419}, + dictWord{6, 11, 281}, + dictWord{7, 11, 6}, + dictWord{8, 11, 282}, + dictWord{8, 11, 480}, + dictWord{8, 11, 499}, + dictWord{9, 11, 198}, + dictWord{10, 11, 143}, + dictWord{10, 11, 169}, + dictWord{10, 11, 211}, + dictWord{10, 11, 417}, + dictWord{10, 11, 574}, + dictWord{11, 11, 147}, + dictWord{ + 11, + 11, + 395, + }, + dictWord{12, 11, 75}, + dictWord{12, 11, 407}, + dictWord{12, 11, 608}, + dictWord{13, 11, 500}, + dictWord{142, 11, 251}, + dictWord{134, 0, 898}, + dictWord{ + 6, + 0, + 36, + }, + dictWord{7, 0, 658}, + dictWord{8, 0, 454}, + dictWord{150, 11, 48}, + dictWord{133, 11, 674}, + dictWord{135, 11, 1776}, + dictWord{4, 11, 419}, + dictWord{ + 10, + 10, + 227, + }, + dictWord{11, 10, 497}, + dictWord{11, 10, 709}, + dictWord{140, 10, 415}, + dictWord{6, 10, 360}, + dictWord{7, 10, 1664}, + dictWord{136, 10, 478}, + dictWord{137, 0, 806}, + dictWord{12, 11, 508}, + dictWord{14, 11, 102}, + dictWord{14, 11, 226}, + dictWord{144, 11, 57}, + dictWord{135, 11, 1123}, + dictWord{ + 4, + 11, + 138, + }, + dictWord{7, 11, 1012}, + dictWord{7, 11, 1280}, + dictWord{137, 11, 76}, + dictWord{5, 11, 29}, + dictWord{140, 11, 638}, + dictWord{136, 10, 699}, + dictWord{134, 0, 1326}, + dictWord{132, 0, 104}, + dictWord{135, 11, 735}, + dictWord{132, 10, 739}, + dictWord{134, 0, 1331}, + dictWord{7, 0, 260}, + dictWord{ + 135, + 11, + 260, + }, + dictWord{135, 11, 1063}, + dictWord{7, 0, 45}, + dictWord{9, 0, 542}, + dictWord{9, 0, 566}, + dictWord{10, 0, 728}, + dictWord{137, 10, 869}, + dictWord{ + 4, + 10, + 67, + }, + dictWord{5, 10, 422}, + dictWord{7, 10, 1037}, + dictWord{7, 10, 1289}, + dictWord{7, 10, 1555}, + dictWord{9, 10, 741}, + dictWord{145, 10, 108}, + dictWord{ + 139, + 0, + 263, + }, + dictWord{134, 0, 1516}, + dictWord{14, 0, 146}, + dictWord{15, 0, 42}, + dictWord{16, 0, 23}, + dictWord{17, 0, 86}, + dictWord{146, 0, 17}, + dictWord{ + 138, + 0, + 468, + }, + dictWord{136, 0, 1005}, + dictWord{4, 11, 17}, + dictWord{5, 11, 23}, + dictWord{7, 11, 995}, + dictWord{11, 11, 383}, + dictWord{11, 11, 437}, + dictWord{ + 12, + 11, + 460, + }, + dictWord{140, 11, 532}, + dictWord{7, 0, 87}, + dictWord{142, 0, 288}, + dictWord{138, 10, 96}, + dictWord{135, 11, 626}, + dictWord{144, 10, 26}, + dictWord{ + 7, + 0, + 988, + }, + dictWord{7, 0, 1939}, + dictWord{9, 0, 64}, + dictWord{9, 0, 502}, + dictWord{12, 0, 22}, + dictWord{12, 0, 34}, + dictWord{13, 0, 12}, + dictWord{13, 0, 234}, + dictWord{147, 0, 77}, + dictWord{13, 0, 133}, + dictWord{8, 10, 203}, + dictWord{11, 10, 823}, + dictWord{11, 10, 846}, + dictWord{12, 10, 482}, + dictWord{13, 10, 277}, + dictWord{13, 10, 302}, + dictWord{13, 10, 464}, + dictWord{14, 10, 205}, + dictWord{142, 10, 221}, + dictWord{4, 10, 449}, + dictWord{133, 10, 718}, + dictWord{ + 135, + 0, + 141, + }, + dictWord{6, 0, 1842}, + dictWord{136, 0, 872}, + dictWord{8, 11, 70}, + dictWord{12, 
11, 171}, + dictWord{141, 11, 272}, + dictWord{4, 10, 355}, + dictWord{ + 6, + 10, + 311, + }, + dictWord{9, 10, 256}, + dictWord{138, 10, 404}, + dictWord{132, 0, 619}, + dictWord{137, 0, 261}, + dictWord{10, 11, 233}, + dictWord{10, 10, 758}, + dictWord{139, 11, 76}, + dictWord{5, 0, 246}, + dictWord{8, 0, 189}, + dictWord{9, 0, 355}, + dictWord{9, 0, 512}, + dictWord{10, 0, 124}, + dictWord{10, 0, 453}, + dictWord{ + 11, + 0, + 143, + }, + dictWord{11, 0, 416}, + dictWord{11, 0, 859}, + dictWord{141, 0, 341}, + dictWord{134, 11, 442}, + dictWord{133, 10, 827}, + dictWord{5, 10, 64}, + dictWord{ + 140, + 10, + 581, + }, + dictWord{4, 10, 442}, + dictWord{7, 10, 1047}, + dictWord{7, 10, 1352}, + dictWord{135, 10, 1643}, + dictWord{134, 11, 1709}, + dictWord{5, 0, 678}, + dictWord{6, 0, 305}, + dictWord{7, 0, 775}, + dictWord{7, 0, 1065}, + dictWord{133, 10, 977}, + dictWord{11, 11, 69}, + dictWord{12, 11, 105}, + dictWord{12, 11, 117}, + dictWord{13, 11, 213}, + dictWord{14, 11, 13}, + dictWord{14, 11, 62}, + dictWord{14, 11, 177}, + dictWord{14, 11, 421}, + dictWord{15, 11, 19}, + dictWord{146, 11, 141}, + dictWord{137, 11, 309}, + dictWord{5, 0, 35}, + dictWord{7, 0, 862}, + dictWord{7, 0, 1886}, + dictWord{138, 0, 179}, + dictWord{136, 0, 285}, + dictWord{132, 0, 517}, + dictWord{7, 11, 976}, + dictWord{9, 11, 146}, + dictWord{10, 11, 206}, + dictWord{10, 11, 596}, + dictWord{13, 11, 218}, + dictWord{142, 11, 153}, + dictWord{ + 132, + 10, + 254, + }, + dictWord{6, 0, 214}, + dictWord{12, 0, 540}, + dictWord{4, 10, 275}, + dictWord{7, 10, 1219}, + dictWord{140, 10, 376}, + dictWord{8, 0, 667}, + dictWord{ + 11, + 0, + 403, + }, + dictWord{146, 0, 83}, + dictWord{12, 0, 74}, + dictWord{10, 11, 648}, + dictWord{11, 11, 671}, + dictWord{143, 11, 46}, + dictWord{135, 0, 125}, + dictWord{ + 134, + 10, + 1753, + }, + dictWord{133, 0, 761}, + dictWord{6, 0, 912}, + dictWord{4, 11, 518}, + dictWord{6, 10, 369}, + dictWord{6, 10, 502}, + dictWord{7, 10, 1036}, + dictWord{ + 7, + 11, + 1136, + }, + dictWord{8, 10, 348}, + dictWord{9, 10, 452}, + dictWord{10, 10, 26}, + dictWord{11, 10, 224}, + dictWord{11, 10, 387}, + dictWord{11, 10, 772}, + dictWord{12, 10, 95}, + dictWord{12, 10, 629}, + dictWord{13, 10, 195}, + dictWord{13, 10, 207}, + dictWord{13, 10, 241}, + dictWord{14, 10, 260}, + dictWord{14, 10, 270}, + dictWord{143, 10, 140}, + dictWord{10, 0, 131}, + dictWord{140, 0, 72}, + dictWord{132, 10, 269}, + dictWord{5, 10, 480}, + dictWord{7, 10, 532}, + dictWord{ + 7, + 10, + 1197, + }, + dictWord{7, 10, 1358}, + dictWord{8, 10, 291}, + dictWord{11, 10, 349}, + dictWord{142, 10, 396}, + dictWord{8, 11, 689}, + dictWord{137, 11, 863}, + dictWord{ + 8, + 0, + 333, + }, + dictWord{138, 0, 182}, + dictWord{4, 11, 18}, + dictWord{7, 11, 145}, + dictWord{7, 11, 444}, + dictWord{7, 11, 1278}, + dictWord{8, 11, 49}, + dictWord{ + 8, + 11, + 400, + }, + dictWord{9, 11, 71}, + dictWord{9, 11, 250}, + dictWord{10, 11, 459}, + dictWord{12, 11, 160}, + dictWord{144, 11, 24}, + dictWord{14, 11, 35}, + dictWord{ + 142, + 11, + 191, + }, + dictWord{135, 11, 1864}, + dictWord{135, 0, 1338}, + dictWord{148, 10, 15}, + dictWord{14, 0, 94}, + dictWord{15, 0, 65}, + dictWord{16, 0, 4}, + dictWord{ + 16, + 0, + 77, + }, + dictWord{16, 0, 80}, + dictWord{145, 0, 5}, + dictWord{12, 11, 82}, + dictWord{143, 11, 36}, + dictWord{133, 11, 1010}, + dictWord{133, 0, 449}, + dictWord{ + 133, + 0, + 646, + }, + dictWord{7, 0, 86}, + dictWord{8, 0, 103}, + dictWord{135, 10, 657}, + dictWord{7, 0, 2028}, + dictWord{138, 0, 
641}, + dictWord{136, 10, 533}, + dictWord{ + 134, + 0, + 1, + }, + dictWord{139, 11, 970}, + dictWord{5, 11, 87}, + dictWord{7, 11, 313}, + dictWord{7, 11, 1103}, + dictWord{10, 11, 112}, + dictWord{10, 11, 582}, + dictWord{ + 11, + 11, + 389, + }, + dictWord{11, 11, 813}, + dictWord{12, 11, 385}, + dictWord{13, 11, 286}, + dictWord{14, 11, 124}, + dictWord{146, 11, 108}, + dictWord{6, 0, 869}, + dictWord{ + 132, + 11, + 267, + }, + dictWord{6, 0, 277}, + dictWord{7, 0, 1274}, + dictWord{7, 0, 1386}, + dictWord{146, 0, 87}, + dictWord{6, 0, 187}, + dictWord{7, 0, 39}, + dictWord{7, 0, 1203}, + dictWord{8, 0, 380}, + dictWord{14, 0, 117}, + dictWord{149, 0, 28}, + dictWord{4, 10, 211}, + dictWord{4, 10, 332}, + dictWord{5, 10, 335}, + dictWord{6, 10, 238}, + dictWord{ + 7, + 10, + 269, + }, + dictWord{7, 10, 811}, + dictWord{7, 10, 1797}, + dictWord{8, 10, 836}, + dictWord{9, 10, 507}, + dictWord{141, 10, 242}, + dictWord{4, 0, 785}, + dictWord{ + 5, + 0, + 368, + }, + dictWord{6, 0, 297}, + dictWord{7, 0, 793}, + dictWord{139, 0, 938}, + dictWord{7, 0, 464}, + dictWord{8, 0, 558}, + dictWord{11, 0, 105}, + dictWord{12, 0, 231}, + dictWord{14, 0, 386}, + dictWord{15, 0, 102}, + dictWord{148, 0, 75}, + dictWord{133, 10, 1009}, + dictWord{8, 0, 877}, + dictWord{140, 0, 731}, + dictWord{ + 139, + 11, + 289, + }, + dictWord{10, 11, 249}, + dictWord{139, 11, 209}, + dictWord{132, 11, 561}, + dictWord{134, 0, 1608}, + dictWord{132, 11, 760}, + dictWord{134, 0, 1429}, + dictWord{9, 11, 154}, + dictWord{140, 11, 485}, + dictWord{5, 10, 228}, + dictWord{6, 10, 203}, + dictWord{7, 10, 156}, + dictWord{8, 10, 347}, + dictWord{ + 137, + 10, + 265, + }, + dictWord{7, 0, 1010}, + dictWord{11, 0, 733}, + dictWord{11, 0, 759}, + dictWord{13, 0, 34}, + dictWord{14, 0, 427}, + dictWord{146, 0, 45}, + dictWord{7, 10, 1131}, + dictWord{135, 10, 1468}, + dictWord{136, 11, 255}, + dictWord{7, 0, 1656}, + dictWord{9, 0, 369}, + dictWord{10, 0, 338}, + dictWord{10, 0, 490}, + dictWord{ + 11, + 0, + 154, + }, + dictWord{11, 0, 545}, + dictWord{11, 0, 775}, + dictWord{13, 0, 77}, + dictWord{141, 0, 274}, + dictWord{133, 11, 621}, + dictWord{134, 0, 1038}, + dictWord{ + 4, + 11, + 368, + }, + dictWord{135, 11, 641}, + dictWord{6, 0, 2010}, + dictWord{8, 0, 979}, + dictWord{8, 0, 985}, + dictWord{10, 0, 951}, + dictWord{138, 0, 1011}, + dictWord{ + 134, + 0, + 1005, + }, + dictWord{19, 0, 121}, + dictWord{5, 10, 291}, + dictWord{5, 10, 318}, + dictWord{7, 10, 765}, + dictWord{9, 10, 389}, + dictWord{140, 10, 548}, + dictWord{ + 5, + 0, + 20, + }, + dictWord{6, 0, 298}, + dictWord{7, 0, 659}, + dictWord{137, 0, 219}, + dictWord{7, 0, 1440}, + dictWord{11, 0, 854}, + dictWord{11, 0, 872}, + dictWord{11, 0, 921}, + dictWord{12, 0, 551}, + dictWord{13, 0, 472}, + dictWord{142, 0, 367}, + dictWord{5, 0, 490}, + dictWord{6, 0, 615}, + dictWord{6, 0, 620}, + dictWord{135, 0, 683}, + dictWord{ + 6, + 0, + 1070, + }, + dictWord{134, 0, 1597}, + dictWord{139, 0, 522}, + dictWord{132, 0, 439}, + dictWord{136, 0, 669}, + dictWord{6, 0, 766}, + dictWord{6, 0, 1143}, + dictWord{ + 6, + 0, + 1245, + }, + dictWord{10, 10, 525}, + dictWord{139, 10, 82}, + dictWord{9, 11, 92}, + dictWord{147, 11, 91}, + dictWord{6, 0, 668}, + dictWord{134, 0, 1218}, + dictWord{ + 6, + 11, + 525, + }, + dictWord{9, 11, 876}, + dictWord{140, 11, 284}, + dictWord{132, 0, 233}, + dictWord{136, 0, 547}, + dictWord{132, 10, 422}, + dictWord{5, 10, 355}, + dictWord{145, 10, 0}, + dictWord{6, 11, 300}, + dictWord{135, 11, 1515}, + dictWord{4, 0, 482}, + 
dictWord{137, 10, 905}, + dictWord{4, 0, 886}, + dictWord{7, 0, 346}, + dictWord{133, 11, 594}, + dictWord{133, 10, 865}, + dictWord{5, 10, 914}, + dictWord{134, 10, 1625}, + dictWord{135, 0, 334}, + dictWord{5, 0, 795}, + dictWord{ + 6, + 0, + 1741, + }, + dictWord{133, 10, 234}, + dictWord{135, 10, 1383}, + dictWord{6, 11, 1641}, + dictWord{136, 11, 820}, + dictWord{135, 0, 371}, + dictWord{7, 11, 1313}, + dictWord{138, 11, 660}, + dictWord{135, 10, 1312}, + dictWord{135, 0, 622}, + dictWord{7, 0, 625}, + dictWord{135, 0, 1750}, + dictWord{135, 0, 339}, + dictWord{ + 4, + 0, + 203, + }, + dictWord{135, 0, 1936}, + dictWord{15, 0, 29}, + dictWord{16, 0, 38}, + dictWord{15, 11, 29}, + dictWord{144, 11, 38}, + dictWord{5, 0, 338}, + dictWord{ + 135, + 0, + 1256, + }, + dictWord{135, 10, 1493}, + dictWord{10, 0, 130}, + dictWord{6, 10, 421}, + dictWord{7, 10, 61}, + dictWord{7, 10, 1540}, + dictWord{138, 10, 501}, + dictWord{ + 6, + 11, + 389, + }, + dictWord{7, 11, 149}, + dictWord{9, 11, 142}, + dictWord{138, 11, 94}, + dictWord{137, 10, 341}, + dictWord{11, 0, 678}, + dictWord{12, 0, 307}, + dictWord{142, 10, 98}, + dictWord{6, 11, 8}, + dictWord{7, 11, 1881}, + dictWord{136, 11, 91}, + dictWord{135, 0, 2044}, + dictWord{6, 0, 770}, + dictWord{6, 0, 802}, + dictWord{ + 6, + 0, + 812, + }, + dictWord{7, 0, 311}, + dictWord{9, 0, 308}, + dictWord{12, 0, 255}, + dictWord{6, 10, 102}, + dictWord{7, 10, 72}, + dictWord{15, 10, 142}, + dictWord{ + 147, + 10, + 67, + }, + dictWord{151, 10, 30}, + dictWord{135, 10, 823}, + dictWord{135, 0, 1266}, + dictWord{135, 11, 1746}, + dictWord{135, 10, 1870}, + dictWord{4, 0, 400}, + dictWord{5, 0, 267}, + dictWord{135, 0, 232}, + dictWord{7, 11, 24}, + dictWord{11, 11, 542}, + dictWord{139, 11, 852}, + dictWord{135, 11, 1739}, + dictWord{4, 11, 503}, + dictWord{135, 11, 1661}, + dictWord{5, 11, 130}, + dictWord{7, 11, 1314}, + dictWord{9, 11, 610}, + dictWord{10, 11, 718}, + dictWord{11, 11, 601}, + dictWord{ + 11, + 11, + 819, + }, + dictWord{11, 11, 946}, + dictWord{140, 11, 536}, + dictWord{10, 11, 149}, + dictWord{11, 11, 280}, + dictWord{142, 11, 336}, + dictWord{7, 0, 739}, + dictWord{11, 0, 690}, + dictWord{7, 11, 1946}, + dictWord{8, 10, 48}, + dictWord{8, 10, 88}, + dictWord{8, 10, 582}, + dictWord{8, 10, 681}, + dictWord{9, 10, 373}, + dictWord{ + 9, + 10, + 864, + }, + dictWord{11, 10, 157}, + dictWord{11, 10, 843}, + dictWord{148, 10, 27}, + dictWord{134, 0, 990}, + dictWord{4, 10, 88}, + dictWord{5, 10, 137}, + dictWord{ + 5, + 10, + 174, + }, + dictWord{5, 10, 777}, + dictWord{6, 10, 1664}, + dictWord{6, 10, 1725}, + dictWord{7, 10, 77}, + dictWord{7, 10, 426}, + dictWord{7, 10, 1317}, + dictWord{ + 7, + 10, + 1355, + }, + dictWord{8, 10, 126}, + dictWord{8, 10, 563}, + dictWord{9, 10, 523}, + dictWord{9, 10, 750}, + dictWord{10, 10, 310}, + dictWord{10, 10, 836}, + dictWord{ + 11, + 10, + 42, + }, + dictWord{11, 10, 318}, + dictWord{11, 10, 731}, + dictWord{12, 10, 68}, + dictWord{12, 10, 92}, + dictWord{12, 10, 507}, + dictWord{12, 10, 692}, + dictWord{ + 13, + 10, + 81, + }, + dictWord{13, 10, 238}, + dictWord{13, 10, 374}, + dictWord{14, 10, 436}, + dictWord{18, 10, 138}, + dictWord{19, 10, 78}, + dictWord{19, 10, 111}, + dictWord{20, 10, 55}, + dictWord{20, 10, 77}, + dictWord{148, 10, 92}, + dictWord{141, 10, 418}, + dictWord{7, 0, 1831}, + dictWord{132, 10, 938}, + dictWord{6, 0, 776}, + dictWord{134, 0, 915}, + dictWord{138, 10, 351}, + dictWord{5, 11, 348}, + dictWord{6, 11, 522}, + dictWord{6, 10, 1668}, + dictWord{7, 10, 1499}, + 
dictWord{8, 10, 117}, + dictWord{9, 10, 314}, + dictWord{138, 10, 174}, + dictWord{135, 10, 707}, + dictWord{132, 0, 613}, + dictWord{133, 10, 403}, + dictWord{132, 11, 392}, + dictWord{ + 5, + 11, + 433, + }, + dictWord{9, 11, 633}, + dictWord{139, 11, 629}, + dictWord{133, 0, 763}, + dictWord{132, 0, 878}, + dictWord{132, 0, 977}, + dictWord{132, 0, 100}, + dictWord{6, 0, 463}, + dictWord{4, 10, 44}, + dictWord{5, 10, 311}, + dictWord{7, 10, 639}, + dictWord{7, 10, 762}, + dictWord{7, 10, 1827}, + dictWord{9, 10, 8}, + dictWord{ + 9, + 10, + 462, + }, + dictWord{148, 10, 83}, + dictWord{134, 11, 234}, + dictWord{4, 10, 346}, + dictWord{7, 10, 115}, + dictWord{9, 10, 180}, + dictWord{9, 10, 456}, + dictWord{ + 138, + 10, + 363, + }, + dictWord{5, 0, 362}, + dictWord{5, 0, 443}, + dictWord{6, 0, 318}, + dictWord{7, 0, 1019}, + dictWord{139, 0, 623}, + dictWord{5, 0, 463}, + dictWord{8, 0, 296}, + dictWord{7, 11, 140}, + dictWord{7, 11, 1950}, + dictWord{8, 11, 680}, + dictWord{11, 11, 817}, + dictWord{147, 11, 88}, + dictWord{7, 11, 1222}, + dictWord{ + 138, + 11, + 386, + }, + dictWord{142, 0, 137}, + dictWord{132, 0, 454}, + dictWord{7, 0, 1914}, + dictWord{6, 11, 5}, + dictWord{7, 10, 1051}, + dictWord{9, 10, 545}, + dictWord{ + 11, + 11, + 249, + }, + dictWord{12, 11, 313}, + dictWord{16, 11, 66}, + dictWord{145, 11, 26}, + dictWord{135, 0, 1527}, + dictWord{145, 0, 58}, + dictWord{148, 11, 59}, + dictWord{ + 5, + 0, + 48, + }, + dictWord{5, 0, 404}, + dictWord{6, 0, 557}, + dictWord{7, 0, 458}, + dictWord{8, 0, 597}, + dictWord{10, 0, 455}, + dictWord{10, 0, 606}, + dictWord{11, 0, 49}, + dictWord{ + 11, + 0, + 548, + }, + dictWord{12, 0, 476}, + dictWord{13, 0, 18}, + dictWord{141, 0, 450}, + dictWord{5, 11, 963}, + dictWord{134, 11, 1773}, + dictWord{133, 0, 729}, + dictWord{138, 11, 586}, + dictWord{5, 0, 442}, + dictWord{135, 0, 1984}, + dictWord{134, 0, 449}, + dictWord{144, 0, 40}, + dictWord{4, 0, 853}, + dictWord{7, 11, 180}, + dictWord{8, 11, 509}, + dictWord{136, 11, 792}, + dictWord{6, 10, 185}, + dictWord{7, 10, 1899}, + dictWord{9, 10, 875}, + dictWord{139, 10, 673}, + dictWord{ + 134, + 11, + 524, + }, + dictWord{12, 0, 227}, + dictWord{4, 10, 327}, + dictWord{5, 10, 478}, + dictWord{7, 10, 1332}, + dictWord{136, 10, 753}, + dictWord{6, 0, 1491}, + dictWord{ + 5, + 10, + 1020, + }, + dictWord{133, 10, 1022}, + dictWord{4, 10, 103}, + dictWord{133, 10, 401}, + dictWord{132, 11, 931}, + dictWord{4, 10, 499}, + dictWord{135, 10, 1421}, + dictWord{5, 0, 55}, + dictWord{7, 0, 376}, + dictWord{140, 0, 161}, + dictWord{133, 0, 450}, + dictWord{6, 0, 1174}, + dictWord{134, 0, 1562}, + dictWord{10, 0, 62}, + dictWord{13, 0, 400}, + dictWord{135, 11, 1837}, + dictWord{140, 0, 207}, + dictWord{135, 0, 869}, + dictWord{4, 11, 773}, + dictWord{5, 11, 618}, + dictWord{ + 137, + 11, + 756, + }, + dictWord{132, 10, 96}, + dictWord{4, 0, 213}, + dictWord{7, 0, 223}, + dictWord{8, 0, 80}, + dictWord{135, 10, 968}, + dictWord{4, 11, 90}, + dictWord{5, 11, 337}, + dictWord{5, 11, 545}, + dictWord{7, 11, 754}, + dictWord{9, 11, 186}, + dictWord{10, 11, 72}, + dictWord{10, 11, 782}, + dictWord{11, 11, 513}, + dictWord{11, 11, 577}, + dictWord{11, 11, 610}, + dictWord{11, 11, 889}, + dictWord{11, 11, 961}, + dictWord{12, 11, 354}, + dictWord{12, 11, 362}, + dictWord{12, 11, 461}, + dictWord{ + 12, + 11, + 595, + }, + dictWord{13, 11, 79}, + dictWord{143, 11, 121}, + dictWord{7, 0, 381}, + dictWord{7, 0, 806}, + dictWord{7, 0, 820}, + dictWord{8, 0, 354}, + dictWord{8, 0, 437}, + dictWord{8, 0, 
787}, + dictWord{9, 0, 657}, + dictWord{10, 0, 58}, + dictWord{10, 0, 339}, + dictWord{10, 0, 749}, + dictWord{11, 0, 914}, + dictWord{12, 0, 162}, + dictWord{ + 13, + 0, + 75, + }, + dictWord{14, 0, 106}, + dictWord{14, 0, 198}, + dictWord{14, 0, 320}, + dictWord{14, 0, 413}, + dictWord{146, 0, 43}, + dictWord{136, 0, 747}, + dictWord{ + 136, + 0, + 954, + }, + dictWord{134, 0, 1073}, + dictWord{135, 0, 556}, + dictWord{7, 11, 151}, + dictWord{9, 11, 329}, + dictWord{139, 11, 254}, + dictWord{5, 0, 692}, + dictWord{ + 134, + 0, + 1395, + }, + dictWord{6, 10, 563}, + dictWord{137, 10, 224}, + dictWord{134, 0, 191}, + dictWord{132, 0, 804}, + dictWord{9, 11, 187}, + dictWord{10, 11, 36}, + dictWord{17, 11, 44}, + dictWord{146, 11, 64}, + dictWord{7, 11, 165}, + dictWord{7, 11, 919}, + dictWord{136, 11, 517}, + dictWord{4, 11, 506}, + dictWord{5, 11, 295}, + dictWord{7, 11, 1680}, + dictWord{15, 11, 14}, + dictWord{144, 11, 5}, + dictWord{4, 0, 706}, + dictWord{6, 0, 162}, + dictWord{7, 0, 1960}, + dictWord{136, 0, 831}, + dictWord{ + 135, + 11, + 1376, + }, + dictWord{7, 11, 987}, + dictWord{9, 11, 688}, + dictWord{10, 11, 522}, + dictWord{11, 11, 788}, + dictWord{140, 11, 566}, + dictWord{150, 0, 35}, + dictWord{138, 0, 426}, + dictWord{135, 0, 1235}, + dictWord{135, 11, 1741}, + dictWord{7, 11, 389}, + dictWord{7, 11, 700}, + dictWord{7, 11, 940}, + dictWord{ + 8, + 11, + 514, + }, + dictWord{9, 11, 116}, + dictWord{9, 11, 535}, + dictWord{10, 11, 118}, + dictWord{11, 11, 107}, + dictWord{11, 11, 148}, + dictWord{11, 11, 922}, + dictWord{ + 12, + 11, + 254, + }, + dictWord{12, 11, 421}, + dictWord{142, 11, 238}, + dictWord{134, 0, 1234}, + dictWord{132, 11, 743}, + dictWord{4, 10, 910}, + dictWord{5, 10, 832}, + dictWord{135, 11, 1335}, + dictWord{141, 0, 96}, + dictWord{135, 11, 185}, + dictWord{146, 0, 149}, + dictWord{4, 0, 204}, + dictWord{137, 0, 902}, + dictWord{ + 4, + 11, + 784, + }, + dictWord{133, 11, 745}, + dictWord{136, 0, 833}, + dictWord{136, 0, 949}, + dictWord{7, 0, 366}, + dictWord{9, 0, 287}, + dictWord{12, 0, 199}, + dictWord{ + 12, + 0, + 556, + }, + dictWord{12, 0, 577}, + dictWord{5, 11, 81}, + dictWord{7, 11, 146}, + dictWord{7, 11, 1342}, + dictWord{7, 11, 1446}, + dictWord{8, 11, 53}, + dictWord{8, 11, 561}, + dictWord{8, 11, 694}, + dictWord{8, 11, 754}, + dictWord{9, 11, 97}, + dictWord{9, 11, 115}, + dictWord{9, 11, 894}, + dictWord{10, 11, 462}, + dictWord{10, 11, 813}, + dictWord{11, 11, 230}, + dictWord{11, 11, 657}, + dictWord{11, 11, 699}, + dictWord{11, 11, 748}, + dictWord{12, 11, 119}, + dictWord{12, 11, 200}, + dictWord{ + 12, + 11, + 283, + }, + dictWord{14, 11, 273}, + dictWord{145, 11, 15}, + dictWord{5, 11, 408}, + dictWord{137, 11, 747}, + dictWord{9, 11, 498}, + dictWord{140, 11, 181}, + dictWord{ + 6, + 0, + 2020, + }, + dictWord{136, 0, 992}, + dictWord{5, 0, 356}, + dictWord{135, 0, 224}, + dictWord{134, 0, 784}, + dictWord{7, 0, 630}, + dictWord{9, 0, 567}, + dictWord{ + 11, + 0, + 150, + }, + dictWord{11, 0, 444}, + dictWord{13, 0, 119}, + dictWord{8, 10, 528}, + dictWord{137, 10, 348}, + dictWord{134, 0, 539}, + dictWord{4, 10, 20}, + dictWord{ + 133, + 10, + 616, + }, + dictWord{142, 0, 27}, + dictWord{7, 11, 30}, + dictWord{8, 11, 86}, + dictWord{8, 11, 315}, + dictWord{8, 11, 700}, + dictWord{9, 11, 576}, + dictWord{9, 11, 858}, + dictWord{11, 11, 310}, + dictWord{11, 11, 888}, + dictWord{11, 11, 904}, + dictWord{12, 11, 361}, + dictWord{141, 11, 248}, + dictWord{138, 11, 839}, + dictWord{ + 134, + 0, + 755, + }, + dictWord{134, 0, 
1063}, + dictWord{7, 10, 1091}, + dictWord{135, 10, 1765}, + dictWord{134, 11, 428}, + dictWord{7, 11, 524}, + dictWord{8, 11, 169}, + dictWord{8, 11, 234}, + dictWord{9, 11, 480}, + dictWord{138, 11, 646}, + dictWord{139, 0, 814}, + dictWord{7, 11, 1462}, + dictWord{139, 11, 659}, + dictWord{ + 4, + 10, + 26, + }, + dictWord{5, 10, 429}, + dictWord{6, 10, 245}, + dictWord{7, 10, 704}, + dictWord{7, 10, 1379}, + dictWord{135, 10, 1474}, + dictWord{7, 11, 1205}, + dictWord{ + 138, + 11, + 637, + }, + dictWord{139, 11, 803}, + dictWord{132, 10, 621}, + dictWord{136, 0, 987}, + dictWord{4, 11, 266}, + dictWord{8, 11, 4}, + dictWord{9, 11, 39}, + dictWord{ + 10, + 11, + 166, + }, + dictWord{11, 11, 918}, + dictWord{12, 11, 635}, + dictWord{20, 11, 10}, + dictWord{22, 11, 27}, + dictWord{150, 11, 43}, + dictWord{4, 0, 235}, + dictWord{ + 135, + 0, + 255, + }, + dictWord{4, 0, 194}, + dictWord{5, 0, 584}, + dictWord{6, 0, 384}, + dictWord{7, 0, 583}, + dictWord{10, 0, 761}, + dictWord{11, 0, 760}, + dictWord{139, 0, 851}, + dictWord{133, 10, 542}, + dictWord{134, 0, 1086}, + dictWord{133, 10, 868}, + dictWord{8, 0, 1016}, + dictWord{136, 0, 1018}, + dictWord{7, 0, 1396}, + dictWord{ + 7, + 11, + 1396, + }, + dictWord{136, 10, 433}, + dictWord{135, 10, 1495}, + dictWord{138, 10, 215}, + dictWord{141, 10, 124}, + dictWord{7, 11, 157}, + dictWord{ + 8, + 11, + 279, + }, + dictWord{9, 11, 759}, + dictWord{16, 11, 31}, + dictWord{16, 11, 39}, + dictWord{16, 11, 75}, + dictWord{18, 11, 24}, + dictWord{20, 11, 42}, + dictWord{152, 11, 1}, + dictWord{5, 0, 562}, + dictWord{134, 11, 604}, + dictWord{134, 0, 913}, + dictWord{5, 0, 191}, + dictWord{137, 0, 271}, + dictWord{4, 0, 470}, + dictWord{6, 0, 153}, + dictWord{7, 0, 1503}, + dictWord{7, 0, 1923}, + dictWord{10, 0, 701}, + dictWord{11, 0, 132}, + dictWord{11, 0, 227}, + dictWord{11, 0, 320}, + dictWord{11, 0, 436}, + dictWord{ + 11, + 0, + 525, + }, + dictWord{11, 0, 855}, + dictWord{11, 0, 873}, + dictWord{12, 0, 41}, + dictWord{12, 0, 286}, + dictWord{13, 0, 103}, + dictWord{13, 0, 284}, + dictWord{ + 14, + 0, + 255, + }, + dictWord{14, 0, 262}, + dictWord{15, 0, 117}, + dictWord{143, 0, 127}, + dictWord{7, 0, 475}, + dictWord{12, 0, 45}, + dictWord{147, 10, 112}, + dictWord{ + 132, + 11, + 567, + }, + dictWord{137, 11, 859}, + dictWord{6, 0, 713}, + dictWord{6, 0, 969}, + dictWord{6, 0, 1290}, + dictWord{134, 0, 1551}, + dictWord{133, 0, 327}, + dictWord{ + 6, + 0, + 552, + }, + dictWord{6, 0, 1292}, + dictWord{7, 0, 1754}, + dictWord{137, 0, 604}, + dictWord{4, 0, 223}, + dictWord{6, 0, 359}, + dictWord{11, 0, 3}, + dictWord{13, 0, 108}, + dictWord{14, 0, 89}, + dictWord{16, 0, 22}, + dictWord{5, 11, 762}, + dictWord{7, 11, 1880}, + dictWord{9, 11, 680}, + dictWord{139, 11, 798}, + dictWord{5, 0, 80}, + dictWord{ + 6, + 0, + 405, + }, + dictWord{7, 0, 403}, + dictWord{7, 0, 1502}, + dictWord{8, 0, 456}, + dictWord{9, 0, 487}, + dictWord{9, 0, 853}, + dictWord{9, 0, 889}, + dictWord{10, 0, 309}, + dictWord{ + 11, + 0, + 721, + }, + dictWord{11, 0, 994}, + dictWord{12, 0, 430}, + dictWord{141, 0, 165}, + dictWord{133, 11, 298}, + dictWord{132, 10, 647}, + dictWord{134, 0, 2016}, + dictWord{18, 10, 10}, + dictWord{146, 11, 10}, + dictWord{4, 0, 453}, + dictWord{5, 0, 887}, + dictWord{6, 0, 535}, + dictWord{8, 0, 6}, + dictWord{8, 0, 543}, + dictWord{ + 136, + 0, + 826, + }, + dictWord{136, 0, 975}, + dictWord{10, 0, 961}, + dictWord{138, 0, 962}, + dictWord{138, 10, 220}, + dictWord{6, 0, 1891}, + dictWord{6, 0, 1893}, + dictWord{ + 9, + 0, + 916, 
+ }, + dictWord{9, 0, 965}, + dictWord{9, 0, 972}, + dictWord{12, 0, 801}, + dictWord{12, 0, 859}, + dictWord{12, 0, 883}, + dictWord{15, 0, 226}, + dictWord{149, 0, 51}, + dictWord{132, 10, 109}, + dictWord{135, 11, 267}, + dictWord{7, 11, 92}, + dictWord{7, 11, 182}, + dictWord{8, 11, 453}, + dictWord{9, 11, 204}, + dictWord{11, 11, 950}, + dictWord{12, 11, 94}, + dictWord{12, 11, 644}, + dictWord{16, 11, 20}, + dictWord{16, 11, 70}, + dictWord{16, 11, 90}, + dictWord{147, 11, 55}, + dictWord{ + 134, + 10, + 1746, + }, + dictWord{6, 11, 71}, + dictWord{7, 11, 845}, + dictWord{7, 11, 1308}, + dictWord{8, 11, 160}, + dictWord{137, 11, 318}, + dictWord{5, 0, 101}, + dictWord{6, 0, 88}, + dictWord{7, 0, 263}, + dictWord{7, 0, 628}, + dictWord{7, 0, 1677}, + dictWord{8, 0, 349}, + dictWord{9, 0, 100}, + dictWord{10, 0, 677}, + dictWord{14, 0, 169}, + dictWord{ + 14, + 0, + 302, + }, + dictWord{14, 0, 313}, + dictWord{15, 0, 48}, + dictWord{15, 0, 84}, + dictWord{7, 11, 237}, + dictWord{8, 11, 664}, + dictWord{9, 11, 42}, + dictWord{9, 11, 266}, + dictWord{9, 11, 380}, + dictWord{9, 11, 645}, + dictWord{10, 11, 177}, + dictWord{138, 11, 276}, + dictWord{138, 11, 69}, + dictWord{4, 0, 310}, + dictWord{7, 0, 708}, + dictWord{7, 0, 996}, + dictWord{9, 0, 795}, + dictWord{10, 0, 390}, + dictWord{10, 0, 733}, + dictWord{11, 0, 451}, + dictWord{12, 0, 249}, + dictWord{14, 0, 115}, + dictWord{ + 14, + 0, + 286, + }, + dictWord{143, 0, 100}, + dictWord{5, 0, 587}, + dictWord{4, 10, 40}, + dictWord{10, 10, 67}, + dictWord{11, 10, 117}, + dictWord{11, 10, 768}, + dictWord{ + 139, + 10, + 935, + }, + dictWord{6, 0, 1942}, + dictWord{7, 0, 512}, + dictWord{136, 0, 983}, + dictWord{7, 10, 992}, + dictWord{8, 10, 301}, + dictWord{9, 10, 722}, + dictWord{12, 10, 63}, + dictWord{13, 10, 29}, + dictWord{14, 10, 161}, + dictWord{143, 10, 18}, + dictWord{136, 11, 76}, + dictWord{139, 10, 923}, + dictWord{134, 0, 645}, + dictWord{ + 134, + 0, + 851, + }, + dictWord{4, 0, 498}, + dictWord{132, 11, 293}, + dictWord{7, 0, 217}, + dictWord{8, 0, 140}, + dictWord{10, 0, 610}, + dictWord{14, 11, 352}, + dictWord{ + 17, + 11, + 53, + }, + dictWord{18, 11, 146}, + dictWord{18, 11, 152}, + dictWord{19, 11, 11}, + dictWord{150, 11, 54}, + dictWord{134, 0, 1448}, + dictWord{138, 11, 841}, + dictWord{133, 0, 905}, + dictWord{4, 11, 605}, + dictWord{7, 11, 518}, + dictWord{7, 11, 1282}, + dictWord{7, 11, 1918}, + dictWord{10, 11, 180}, + dictWord{139, 11, 218}, + dictWord{139, 11, 917}, + dictWord{135, 10, 825}, + dictWord{140, 10, 328}, + dictWord{4, 0, 456}, + dictWord{7, 0, 105}, + dictWord{7, 0, 358}, + dictWord{7, 0, 1637}, + dictWord{8, 0, 643}, + dictWord{139, 0, 483}, + dictWord{134, 0, 792}, + dictWord{6, 11, 96}, + dictWord{135, 11, 1426}, + dictWord{137, 11, 691}, + dictWord{ + 4, + 11, + 651, + }, + dictWord{133, 11, 289}, + dictWord{7, 11, 688}, + dictWord{8, 11, 35}, + dictWord{9, 11, 511}, + dictWord{10, 11, 767}, + dictWord{147, 11, 118}, + dictWord{ + 150, + 0, + 56, + }, + dictWord{5, 0, 243}, + dictWord{5, 0, 535}, + dictWord{6, 10, 204}, + dictWord{10, 10, 320}, + dictWord{10, 10, 583}, + dictWord{13, 10, 502}, + dictWord{ + 14, + 10, + 72, + }, + dictWord{14, 10, 274}, + dictWord{14, 10, 312}, + dictWord{14, 10, 344}, + dictWord{15, 10, 159}, + dictWord{16, 10, 62}, + dictWord{16, 10, 69}, + dictWord{ + 17, + 10, + 30, + }, + dictWord{18, 10, 42}, + dictWord{18, 10, 53}, + dictWord{18, 10, 84}, + dictWord{18, 10, 140}, + dictWord{19, 10, 68}, + dictWord{19, 10, 85}, + dictWord{20, 10, 5}, + dictWord{20, 
10, 45}, + dictWord{20, 10, 101}, + dictWord{22, 10, 7}, + dictWord{150, 10, 20}, + dictWord{4, 10, 558}, + dictWord{6, 10, 390}, + dictWord{7, 10, 162}, + dictWord{7, 10, 689}, + dictWord{9, 10, 360}, + dictWord{138, 10, 653}, + dictWord{146, 11, 23}, + dictWord{135, 0, 1748}, + dictWord{5, 10, 856}, + dictWord{ + 6, + 10, + 1672, + }, + dictWord{6, 10, 1757}, + dictWord{134, 10, 1781}, + dictWord{5, 0, 539}, + dictWord{5, 0, 754}, + dictWord{6, 0, 876}, + dictWord{132, 11, 704}, + dictWord{ + 135, + 11, + 1078, + }, + dictWord{5, 10, 92}, + dictWord{10, 10, 736}, + dictWord{140, 10, 102}, + dictWord{17, 0, 91}, + dictWord{5, 10, 590}, + dictWord{137, 10, 213}, + dictWord{134, 0, 1565}, + dictWord{6, 0, 91}, + dictWord{135, 0, 435}, + dictWord{4, 0, 939}, + dictWord{140, 0, 792}, + dictWord{134, 0, 1399}, + dictWord{4, 0, 16}, + dictWord{ + 5, + 0, + 316, + }, + dictWord{5, 0, 842}, + dictWord{6, 0, 370}, + dictWord{6, 0, 1778}, + dictWord{8, 0, 166}, + dictWord{11, 0, 812}, + dictWord{12, 0, 206}, + dictWord{12, 0, 351}, + dictWord{14, 0, 418}, + dictWord{16, 0, 15}, + dictWord{16, 0, 34}, + dictWord{18, 0, 3}, + dictWord{19, 0, 3}, + dictWord{19, 0, 7}, + dictWord{20, 0, 4}, + dictWord{21, 0, 21}, + dictWord{ + 4, + 11, + 720, + }, + dictWord{133, 11, 306}, + dictWord{144, 0, 95}, + dictWord{133, 11, 431}, + dictWord{132, 11, 234}, + dictWord{135, 0, 551}, + dictWord{4, 0, 999}, + dictWord{6, 0, 1966}, + dictWord{134, 0, 2042}, + dictWord{7, 0, 619}, + dictWord{10, 0, 547}, + dictWord{11, 0, 122}, + dictWord{12, 0, 601}, + dictWord{15, 0, 7}, + dictWord{148, 0, 20}, + dictWord{5, 11, 464}, + dictWord{6, 11, 236}, + dictWord{7, 11, 276}, + dictWord{7, 11, 696}, + dictWord{7, 11, 914}, + dictWord{7, 11, 1108}, + dictWord{ + 7, + 11, + 1448, + }, + dictWord{9, 11, 15}, + dictWord{9, 11, 564}, + dictWord{10, 11, 14}, + dictWord{12, 11, 565}, + dictWord{13, 11, 449}, + dictWord{14, 11, 53}, + dictWord{ + 15, + 11, + 13, + }, + dictWord{16, 11, 64}, + dictWord{145, 11, 41}, + dictWord{6, 0, 884}, + dictWord{6, 0, 1019}, + dictWord{134, 0, 1150}, + dictWord{6, 11, 1767}, + dictWord{ + 12, + 11, + 194, + }, + dictWord{145, 11, 107}, + dictWord{136, 10, 503}, + dictWord{133, 11, 840}, + dictWord{7, 0, 671}, + dictWord{134, 10, 466}, + dictWord{132, 0, 888}, + dictWord{4, 0, 149}, + dictWord{138, 0, 368}, + dictWord{4, 0, 154}, + dictWord{7, 0, 1134}, + dictWord{136, 0, 105}, + dictWord{135, 0, 983}, + dictWord{9, 11, 642}, + dictWord{11, 11, 236}, + dictWord{142, 11, 193}, + dictWord{4, 0, 31}, + dictWord{6, 0, 429}, + dictWord{7, 0, 962}, + dictWord{9, 0, 458}, + dictWord{139, 0, 691}, + dictWord{ + 6, + 0, + 643, + }, + dictWord{134, 0, 1102}, + dictWord{132, 0, 312}, + dictWord{4, 11, 68}, + dictWord{5, 11, 634}, + dictWord{6, 11, 386}, + dictWord{7, 11, 794}, + dictWord{ + 8, + 11, + 273, + }, + dictWord{9, 11, 563}, + dictWord{10, 11, 105}, + dictWord{10, 11, 171}, + dictWord{11, 11, 94}, + dictWord{139, 11, 354}, + dictWord{133, 0, 740}, + dictWord{ + 135, + 0, + 1642, + }, + dictWord{4, 11, 95}, + dictWord{7, 11, 416}, + dictWord{8, 11, 211}, + dictWord{139, 11, 830}, + dictWord{132, 0, 236}, + dictWord{138, 10, 241}, + dictWord{7, 11, 731}, + dictWord{13, 11, 20}, + dictWord{143, 11, 11}, + dictWord{5, 0, 836}, + dictWord{5, 0, 857}, + dictWord{6, 0, 1680}, + dictWord{135, 0, 59}, + dictWord{ + 10, + 0, + 68, + }, + dictWord{11, 0, 494}, + dictWord{152, 11, 6}, + dictWord{4, 0, 81}, + dictWord{139, 0, 867}, + dictWord{135, 0, 795}, + dictWord{133, 11, 689}, + dictWord{ + 4, + 0, + 1001, 
+ }, + dictWord{5, 0, 282}, + dictWord{6, 0, 1932}, + dictWord{6, 0, 1977}, + dictWord{6, 0, 1987}, + dictWord{6, 0, 1992}, + dictWord{8, 0, 650}, + dictWord{8, 0, 919}, + dictWord{8, 0, 920}, + dictWord{8, 0, 923}, + dictWord{8, 0, 926}, + dictWord{8, 0, 927}, + dictWord{8, 0, 931}, + dictWord{8, 0, 939}, + dictWord{8, 0, 947}, + dictWord{8, 0, 956}, + dictWord{8, 0, 997}, + dictWord{9, 0, 907}, + dictWord{10, 0, 950}, + dictWord{10, 0, 953}, + dictWord{10, 0, 954}, + dictWord{10, 0, 956}, + dictWord{10, 0, 958}, + dictWord{ + 10, + 0, + 959, + }, + dictWord{10, 0, 964}, + dictWord{10, 0, 970}, + dictWord{10, 0, 972}, + dictWord{10, 0, 973}, + dictWord{10, 0, 975}, + dictWord{10, 0, 976}, + dictWord{ + 10, + 0, + 980, + }, + dictWord{10, 0, 981}, + dictWord{10, 0, 984}, + dictWord{10, 0, 988}, + dictWord{10, 0, 990}, + dictWord{10, 0, 995}, + dictWord{10, 0, 999}, + dictWord{ + 10, + 0, + 1002, + }, + dictWord{10, 0, 1003}, + dictWord{10, 0, 1005}, + dictWord{10, 0, 1006}, + dictWord{10, 0, 1008}, + dictWord{10, 0, 1009}, + dictWord{10, 0, 1012}, + dictWord{10, 0, 1014}, + dictWord{10, 0, 1015}, + dictWord{10, 0, 1019}, + dictWord{10, 0, 1020}, + dictWord{10, 0, 1022}, + dictWord{12, 0, 959}, + dictWord{12, 0, 961}, + dictWord{12, 0, 962}, + dictWord{12, 0, 963}, + dictWord{12, 0, 964}, + dictWord{12, 0, 965}, + dictWord{12, 0, 967}, + dictWord{12, 0, 968}, + dictWord{12, 0, 969}, + dictWord{12, 0, 970}, + dictWord{12, 0, 971}, + dictWord{12, 0, 972}, + dictWord{12, 0, 973}, + dictWord{12, 0, 974}, + dictWord{12, 0, 975}, + dictWord{12, 0, 976}, + dictWord{ + 12, + 0, + 977, + }, + dictWord{12, 0, 979}, + dictWord{12, 0, 981}, + dictWord{12, 0, 982}, + dictWord{12, 0, 983}, + dictWord{12, 0, 984}, + dictWord{12, 0, 985}, + dictWord{ + 12, + 0, + 986, + }, + dictWord{12, 0, 987}, + dictWord{12, 0, 989}, + dictWord{12, 0, 990}, + dictWord{12, 0, 992}, + dictWord{12, 0, 993}, + dictWord{12, 0, 995}, + dictWord{12, 0, 998}, + dictWord{12, 0, 999}, + dictWord{12, 0, 1000}, + dictWord{12, 0, 1001}, + dictWord{12, 0, 1002}, + dictWord{12, 0, 1004}, + dictWord{12, 0, 1005}, + dictWord{ + 12, + 0, + 1006, + }, + dictWord{12, 0, 1007}, + dictWord{12, 0, 1008}, + dictWord{12, 0, 1009}, + dictWord{12, 0, 1010}, + dictWord{12, 0, 1011}, + dictWord{12, 0, 1012}, + dictWord{12, 0, 1014}, + dictWord{12, 0, 1015}, + dictWord{12, 0, 1016}, + dictWord{12, 0, 1017}, + dictWord{12, 0, 1018}, + dictWord{12, 0, 1019}, + dictWord{ + 12, + 0, + 1022, + }, + dictWord{12, 0, 1023}, + dictWord{14, 0, 475}, + dictWord{14, 0, 477}, + dictWord{14, 0, 478}, + dictWord{14, 0, 479}, + dictWord{14, 0, 480}, + dictWord{ + 14, + 0, + 482, + }, + dictWord{14, 0, 483}, + dictWord{14, 0, 484}, + dictWord{14, 0, 485}, + dictWord{14, 0, 486}, + dictWord{14, 0, 487}, + dictWord{14, 0, 488}, + dictWord{14, 0, 489}, + dictWord{14, 0, 490}, + dictWord{14, 0, 491}, + dictWord{14, 0, 492}, + dictWord{14, 0, 493}, + dictWord{14, 0, 494}, + dictWord{14, 0, 495}, + dictWord{14, 0, 496}, + dictWord{14, 0, 497}, + dictWord{14, 0, 498}, + dictWord{14, 0, 499}, + dictWord{14, 0, 500}, + dictWord{14, 0, 501}, + dictWord{14, 0, 502}, + dictWord{14, 0, 503}, + dictWord{ + 14, + 0, + 504, + }, + dictWord{14, 0, 506}, + dictWord{14, 0, 507}, + dictWord{14, 0, 508}, + dictWord{14, 0, 509}, + dictWord{14, 0, 510}, + dictWord{14, 0, 511}, + dictWord{ + 16, + 0, + 113, + }, + dictWord{16, 0, 114}, + dictWord{16, 0, 115}, + dictWord{16, 0, 117}, + dictWord{16, 0, 118}, + dictWord{16, 0, 119}, + dictWord{16, 0, 121}, + dictWord{16, 0, 122}, + 
dictWord{16, 0, 123}, + dictWord{16, 0, 124}, + dictWord{16, 0, 125}, + dictWord{16, 0, 126}, + dictWord{16, 0, 127}, + dictWord{18, 0, 242}, + dictWord{18, 0, 243}, + dictWord{18, 0, 244}, + dictWord{18, 0, 245}, + dictWord{18, 0, 248}, + dictWord{18, 0, 249}, + dictWord{18, 0, 250}, + dictWord{18, 0, 251}, + dictWord{18, 0, 252}, + dictWord{ + 18, + 0, + 253, + }, + dictWord{18, 0, 254}, + dictWord{18, 0, 255}, + dictWord{20, 0, 125}, + dictWord{20, 0, 126}, + dictWord{148, 0, 127}, + dictWord{7, 11, 1717}, + dictWord{ + 7, + 11, + 1769, + }, + dictWord{138, 11, 546}, + dictWord{7, 11, 1127}, + dictWord{7, 11, 1572}, + dictWord{10, 11, 297}, + dictWord{10, 11, 422}, + dictWord{11, 11, 764}, + dictWord{11, 11, 810}, + dictWord{12, 11, 264}, + dictWord{13, 11, 102}, + dictWord{13, 11, 300}, + dictWord{13, 11, 484}, + dictWord{14, 11, 147}, + dictWord{ + 14, + 11, + 229, + }, + dictWord{17, 11, 71}, + dictWord{18, 11, 118}, + dictWord{147, 11, 120}, + dictWord{6, 0, 1148}, + dictWord{134, 0, 1586}, + dictWord{132, 0, 775}, + dictWord{135, 10, 954}, + dictWord{133, 11, 864}, + dictWord{133, 11, 928}, + dictWord{138, 11, 189}, + dictWord{135, 10, 1958}, + dictWord{6, 10, 549}, + dictWord{ + 8, + 10, + 34, + }, + dictWord{8, 10, 283}, + dictWord{9, 10, 165}, + dictWord{138, 10, 475}, + dictWord{5, 10, 652}, + dictWord{5, 10, 701}, + dictWord{135, 10, 449}, + dictWord{135, 11, 695}, + dictWord{4, 10, 655}, + dictWord{7, 10, 850}, + dictWord{17, 10, 75}, + dictWord{146, 10, 137}, + dictWord{140, 11, 682}, + dictWord{ + 133, + 11, + 523, + }, + dictWord{8, 0, 970}, + dictWord{136, 10, 670}, + dictWord{136, 11, 555}, + dictWord{7, 11, 76}, + dictWord{8, 11, 44}, + dictWord{9, 11, 884}, + dictWord{ + 10, + 11, + 580, + }, + dictWord{11, 11, 399}, + dictWord{11, 11, 894}, + dictWord{15, 11, 122}, + dictWord{18, 11, 144}, + dictWord{147, 11, 61}, + dictWord{6, 10, 159}, + dictWord{ + 6, + 10, + 364, + }, + dictWord{7, 10, 516}, + dictWord{7, 10, 1439}, + dictWord{137, 10, 518}, + dictWord{4, 0, 71}, + dictWord{5, 0, 376}, + dictWord{7, 0, 119}, + dictWord{ + 138, + 0, + 665, + }, + dictWord{141, 10, 151}, + dictWord{11, 0, 827}, + dictWord{14, 0, 34}, + dictWord{143, 0, 148}, + dictWord{133, 11, 518}, + dictWord{4, 0, 479}, + dictWord{ + 135, + 11, + 1787, + }, + dictWord{135, 11, 1852}, + dictWord{135, 10, 993}, + dictWord{7, 0, 607}, + dictWord{136, 0, 99}, + dictWord{134, 0, 1960}, + dictWord{132, 0, 793}, + dictWord{4, 0, 41}, + dictWord{5, 0, 74}, + dictWord{7, 0, 1627}, + dictWord{11, 0, 871}, + dictWord{140, 0, 619}, + dictWord{7, 0, 94}, + dictWord{11, 0, 329}, + dictWord{ + 11, + 0, + 965, + }, + dictWord{12, 0, 241}, + dictWord{14, 0, 354}, + dictWord{15, 0, 22}, + dictWord{148, 0, 63}, + dictWord{7, 10, 501}, + dictWord{9, 10, 111}, + dictWord{10, 10, 141}, + dictWord{11, 10, 332}, + dictWord{13, 10, 43}, + dictWord{13, 10, 429}, + dictWord{14, 10, 130}, + dictWord{14, 10, 415}, + dictWord{145, 10, 102}, + dictWord{ + 9, + 0, + 209, + }, + dictWord{137, 0, 300}, + dictWord{134, 0, 1497}, + dictWord{138, 11, 255}, + dictWord{4, 11, 934}, + dictWord{5, 11, 138}, + dictWord{136, 11, 610}, + dictWord{133, 0, 98}, + dictWord{6, 0, 1316}, + dictWord{10, 11, 804}, + dictWord{138, 11, 832}, + dictWord{8, 11, 96}, + dictWord{9, 11, 36}, + dictWord{10, 11, 607}, + dictWord{11, 11, 423}, + dictWord{11, 11, 442}, + dictWord{12, 11, 309}, + dictWord{14, 11, 199}, + dictWord{15, 11, 90}, + dictWord{145, 11, 110}, + dictWord{ + 132, + 0, + 463, + }, + dictWord{5, 10, 149}, + dictWord{136, 10, 233}, + 
dictWord{133, 10, 935}, + dictWord{4, 11, 652}, + dictWord{8, 11, 320}, + dictWord{9, 11, 13}, + dictWord{ + 9, + 11, + 398, + }, + dictWord{9, 11, 727}, + dictWord{10, 11, 75}, + dictWord{10, 11, 184}, + dictWord{10, 11, 230}, + dictWord{10, 11, 564}, + dictWord{10, 11, 569}, + dictWord{ + 11, + 11, + 973, + }, + dictWord{12, 11, 70}, + dictWord{12, 11, 189}, + dictWord{13, 11, 57}, + dictWord{13, 11, 257}, + dictWord{22, 11, 6}, + dictWord{150, 11, 16}, + dictWord{ + 142, + 0, + 291, + }, + dictWord{12, 10, 582}, + dictWord{146, 10, 131}, + dictWord{136, 10, 801}, + dictWord{133, 0, 984}, + dictWord{145, 11, 116}, + dictWord{4, 11, 692}, + dictWord{133, 11, 321}, + dictWord{4, 0, 182}, + dictWord{6, 0, 205}, + dictWord{135, 0, 220}, + dictWord{4, 0, 42}, + dictWord{9, 0, 205}, + dictWord{9, 0, 786}, + dictWord{ + 138, + 0, + 659, + }, + dictWord{6, 0, 801}, + dictWord{11, 11, 130}, + dictWord{140, 11, 609}, + dictWord{132, 0, 635}, + dictWord{5, 11, 345}, + dictWord{135, 11, 1016}, + dictWord{139, 0, 533}, + dictWord{132, 0, 371}, + dictWord{4, 0, 272}, + dictWord{135, 0, 836}, + dictWord{6, 0, 1282}, + dictWord{135, 11, 1100}, + dictWord{5, 0, 825}, + dictWord{134, 0, 1640}, + dictWord{135, 11, 1325}, + dictWord{133, 11, 673}, + dictWord{4, 11, 287}, + dictWord{133, 11, 1018}, + dictWord{135, 0, 357}, + dictWord{ + 6, + 0, + 467, + }, + dictWord{137, 0, 879}, + dictWord{7, 0, 317}, + dictWord{135, 0, 569}, + dictWord{6, 0, 924}, + dictWord{134, 0, 1588}, + dictWord{5, 11, 34}, + dictWord{ + 5, + 10, + 406, + }, + dictWord{10, 11, 724}, + dictWord{12, 11, 444}, + dictWord{13, 11, 354}, + dictWord{18, 11, 32}, + dictWord{23, 11, 24}, + dictWord{23, 11, 31}, + dictWord{ + 152, + 11, + 5, + }, + dictWord{6, 0, 1795}, + dictWord{6, 0, 1835}, + dictWord{6, 0, 1836}, + dictWord{6, 0, 1856}, + dictWord{8, 0, 844}, + dictWord{8, 0, 849}, + dictWord{8, 0, 854}, + dictWord{8, 0, 870}, + dictWord{8, 0, 887}, + dictWord{10, 0, 852}, + dictWord{138, 0, 942}, + dictWord{6, 10, 69}, + dictWord{135, 10, 117}, + dictWord{137, 0, 307}, + dictWord{ + 4, + 0, + 944, + }, + dictWord{6, 0, 1799}, + dictWord{6, 0, 1825}, + dictWord{10, 0, 848}, + dictWord{10, 0, 875}, + dictWord{10, 0, 895}, + dictWord{10, 0, 899}, + dictWord{ + 10, + 0, + 902, + }, + dictWord{140, 0, 773}, + dictWord{11, 0, 43}, + dictWord{13, 0, 72}, + dictWord{141, 0, 142}, + dictWord{135, 10, 1830}, + dictWord{134, 11, 382}, + dictWord{ + 4, + 10, + 432, + }, + dictWord{135, 10, 824}, + dictWord{132, 11, 329}, + dictWord{7, 0, 1820}, + dictWord{139, 11, 124}, + dictWord{133, 10, 826}, + dictWord{ + 133, + 0, + 525, + }, + dictWord{132, 11, 906}, + dictWord{7, 11, 1940}, + dictWord{136, 11, 366}, + dictWord{138, 11, 10}, + dictWord{4, 11, 123}, + dictWord{4, 11, 649}, + dictWord{ + 5, + 11, + 605, + }, + dictWord{7, 11, 1509}, + dictWord{136, 11, 36}, + dictWord{6, 0, 110}, + dictWord{135, 0, 1681}, + dictWord{133, 0, 493}, + dictWord{133, 11, 767}, + dictWord{4, 0, 174}, + dictWord{135, 0, 911}, + dictWord{138, 11, 786}, + dictWord{8, 0, 417}, + dictWord{137, 0, 782}, + dictWord{133, 10, 1000}, + dictWord{7, 0, 733}, + dictWord{137, 0, 583}, + dictWord{4, 10, 297}, + dictWord{6, 10, 529}, + dictWord{7, 10, 152}, + dictWord{7, 10, 713}, + dictWord{7, 10, 1845}, + dictWord{8, 10, 710}, + dictWord{8, 10, 717}, + dictWord{12, 10, 639}, + dictWord{140, 10, 685}, + dictWord{4, 0, 32}, + dictWord{5, 0, 215}, + dictWord{6, 0, 269}, + dictWord{7, 0, 1782}, + dictWord{ + 7, + 0, + 1892, + }, + dictWord{10, 0, 16}, + dictWord{11, 0, 822}, + 
dictWord{11, 0, 954}, + dictWord{141, 0, 481}, + dictWord{4, 11, 273}, + dictWord{5, 11, 658}, + dictWord{ + 133, + 11, + 995, + }, + dictWord{136, 0, 477}, + dictWord{134, 11, 72}, + dictWord{135, 11, 1345}, + dictWord{5, 0, 308}, + dictWord{7, 0, 1088}, + dictWord{4, 10, 520}, + dictWord{ + 135, + 10, + 575, + }, + dictWord{133, 11, 589}, + dictWord{5, 0, 126}, + dictWord{8, 0, 297}, + dictWord{9, 0, 366}, + dictWord{140, 0, 374}, + dictWord{7, 0, 1551}, + dictWord{ + 139, + 0, + 361, + }, + dictWord{5, 11, 117}, + dictWord{6, 11, 514}, + dictWord{6, 11, 541}, + dictWord{7, 11, 1164}, + dictWord{7, 11, 1436}, + dictWord{8, 11, 220}, + dictWord{ + 8, + 11, + 648, + }, + dictWord{10, 11, 688}, + dictWord{139, 11, 560}, + dictWord{133, 11, 686}, + dictWord{4, 0, 946}, + dictWord{6, 0, 1807}, + dictWord{8, 0, 871}, + dictWord{ + 10, + 0, + 854, + }, + dictWord{10, 0, 870}, + dictWord{10, 0, 888}, + dictWord{10, 0, 897}, + dictWord{10, 0, 920}, + dictWord{12, 0, 722}, + dictWord{12, 0, 761}, + dictWord{ + 12, + 0, + 763, + }, + dictWord{12, 0, 764}, + dictWord{14, 0, 454}, + dictWord{14, 0, 465}, + dictWord{16, 0, 107}, + dictWord{18, 0, 167}, + dictWord{18, 0, 168}, + dictWord{ + 146, + 0, + 172, + }, + dictWord{132, 0, 175}, + dictWord{135, 0, 1307}, + dictWord{132, 0, 685}, + dictWord{135, 11, 1834}, + dictWord{133, 0, 797}, + dictWord{6, 0, 745}, + dictWord{ + 6, + 0, + 858, + }, + dictWord{134, 0, 963}, + dictWord{133, 0, 565}, + dictWord{5, 10, 397}, + dictWord{6, 10, 154}, + dictWord{7, 11, 196}, + dictWord{7, 10, 676}, + dictWord{ + 8, + 10, + 443, + }, + dictWord{8, 10, 609}, + dictWord{9, 10, 24}, + dictWord{9, 10, 325}, + dictWord{10, 10, 35}, + dictWord{10, 11, 765}, + dictWord{11, 11, 347}, + dictWord{ + 11, + 10, + 535, + }, + dictWord{11, 11, 552}, + dictWord{11, 11, 576}, + dictWord{11, 10, 672}, + dictWord{11, 11, 790}, + dictWord{11, 10, 1018}, + dictWord{12, 11, 263}, + dictWord{12, 10, 637}, + dictWord{13, 11, 246}, + dictWord{13, 11, 270}, + dictWord{13, 11, 395}, + dictWord{14, 11, 74}, + dictWord{14, 11, 176}, + dictWord{ + 14, + 11, + 190, + }, + dictWord{14, 11, 398}, + dictWord{14, 11, 412}, + dictWord{15, 11, 32}, + dictWord{15, 11, 63}, + dictWord{16, 10, 30}, + dictWord{16, 11, 88}, + dictWord{ + 147, + 11, + 105, + }, + dictWord{13, 11, 84}, + dictWord{141, 11, 122}, + dictWord{4, 0, 252}, + dictWord{7, 0, 1068}, + dictWord{10, 0, 434}, + dictWord{11, 0, 228}, + dictWord{ + 11, + 0, + 426, + }, + dictWord{13, 0, 231}, + dictWord{18, 0, 106}, + dictWord{148, 0, 87}, + dictWord{137, 0, 826}, + dictWord{4, 11, 589}, + dictWord{139, 11, 282}, + dictWord{ + 5, + 11, + 381, + }, + dictWord{135, 11, 1792}, + dictWord{132, 0, 791}, + dictWord{5, 0, 231}, + dictWord{10, 0, 509}, + dictWord{133, 10, 981}, + dictWord{7, 0, 601}, + dictWord{ + 9, + 0, + 277, + }, + dictWord{9, 0, 674}, + dictWord{10, 0, 178}, + dictWord{10, 0, 418}, + dictWord{10, 0, 571}, + dictWord{11, 0, 531}, + dictWord{12, 0, 113}, + dictWord{12, 0, 475}, + dictWord{13, 0, 99}, + dictWord{142, 0, 428}, + dictWord{4, 10, 56}, + dictWord{7, 11, 616}, + dictWord{7, 10, 1791}, + dictWord{8, 10, 607}, + dictWord{8, 10, 651}, + dictWord{10, 11, 413}, + dictWord{11, 10, 465}, + dictWord{11, 10, 835}, + dictWord{12, 10, 337}, + dictWord{141, 10, 480}, + dictWord{7, 0, 1591}, + dictWord{144, 0, 43}, + dictWord{9, 10, 158}, + dictWord{138, 10, 411}, + dictWord{135, 0, 1683}, + dictWord{8, 0, 289}, + dictWord{11, 0, 45}, + dictWord{12, 0, 278}, + dictWord{140, 0, 537}, + dictWord{6, 11, 120}, + dictWord{7, 11, 
1188}, + dictWord{7, 11, 1710}, + dictWord{8, 11, 286}, + dictWord{9, 11, 667}, + dictWord{11, 11, 592}, + dictWord{ + 139, + 11, + 730, + }, + dictWord{136, 10, 617}, + dictWord{135, 0, 1120}, + dictWord{135, 11, 1146}, + dictWord{139, 10, 563}, + dictWord{4, 11, 352}, + dictWord{4, 10, 369}, + dictWord{135, 11, 687}, + dictWord{143, 11, 38}, + dictWord{4, 0, 399}, + dictWord{5, 0, 119}, + dictWord{5, 0, 494}, + dictWord{7, 0, 751}, + dictWord{9, 0, 556}, + dictWord{ + 14, + 11, + 179, + }, + dictWord{15, 11, 151}, + dictWord{150, 11, 11}, + dictWord{4, 11, 192}, + dictWord{5, 11, 49}, + dictWord{6, 11, 200}, + dictWord{6, 11, 293}, + dictWord{ + 6, + 11, + 1696, + }, + dictWord{135, 11, 488}, + dictWord{4, 0, 398}, + dictWord{133, 0, 660}, + dictWord{7, 0, 1030}, + dictWord{134, 10, 622}, + dictWord{135, 11, 595}, + dictWord{141, 0, 168}, + dictWord{132, 11, 147}, + dictWord{7, 0, 973}, + dictWord{10, 10, 624}, + dictWord{142, 10, 279}, + dictWord{132, 10, 363}, + dictWord{ + 132, + 0, + 642, + }, + dictWord{133, 11, 934}, + dictWord{134, 0, 1615}, + dictWord{7, 11, 505}, + dictWord{135, 11, 523}, + dictWord{7, 0, 594}, + dictWord{7, 0, 851}, + dictWord{ + 7, + 0, + 1858, + }, + dictWord{9, 0, 411}, + dictWord{9, 0, 574}, + dictWord{9, 0, 666}, + dictWord{9, 0, 737}, + dictWord{10, 0, 346}, + dictWord{10, 0, 712}, + dictWord{11, 0, 246}, + dictWord{11, 0, 432}, + dictWord{11, 0, 517}, + dictWord{11, 0, 647}, + dictWord{11, 0, 679}, + dictWord{11, 0, 727}, + dictWord{12, 0, 304}, + dictWord{12, 0, 305}, + dictWord{ + 12, + 0, + 323, + }, + dictWord{12, 0, 483}, + dictWord{12, 0, 572}, + dictWord{12, 0, 593}, + dictWord{12, 0, 602}, + dictWord{13, 0, 95}, + dictWord{13, 0, 101}, + dictWord{ + 13, + 0, + 171, + }, + dictWord{13, 0, 315}, + dictWord{13, 0, 378}, + dictWord{13, 0, 425}, + dictWord{13, 0, 475}, + dictWord{14, 0, 63}, + dictWord{14, 0, 380}, + dictWord{14, 0, 384}, + dictWord{15, 0, 133}, + dictWord{18, 0, 112}, + dictWord{148, 0, 72}, + dictWord{135, 0, 1093}, + dictWord{132, 0, 679}, + dictWord{8, 0, 913}, + dictWord{10, 0, 903}, + dictWord{10, 0, 915}, + dictWord{12, 0, 648}, + dictWord{12, 0, 649}, + dictWord{14, 0, 455}, + dictWord{16, 0, 112}, + dictWord{138, 11, 438}, + dictWord{137, 0, 203}, + dictWord{134, 10, 292}, + dictWord{134, 0, 1492}, + dictWord{7, 0, 1374}, + dictWord{8, 0, 540}, + dictWord{5, 10, 177}, + dictWord{6, 10, 616}, + dictWord{7, 10, 827}, + dictWord{9, 10, 525}, + dictWord{138, 10, 656}, + dictWord{135, 0, 1486}, + dictWord{9, 0, 714}, + dictWord{138, 10, 31}, + dictWord{136, 0, 825}, + dictWord{ + 134, + 0, + 1511, + }, + dictWord{132, 11, 637}, + dictWord{134, 0, 952}, + dictWord{4, 10, 161}, + dictWord{133, 10, 631}, + dictWord{5, 0, 143}, + dictWord{5, 0, 769}, + dictWord{ + 6, + 0, + 1760, + }, + dictWord{7, 0, 682}, + dictWord{7, 0, 1992}, + dictWord{136, 0, 736}, + dictWord{132, 0, 700}, + dictWord{134, 0, 1540}, + dictWord{132, 11, 777}, + dictWord{ + 9, + 11, + 867, + }, + dictWord{138, 11, 837}, + dictWord{7, 0, 1557}, + dictWord{135, 10, 1684}, + dictWord{133, 0, 860}, + dictWord{6, 0, 422}, + dictWord{7, 0, 0}, + dictWord{ + 7, + 0, + 1544, + }, + dictWord{9, 0, 605}, + dictWord{11, 0, 990}, + dictWord{12, 0, 235}, + dictWord{12, 0, 453}, + dictWord{13, 0, 47}, + dictWord{13, 0, 266}, + dictWord{9, 10, 469}, + dictWord{9, 10, 709}, + dictWord{12, 10, 512}, + dictWord{14, 10, 65}, + dictWord{145, 10, 12}, + dictWord{11, 0, 807}, + dictWord{10, 10, 229}, + dictWord{11, 10, 73}, + dictWord{139, 10, 376}, + dictWord{6, 11, 170}, + 
dictWord{7, 11, 1080}, + dictWord{8, 11, 395}, + dictWord{8, 11, 487}, + dictWord{11, 11, 125}, + dictWord{ + 141, + 11, + 147, + }, + dictWord{5, 0, 515}, + dictWord{137, 0, 131}, + dictWord{7, 0, 1605}, + dictWord{11, 0, 962}, + dictWord{146, 0, 139}, + dictWord{132, 0, 646}, + dictWord{ + 4, + 0, + 396, + }, + dictWord{7, 0, 728}, + dictWord{9, 0, 117}, + dictWord{13, 0, 202}, + dictWord{148, 0, 51}, + dictWord{6, 0, 121}, + dictWord{6, 0, 124}, + dictWord{6, 0, 357}, + dictWord{ + 7, + 0, + 1138, + }, + dictWord{7, 0, 1295}, + dictWord{8, 0, 162}, + dictWord{8, 0, 508}, + dictWord{11, 0, 655}, + dictWord{4, 11, 535}, + dictWord{6, 10, 558}, + dictWord{ + 7, + 10, + 651, + }, + dictWord{8, 11, 618}, + dictWord{9, 10, 0}, + dictWord{10, 10, 34}, + dictWord{139, 10, 1008}, + dictWord{135, 11, 1245}, + dictWord{138, 0, 357}, + dictWord{ + 150, + 11, + 23, + }, + dictWord{133, 0, 237}, + dictWord{135, 0, 1784}, + dictWord{7, 10, 1832}, + dictWord{138, 10, 374}, + dictWord{132, 0, 713}, + dictWord{132, 11, 46}, + dictWord{6, 0, 1536}, + dictWord{10, 0, 348}, + dictWord{5, 11, 811}, + dictWord{6, 11, 1679}, + dictWord{6, 11, 1714}, + dictWord{135, 11, 2032}, + dictWord{ + 11, + 11, + 182, + }, + dictWord{142, 11, 195}, + dictWord{6, 0, 523}, + dictWord{7, 0, 738}, + dictWord{7, 10, 771}, + dictWord{7, 10, 1731}, + dictWord{9, 10, 405}, + dictWord{ + 138, + 10, + 421, + }, + dictWord{7, 11, 1458}, + dictWord{9, 11, 407}, + dictWord{139, 11, 15}, + dictWord{6, 11, 34}, + dictWord{7, 11, 69}, + dictWord{7, 11, 640}, + dictWord{ + 7, + 11, + 1089, + }, + dictWord{8, 11, 708}, + dictWord{8, 11, 721}, + dictWord{9, 11, 363}, + dictWord{9, 11, 643}, + dictWord{10, 11, 628}, + dictWord{148, 11, 98}, + dictWord{ + 133, + 0, + 434, + }, + dictWord{135, 0, 1877}, + dictWord{7, 0, 571}, + dictWord{138, 0, 366}, + dictWord{5, 10, 881}, + dictWord{133, 10, 885}, + dictWord{9, 0, 513}, + dictWord{ + 10, + 0, + 25, + }, + dictWord{10, 0, 39}, + dictWord{12, 0, 122}, + dictWord{140, 0, 187}, + dictWord{132, 0, 580}, + dictWord{5, 10, 142}, + dictWord{134, 10, 546}, + dictWord{ + 132, + 11, + 462, + }, + dictWord{137, 0, 873}, + dictWord{5, 10, 466}, + dictWord{11, 10, 571}, + dictWord{12, 10, 198}, + dictWord{13, 10, 283}, + dictWord{14, 10, 186}, + dictWord{15, 10, 21}, + dictWord{143, 10, 103}, + dictWord{7, 0, 171}, + dictWord{4, 10, 185}, + dictWord{5, 10, 257}, + dictWord{5, 10, 839}, + dictWord{5, 10, 936}, + dictWord{ + 9, + 10, + 399, + }, + dictWord{10, 10, 258}, + dictWord{10, 10, 395}, + dictWord{10, 10, 734}, + dictWord{11, 10, 1014}, + dictWord{12, 10, 23}, + dictWord{13, 10, 350}, + dictWord{14, 10, 150}, + dictWord{147, 10, 6}, + dictWord{134, 0, 625}, + dictWord{7, 0, 107}, + dictWord{7, 0, 838}, + dictWord{8, 0, 550}, + dictWord{138, 0, 401}, + dictWord{ + 5, + 11, + 73, + }, + dictWord{6, 11, 23}, + dictWord{134, 11, 338}, + dictWord{4, 0, 943}, + dictWord{6, 0, 1850}, + dictWord{12, 0, 713}, + dictWord{142, 0, 434}, + dictWord{ + 11, + 0, + 588, + }, + dictWord{11, 0, 864}, + dictWord{11, 0, 936}, + dictWord{11, 0, 968}, + dictWord{12, 0, 73}, + dictWord{12, 0, 343}, + dictWord{12, 0, 394}, + dictWord{13, 0, 275}, + dictWord{14, 0, 257}, + dictWord{15, 0, 160}, + dictWord{7, 10, 404}, + dictWord{7, 10, 1377}, + dictWord{7, 10, 1430}, + dictWord{7, 10, 2017}, + dictWord{8, 10, 149}, + dictWord{8, 10, 239}, + dictWord{8, 10, 512}, + dictWord{8, 10, 793}, + dictWord{8, 10, 818}, + dictWord{9, 10, 474}, + dictWord{9, 10, 595}, + dictWord{10, 10, 122}, + dictWord{10, 10, 565}, + dictWord{10, 10, 
649}, + dictWord{10, 10, 783}, + dictWord{11, 10, 239}, + dictWord{11, 10, 295}, + dictWord{11, 10, 447}, + dictWord{ + 11, + 10, + 528, + }, + dictWord{11, 10, 639}, + dictWord{11, 10, 800}, + dictWord{12, 10, 25}, + dictWord{12, 10, 157}, + dictWord{12, 10, 316}, + dictWord{12, 10, 390}, + dictWord{ + 12, + 10, + 391, + }, + dictWord{12, 10, 395}, + dictWord{12, 10, 478}, + dictWord{12, 10, 503}, + dictWord{12, 10, 592}, + dictWord{12, 10, 680}, + dictWord{13, 10, 50}, + dictWord{13, 10, 53}, + dictWord{13, 10, 132}, + dictWord{13, 10, 198}, + dictWord{13, 10, 322}, + dictWord{13, 10, 415}, + dictWord{13, 10, 511}, + dictWord{14, 10, 71}, + dictWord{14, 10, 395}, + dictWord{15, 10, 71}, + dictWord{15, 10, 136}, + dictWord{17, 10, 123}, + dictWord{18, 10, 93}, + dictWord{147, 10, 58}, + dictWord{ + 133, + 0, + 768, + }, + dictWord{11, 0, 103}, + dictWord{142, 0, 0}, + dictWord{136, 10, 712}, + dictWord{132, 0, 799}, + dictWord{132, 0, 894}, + dictWord{7, 11, 725}, + dictWord{ + 8, + 11, + 498, + }, + dictWord{139, 11, 268}, + dictWord{135, 11, 1798}, + dictWord{135, 11, 773}, + dictWord{141, 11, 360}, + dictWord{4, 10, 377}, + dictWord{152, 10, 13}, + dictWord{135, 0, 1673}, + dictWord{132, 11, 583}, + dictWord{134, 0, 1052}, + dictWord{133, 11, 220}, + dictWord{140, 11, 69}, + dictWord{132, 11, 544}, + dictWord{ + 4, + 10, + 180, + }, + dictWord{135, 10, 1906}, + dictWord{134, 0, 272}, + dictWord{4, 0, 441}, + dictWord{134, 0, 1421}, + dictWord{4, 0, 9}, + dictWord{5, 0, 128}, + dictWord{ + 7, + 0, + 368, + }, + dictWord{11, 0, 480}, + dictWord{148, 0, 3}, + dictWord{5, 11, 176}, + dictWord{6, 11, 437}, + dictWord{6, 11, 564}, + dictWord{11, 11, 181}, + dictWord{ + 141, + 11, + 183, + }, + dictWord{132, 10, 491}, + dictWord{7, 0, 1182}, + dictWord{141, 11, 67}, + dictWord{6, 0, 1346}, + dictWord{4, 10, 171}, + dictWord{138, 10, 234}, + dictWord{ + 4, + 10, + 586, + }, + dictWord{7, 10, 1186}, + dictWord{138, 10, 631}, + dictWord{136, 0, 682}, + dictWord{134, 0, 1004}, + dictWord{15, 0, 24}, + dictWord{143, 11, 24}, + dictWord{134, 0, 968}, + dictWord{4, 0, 2}, + dictWord{6, 0, 742}, + dictWord{6, 0, 793}, + dictWord{7, 0, 545}, + dictWord{7, 0, 894}, + dictWord{9, 10, 931}, + dictWord{ + 10, + 10, + 334, + }, + dictWord{148, 10, 71}, + dictWord{136, 11, 600}, + dictWord{133, 10, 765}, + dictWord{9, 0, 769}, + dictWord{140, 0, 185}, + dictWord{4, 11, 790}, + dictWord{ + 5, + 11, + 273, + }, + dictWord{134, 11, 394}, + dictWord{7, 0, 474}, + dictWord{137, 0, 578}, + dictWord{4, 11, 135}, + dictWord{6, 11, 127}, + dictWord{7, 11, 1185}, + dictWord{ + 7, + 11, + 1511, + }, + dictWord{8, 11, 613}, + dictWord{11, 11, 5}, + dictWord{12, 11, 133}, + dictWord{12, 11, 495}, + dictWord{12, 11, 586}, + dictWord{14, 11, 385}, + dictWord{15, 11, 118}, + dictWord{17, 11, 20}, + dictWord{146, 11, 98}, + dictWord{133, 10, 424}, + dictWord{5, 0, 530}, + dictWord{142, 0, 113}, + dictWord{6, 11, 230}, + dictWord{7, 11, 961}, + dictWord{7, 11, 1085}, + dictWord{136, 11, 462}, + dictWord{7, 11, 1954}, + dictWord{137, 11, 636}, + dictWord{136, 10, 714}, + dictWord{ + 149, + 11, + 6, + }, + dictWord{135, 10, 685}, + dictWord{9, 10, 420}, + dictWord{10, 10, 269}, + dictWord{10, 10, 285}, + dictWord{10, 10, 576}, + dictWord{11, 10, 397}, + dictWord{13, 10, 175}, + dictWord{145, 10, 90}, + dictWord{132, 10, 429}, + dictWord{5, 0, 556}, + dictWord{5, 11, 162}, + dictWord{136, 11, 68}, + dictWord{132, 11, 654}, + dictWord{4, 11, 156}, + dictWord{7, 11, 998}, + dictWord{7, 11, 1045}, + dictWord{7, 11, 1860}, + 
dictWord{9, 11, 48}, + dictWord{9, 11, 692}, + dictWord{11, 11, 419}, + dictWord{139, 11, 602}, + dictWord{6, 0, 1317}, + dictWord{8, 0, 16}, + dictWord{9, 0, 825}, + dictWord{12, 0, 568}, + dictWord{7, 11, 1276}, + dictWord{8, 11, 474}, + dictWord{137, 11, 652}, + dictWord{18, 0, 97}, + dictWord{7, 10, 18}, + dictWord{7, 10, 699}, + dictWord{7, 10, 1966}, + dictWord{8, 10, 752}, + dictWord{9, 10, 273}, + dictWord{ + 9, + 10, + 412, + }, + dictWord{9, 10, 703}, + dictWord{10, 10, 71}, + dictWord{10, 10, 427}, + dictWord{138, 10, 508}, + dictWord{10, 0, 703}, + dictWord{7, 11, 1454}, + dictWord{138, 11, 703}, + dictWord{4, 10, 53}, + dictWord{5, 10, 186}, + dictWord{135, 10, 752}, + dictWord{134, 0, 892}, + dictWord{134, 0, 1571}, + dictWord{8, 10, 575}, + dictWord{10, 10, 289}, + dictWord{139, 10, 319}, + dictWord{6, 0, 186}, + dictWord{137, 0, 426}, + dictWord{134, 0, 1101}, + dictWord{132, 10, 675}, + dictWord{ + 132, + 0, + 585, + }, + dictWord{6, 0, 1870}, + dictWord{137, 0, 937}, + dictWord{152, 11, 10}, + dictWord{9, 11, 197}, + dictWord{10, 11, 300}, + dictWord{12, 11, 473}, + dictWord{ + 13, + 11, + 90, + }, + dictWord{141, 11, 405}, + dictWord{4, 0, 93}, + dictWord{5, 0, 252}, + dictWord{6, 0, 229}, + dictWord{7, 0, 291}, + dictWord{9, 0, 550}, + dictWord{139, 0, 644}, + dictWord{137, 0, 749}, + dictWord{9, 0, 162}, + dictWord{6, 10, 209}, + dictWord{8, 10, 468}, + dictWord{9, 10, 210}, + dictWord{11, 10, 36}, + dictWord{12, 10, 28}, + dictWord{12, 10, 630}, + dictWord{13, 10, 21}, + dictWord{13, 10, 349}, + dictWord{14, 10, 7}, + dictWord{145, 10, 13}, + dictWord{132, 0, 381}, + dictWord{132, 11, 606}, + dictWord{4, 10, 342}, + dictWord{135, 10, 1179}, + dictWord{7, 11, 1587}, + dictWord{7, 11, 1707}, + dictWord{10, 11, 528}, + dictWord{139, 11, 504}, + dictWord{ + 12, + 11, + 39, + }, + dictWord{13, 11, 265}, + dictWord{141, 11, 439}, + dictWord{4, 10, 928}, + dictWord{133, 10, 910}, + dictWord{7, 10, 1838}, + dictWord{7, 11, 1978}, + dictWord{136, 11, 676}, + dictWord{6, 0, 762}, + dictWord{6, 0, 796}, + dictWord{134, 0, 956}, + dictWord{4, 10, 318}, + dictWord{4, 10, 496}, + dictWord{7, 10, 856}, + dictWord{139, 10, 654}, + dictWord{137, 11, 242}, + dictWord{4, 11, 361}, + dictWord{133, 11, 315}, + dictWord{132, 11, 461}, + dictWord{132, 11, 472}, + dictWord{ + 132, + 0, + 857, + }, + dictWord{5, 0, 21}, + dictWord{6, 0, 77}, + dictWord{6, 0, 157}, + dictWord{7, 0, 974}, + dictWord{7, 0, 1301}, + dictWord{7, 0, 1339}, + dictWord{7, 0, 1490}, + dictWord{ + 7, + 0, + 1873, + }, + dictWord{9, 0, 628}, + dictWord{7, 10, 915}, + dictWord{8, 10, 247}, + dictWord{147, 10, 0}, + dictWord{4, 10, 202}, + dictWord{5, 10, 382}, + dictWord{ + 6, + 10, + 454, + }, + dictWord{7, 10, 936}, + dictWord{7, 10, 1803}, + dictWord{8, 10, 758}, + dictWord{9, 10, 375}, + dictWord{9, 10, 895}, + dictWord{10, 10, 743}, + dictWord{ + 10, + 10, + 792, + }, + dictWord{11, 10, 978}, + dictWord{11, 10, 1012}, + dictWord{142, 10, 109}, + dictWord{7, 11, 617}, + dictWord{10, 11, 498}, + dictWord{11, 11, 501}, + dictWord{12, 11, 16}, + dictWord{140, 11, 150}, + dictWord{7, 10, 1150}, + dictWord{7, 10, 1425}, + dictWord{7, 10, 1453}, + dictWord{10, 11, 747}, + dictWord{ + 140, + 10, + 513, + }, + dictWord{133, 11, 155}, + dictWord{11, 0, 919}, + dictWord{141, 0, 409}, + dictWord{138, 10, 791}, + dictWord{10, 0, 633}, + dictWord{139, 11, 729}, + dictWord{ + 7, + 11, + 163, + }, + dictWord{8, 11, 319}, + dictWord{9, 11, 402}, + dictWord{10, 11, 24}, + dictWord{10, 11, 681}, + dictWord{11, 11, 200}, + dictWord{11, 
11, 567}, + dictWord{12, 11, 253}, + dictWord{12, 11, 410}, + dictWord{142, 11, 219}, + dictWord{5, 11, 475}, + dictWord{7, 11, 1780}, + dictWord{9, 11, 230}, + dictWord{11, 11, 297}, + dictWord{11, 11, 558}, + dictWord{14, 11, 322}, + dictWord{147, 11, 76}, + dictWord{7, 0, 332}, + dictWord{6, 10, 445}, + dictWord{137, 10, 909}, + dictWord{ + 135, + 11, + 1956, + }, + dictWord{136, 11, 274}, + dictWord{134, 10, 578}, + dictWord{135, 0, 1489}, + dictWord{135, 11, 1848}, + dictWord{5, 11, 944}, + dictWord{ + 134, + 11, + 1769, + }, + dictWord{132, 11, 144}, + dictWord{136, 10, 766}, + dictWord{4, 0, 832}, + dictWord{135, 10, 541}, + dictWord{8, 0, 398}, + dictWord{9, 0, 681}, + dictWord{ + 139, + 0, + 632, + }, + dictWord{136, 0, 645}, + dictWord{9, 0, 791}, + dictWord{10, 0, 93}, + dictWord{16, 0, 13}, + dictWord{17, 0, 23}, + dictWord{18, 0, 135}, + dictWord{19, 0, 12}, + dictWord{20, 0, 1}, + dictWord{20, 0, 12}, + dictWord{148, 0, 14}, + dictWord{6, 11, 247}, + dictWord{137, 11, 555}, + dictWord{134, 0, 20}, + dictWord{132, 0, 800}, + dictWord{135, 0, 1841}, + dictWord{139, 10, 983}, + dictWord{137, 10, 768}, + dictWord{132, 10, 584}, + dictWord{141, 11, 51}, + dictWord{6, 0, 1993}, + dictWord{ + 4, + 11, + 620, + }, + dictWord{138, 11, 280}, + dictWord{136, 0, 769}, + dictWord{11, 0, 290}, + dictWord{11, 0, 665}, + dictWord{7, 11, 1810}, + dictWord{11, 11, 866}, + dictWord{ + 12, + 11, + 103, + }, + dictWord{13, 11, 495}, + dictWord{17, 11, 67}, + dictWord{147, 11, 74}, + dictWord{134, 0, 1426}, + dictWord{139, 0, 60}, + dictWord{4, 10, 326}, + dictWord{135, 10, 1770}, + dictWord{7, 0, 1874}, + dictWord{9, 0, 641}, + dictWord{132, 10, 226}, + dictWord{6, 0, 644}, + dictWord{5, 10, 426}, + dictWord{8, 10, 30}, + dictWord{ + 9, + 10, + 2, + }, + dictWord{11, 10, 549}, + dictWord{147, 10, 122}, + dictWord{5, 11, 428}, + dictWord{138, 11, 442}, + dictWord{135, 11, 1871}, + dictWord{ + 135, + 0, + 1757, + }, + dictWord{147, 10, 117}, + dictWord{135, 0, 937}, + dictWord{135, 0, 1652}, + dictWord{6, 0, 654}, + dictWord{134, 0, 1476}, + dictWord{133, 11, 99}, + dictWord{135, 0, 527}, + dictWord{132, 10, 345}, + dictWord{4, 10, 385}, + dictWord{4, 11, 397}, + dictWord{7, 10, 265}, + dictWord{135, 10, 587}, + dictWord{4, 0, 579}, + dictWord{5, 0, 226}, + dictWord{5, 0, 323}, + dictWord{135, 0, 960}, + dictWord{134, 0, 1486}, + dictWord{8, 11, 502}, + dictWord{144, 11, 9}, + dictWord{4, 10, 347}, + dictWord{ + 5, + 10, + 423, + }, + dictWord{5, 10, 996}, + dictWord{135, 10, 1329}, + dictWord{7, 11, 727}, + dictWord{146, 11, 73}, + dictWord{4, 11, 485}, + dictWord{7, 11, 353}, + dictWord{7, 10, 1259}, + dictWord{7, 11, 1523}, + dictWord{9, 10, 125}, + dictWord{139, 10, 65}, + dictWord{6, 0, 325}, + dictWord{5, 10, 136}, + dictWord{6, 11, 366}, + dictWord{ + 7, + 11, + 1384, + }, + dictWord{7, 11, 1601}, + dictWord{136, 10, 644}, + dictWord{138, 11, 160}, + dictWord{6, 0, 1345}, + dictWord{137, 11, 282}, + dictWord{18, 0, 91}, + dictWord{147, 0, 70}, + dictWord{136, 0, 404}, + dictWord{4, 11, 157}, + dictWord{133, 11, 471}, + dictWord{133, 0, 973}, + dictWord{6, 0, 135}, + dictWord{ + 135, + 0, + 1176, + }, + dictWord{8, 11, 116}, + dictWord{11, 11, 551}, + dictWord{142, 11, 159}, + dictWord{4, 0, 549}, + dictWord{4, 10, 433}, + dictWord{133, 10, 719}, + dictWord{ + 136, + 0, + 976, + }, + dictWord{5, 11, 160}, + dictWord{7, 11, 363}, + dictWord{7, 11, 589}, + dictWord{10, 11, 170}, + dictWord{141, 11, 55}, + dictWord{144, 0, 21}, + dictWord{ + 144, + 0, + 51, + }, + dictWord{135, 0, 314}, + 
dictWord{135, 10, 1363}, + dictWord{4, 11, 108}, + dictWord{7, 11, 405}, + dictWord{10, 11, 491}, + dictWord{139, 11, 498}, + dictWord{146, 0, 4}, + dictWord{4, 10, 555}, + dictWord{8, 10, 536}, + dictWord{10, 10, 288}, + dictWord{139, 10, 1005}, + dictWord{135, 11, 1005}, + dictWord{6, 0, 281}, + dictWord{7, 0, 6}, + dictWord{8, 0, 282}, + dictWord{8, 0, 480}, + dictWord{8, 0, 499}, + dictWord{9, 0, 198}, + dictWord{10, 0, 143}, + dictWord{10, 0, 169}, + dictWord{ + 10, + 0, + 211, + }, + dictWord{10, 0, 417}, + dictWord{10, 0, 574}, + dictWord{11, 0, 147}, + dictWord{11, 0, 395}, + dictWord{12, 0, 75}, + dictWord{12, 0, 407}, + dictWord{12, 0, 608}, + dictWord{13, 0, 500}, + dictWord{142, 0, 251}, + dictWord{6, 0, 1093}, + dictWord{6, 0, 1405}, + dictWord{9, 10, 370}, + dictWord{138, 10, 90}, + dictWord{4, 11, 926}, + dictWord{133, 11, 983}, + dictWord{135, 0, 1776}, + dictWord{134, 0, 1528}, + dictWord{132, 0, 419}, + dictWord{132, 11, 538}, + dictWord{6, 11, 294}, + dictWord{ + 7, + 11, + 1267, + }, + dictWord{136, 11, 624}, + dictWord{135, 11, 1772}, + dictWord{138, 11, 301}, + dictWord{4, 10, 257}, + dictWord{135, 10, 2031}, + dictWord{4, 0, 138}, + dictWord{7, 0, 1012}, + dictWord{7, 0, 1280}, + dictWord{9, 0, 76}, + dictWord{135, 10, 1768}, + dictWord{132, 11, 757}, + dictWord{5, 0, 29}, + dictWord{140, 0, 638}, + dictWord{7, 11, 655}, + dictWord{135, 11, 1844}, + dictWord{7, 0, 1418}, + dictWord{6, 11, 257}, + dictWord{135, 11, 1522}, + dictWord{8, 11, 469}, + dictWord{ + 138, + 11, + 47, + }, + dictWord{142, 11, 278}, + dictWord{6, 10, 83}, + dictWord{6, 10, 1733}, + dictWord{135, 10, 1389}, + dictWord{11, 11, 204}, + dictWord{11, 11, 243}, + dictWord{140, 11, 293}, + dictWord{135, 11, 1875}, + dictWord{6, 0, 1710}, + dictWord{135, 0, 2038}, + dictWord{137, 11, 299}, + dictWord{4, 0, 17}, + dictWord{5, 0, 23}, + dictWord{7, 0, 995}, + dictWord{11, 0, 383}, + dictWord{11, 0, 437}, + dictWord{12, 0, 460}, + dictWord{140, 0, 532}, + dictWord{133, 0, 862}, + dictWord{137, 10, 696}, + dictWord{6, 0, 592}, + dictWord{138, 0, 946}, + dictWord{138, 11, 599}, + dictWord{7, 10, 1718}, + dictWord{9, 10, 95}, + dictWord{9, 10, 274}, + dictWord{10, 10, 279}, + dictWord{10, 10, 317}, + dictWord{10, 10, 420}, + dictWord{11, 10, 303}, + dictWord{11, 10, 808}, + dictWord{12, 10, 134}, + dictWord{12, 10, 367}, + dictWord{ + 13, + 10, + 149, + }, + dictWord{13, 10, 347}, + dictWord{14, 10, 349}, + dictWord{14, 10, 406}, + dictWord{18, 10, 22}, + dictWord{18, 10, 89}, + dictWord{18, 10, 122}, + dictWord{ + 147, + 10, + 47, + }, + dictWord{8, 0, 70}, + dictWord{12, 0, 171}, + dictWord{141, 0, 272}, + dictWord{133, 10, 26}, + dictWord{132, 10, 550}, + dictWord{137, 0, 812}, + dictWord{ + 10, + 0, + 233, + }, + dictWord{139, 0, 76}, + dictWord{134, 0, 988}, + dictWord{134, 0, 442}, + dictWord{136, 10, 822}, + dictWord{7, 0, 896}, + dictWord{4, 10, 902}, + dictWord{ + 5, + 10, + 809, + }, + dictWord{134, 10, 122}, + dictWord{5, 11, 150}, + dictWord{7, 11, 106}, + dictWord{8, 11, 603}, + dictWord{9, 11, 593}, + dictWord{9, 11, 634}, + dictWord{ + 10, + 11, + 44, + }, + dictWord{10, 11, 173}, + dictWord{11, 11, 462}, + dictWord{11, 11, 515}, + dictWord{13, 11, 216}, + dictWord{13, 11, 288}, + dictWord{142, 11, 400}, + dictWord{136, 0, 483}, + dictWord{135, 10, 262}, + dictWord{6, 0, 1709}, + dictWord{133, 10, 620}, + dictWord{4, 10, 34}, + dictWord{5, 10, 574}, + dictWord{7, 10, 279}, + dictWord{7, 10, 1624}, + dictWord{136, 10, 601}, + dictWord{137, 10, 170}, + dictWord{147, 0, 119}, + dictWord{12, 11, 
108}, + dictWord{141, 11, 291}, + dictWord{ + 11, + 0, + 69, + }, + dictWord{12, 0, 105}, + dictWord{12, 0, 117}, + dictWord{13, 0, 213}, + dictWord{14, 0, 13}, + dictWord{14, 0, 62}, + dictWord{14, 0, 177}, + dictWord{14, 0, 421}, + dictWord{15, 0, 19}, + dictWord{146, 0, 141}, + dictWord{137, 0, 309}, + dictWord{11, 11, 278}, + dictWord{142, 11, 73}, + dictWord{7, 0, 608}, + dictWord{7, 0, 976}, + dictWord{9, 0, 146}, + dictWord{10, 0, 206}, + dictWord{10, 0, 596}, + dictWord{13, 0, 218}, + dictWord{142, 0, 153}, + dictWord{133, 10, 332}, + dictWord{6, 10, 261}, + dictWord{ + 8, + 10, + 182, + }, + dictWord{139, 10, 943}, + dictWord{4, 11, 493}, + dictWord{144, 11, 55}, + dictWord{134, 10, 1721}, + dictWord{132, 0, 768}, + dictWord{4, 10, 933}, + dictWord{133, 10, 880}, + dictWord{7, 11, 555}, + dictWord{7, 11, 1316}, + dictWord{7, 11, 1412}, + dictWord{7, 11, 1839}, + dictWord{9, 11, 192}, + dictWord{ + 9, + 11, + 589, + }, + dictWord{11, 11, 241}, + dictWord{11, 11, 676}, + dictWord{11, 11, 811}, + dictWord{11, 11, 891}, + dictWord{12, 11, 140}, + dictWord{12, 11, 346}, + dictWord{ + 12, + 11, + 479, + }, + dictWord{13, 11, 30}, + dictWord{13, 11, 49}, + dictWord{13, 11, 381}, + dictWord{14, 11, 188}, + dictWord{15, 11, 150}, + dictWord{16, 11, 76}, + dictWord{18, 11, 30}, + dictWord{148, 11, 52}, + dictWord{4, 0, 518}, + dictWord{135, 0, 1136}, + dictWord{6, 11, 568}, + dictWord{7, 11, 112}, + dictWord{7, 11, 1804}, + dictWord{8, 11, 362}, + dictWord{8, 11, 410}, + dictWord{8, 11, 830}, + dictWord{9, 11, 514}, + dictWord{11, 11, 649}, + dictWord{142, 11, 157}, + dictWord{135, 11, 673}, + dictWord{8, 0, 689}, + dictWord{137, 0, 863}, + dictWord{4, 0, 18}, + dictWord{7, 0, 145}, + dictWord{7, 0, 444}, + dictWord{7, 0, 1278}, + dictWord{8, 0, 49}, + dictWord{8, 0, 400}, + dictWord{9, 0, 71}, + dictWord{9, 0, 250}, + dictWord{10, 0, 459}, + dictWord{12, 0, 160}, + dictWord{16, 0, 24}, + dictWord{132, 11, 625}, + dictWord{140, 0, 1020}, + dictWord{4, 0, 997}, + dictWord{6, 0, 1946}, + dictWord{6, 0, 1984}, + dictWord{134, 0, 1998}, + dictWord{6, 11, 16}, + dictWord{6, 11, 158}, + dictWord{7, 11, 43}, + dictWord{ + 7, + 11, + 129, + }, + dictWord{7, 11, 181}, + dictWord{8, 11, 276}, + dictWord{8, 11, 377}, + dictWord{10, 11, 523}, + dictWord{11, 11, 816}, + dictWord{12, 11, 455}, + dictWord{ + 13, + 11, + 303, + }, + dictWord{142, 11, 135}, + dictWord{133, 10, 812}, + dictWord{134, 0, 658}, + dictWord{4, 11, 1}, + dictWord{7, 11, 1143}, + dictWord{7, 11, 1463}, + dictWord{8, 11, 61}, + dictWord{9, 11, 207}, + dictWord{9, 11, 390}, + dictWord{9, 11, 467}, + dictWord{139, 11, 836}, + dictWord{150, 11, 26}, + dictWord{140, 0, 106}, + dictWord{6, 0, 1827}, + dictWord{10, 0, 931}, + dictWord{18, 0, 166}, + dictWord{20, 0, 114}, + dictWord{4, 10, 137}, + dictWord{7, 10, 1178}, + dictWord{7, 11, 1319}, + dictWord{135, 10, 1520}, + dictWord{133, 0, 1010}, + dictWord{4, 11, 723}, + dictWord{5, 11, 895}, + dictWord{7, 11, 1031}, + dictWord{8, 11, 199}, + dictWord{8, 11, 340}, + dictWord{9, 11, 153}, + dictWord{9, 11, 215}, + dictWord{10, 11, 21}, + dictWord{10, 11, 59}, + dictWord{10, 11, 80}, + dictWord{10, 11, 224}, + dictWord{11, 11, 229}, + dictWord{11, 11, 652}, + dictWord{12, 11, 192}, + dictWord{13, 11, 146}, + dictWord{142, 11, 91}, + dictWord{132, 11, 295}, + dictWord{6, 11, 619}, + dictWord{ + 7, + 11, + 898, + }, + dictWord{7, 11, 1092}, + dictWord{8, 11, 485}, + dictWord{18, 11, 28}, + dictWord{147, 11, 116}, + dictWord{137, 11, 51}, + dictWord{6, 10, 1661}, + dictWord{ + 7, + 10, + 1975, 
+ }, + dictWord{7, 10, 2009}, + dictWord{135, 10, 2011}, + dictWord{5, 11, 309}, + dictWord{140, 11, 211}, + dictWord{5, 0, 87}, + dictWord{7, 0, 313}, + dictWord{ + 7, + 0, + 1103, + }, + dictWord{10, 0, 208}, + dictWord{10, 0, 582}, + dictWord{11, 0, 389}, + dictWord{11, 0, 813}, + dictWord{12, 0, 385}, + dictWord{13, 0, 286}, + dictWord{ + 14, + 0, + 124, + }, + dictWord{146, 0, 108}, + dictWord{5, 11, 125}, + dictWord{8, 11, 77}, + dictWord{138, 11, 15}, + dictWord{132, 0, 267}, + dictWord{133, 0, 703}, + dictWord{ + 137, + 11, + 155, + }, + dictWord{133, 11, 439}, + dictWord{11, 11, 164}, + dictWord{140, 11, 76}, + dictWord{9, 0, 496}, + dictWord{5, 10, 89}, + dictWord{7, 10, 1915}, + dictWord{ + 9, + 10, + 185, + }, + dictWord{9, 10, 235}, + dictWord{10, 10, 64}, + dictWord{10, 10, 270}, + dictWord{10, 10, 403}, + dictWord{10, 10, 469}, + dictWord{10, 10, 529}, + dictWord{10, 10, 590}, + dictWord{11, 10, 140}, + dictWord{11, 10, 860}, + dictWord{13, 10, 1}, + dictWord{13, 10, 422}, + dictWord{14, 10, 341}, + dictWord{14, 10, 364}, + dictWord{17, 10, 93}, + dictWord{18, 10, 113}, + dictWord{19, 10, 97}, + dictWord{147, 10, 113}, + dictWord{133, 10, 695}, + dictWord{135, 0, 1121}, + dictWord{ + 5, + 10, + 6, + }, + dictWord{6, 10, 183}, + dictWord{7, 10, 680}, + dictWord{7, 10, 978}, + dictWord{7, 10, 1013}, + dictWord{7, 10, 1055}, + dictWord{12, 10, 230}, + dictWord{ + 13, + 10, + 172, + }, + dictWord{146, 10, 29}, + dictWord{4, 11, 8}, + dictWord{7, 11, 1152}, + dictWord{7, 11, 1153}, + dictWord{7, 11, 1715}, + dictWord{9, 11, 374}, + dictWord{ + 10, + 11, + 478, + }, + dictWord{139, 11, 648}, + dictWord{135, 11, 1099}, + dictWord{6, 10, 29}, + dictWord{139, 10, 63}, + dictWord{4, 0, 561}, + dictWord{10, 0, 249}, + dictWord{ + 139, + 0, + 209, + }, + dictWord{132, 0, 760}, + dictWord{7, 11, 799}, + dictWord{138, 11, 511}, + dictWord{136, 11, 87}, + dictWord{9, 0, 154}, + dictWord{140, 0, 485}, + dictWord{136, 0, 255}, + dictWord{132, 0, 323}, + dictWord{140, 0, 419}, + dictWord{132, 10, 311}, + dictWord{134, 10, 1740}, + dictWord{4, 0, 368}, + dictWord{ + 135, + 0, + 641, + }, + dictWord{7, 10, 170}, + dictWord{8, 10, 90}, + dictWord{8, 10, 177}, + dictWord{8, 10, 415}, + dictWord{11, 10, 714}, + dictWord{142, 10, 281}, + dictWord{ + 4, + 11, + 69, + }, + dictWord{5, 11, 122}, + dictWord{9, 11, 656}, + dictWord{138, 11, 464}, + dictWord{5, 11, 849}, + dictWord{134, 11, 1633}, + dictWord{8, 0, 522}, + dictWord{ + 142, + 0, + 328, + }, + dictWord{11, 10, 91}, + dictWord{13, 10, 129}, + dictWord{15, 10, 101}, + dictWord{145, 10, 125}, + dictWord{7, 0, 562}, + dictWord{8, 0, 551}, + dictWord{ + 4, + 10, + 494, + }, + dictWord{6, 10, 74}, + dictWord{7, 10, 44}, + dictWord{11, 11, 499}, + dictWord{12, 10, 17}, + dictWord{15, 10, 5}, + dictWord{148, 10, 11}, + dictWord{4, 10, 276}, + dictWord{133, 10, 296}, + dictWord{9, 0, 92}, + dictWord{147, 0, 91}, + dictWord{4, 10, 7}, + dictWord{5, 10, 90}, + dictWord{5, 10, 158}, + dictWord{6, 10, 542}, + dictWord{ + 7, + 10, + 221, + }, + dictWord{7, 10, 1574}, + dictWord{9, 10, 490}, + dictWord{10, 10, 540}, + dictWord{11, 10, 443}, + dictWord{139, 10, 757}, + dictWord{6, 0, 525}, + dictWord{ + 6, + 0, + 1976, + }, + dictWord{8, 0, 806}, + dictWord{9, 0, 876}, + dictWord{140, 0, 284}, + dictWord{5, 11, 859}, + dictWord{7, 10, 588}, + dictWord{7, 11, 1160}, + dictWord{ + 8, + 11, + 107, + }, + dictWord{9, 10, 175}, + dictWord{9, 11, 291}, + dictWord{9, 11, 439}, + dictWord{10, 10, 530}, + dictWord{10, 11, 663}, + dictWord{11, 11, 609}, + dictWord{ 
+ 140, + 11, + 197, + }, + dictWord{7, 11, 168}, + dictWord{13, 11, 196}, + dictWord{141, 11, 237}, + dictWord{139, 0, 958}, + dictWord{133, 0, 594}, + dictWord{135, 10, 580}, + dictWord{7, 10, 88}, + dictWord{136, 10, 627}, + dictWord{6, 0, 479}, + dictWord{6, 0, 562}, + dictWord{7, 0, 1060}, + dictWord{13, 0, 6}, + dictWord{5, 10, 872}, + dictWord{ + 6, + 10, + 57, + }, + dictWord{7, 10, 471}, + dictWord{9, 10, 447}, + dictWord{137, 10, 454}, + dictWord{136, 11, 413}, + dictWord{145, 11, 19}, + dictWord{4, 11, 117}, + dictWord{ + 6, + 11, + 372, + }, + dictWord{7, 11, 1905}, + dictWord{142, 11, 323}, + dictWord{4, 11, 722}, + dictWord{139, 11, 471}, + dictWord{17, 0, 61}, + dictWord{5, 10, 31}, + dictWord{134, 10, 614}, + dictWord{8, 10, 330}, + dictWord{140, 10, 477}, + dictWord{7, 10, 1200}, + dictWord{138, 10, 460}, + dictWord{6, 10, 424}, + dictWord{ + 135, + 10, + 1866, + }, + dictWord{6, 0, 1641}, + dictWord{136, 0, 820}, + dictWord{6, 0, 1556}, + dictWord{134, 0, 1618}, + dictWord{9, 11, 5}, + dictWord{12, 11, 216}, + dictWord{ + 12, + 11, + 294, + }, + dictWord{12, 11, 298}, + dictWord{12, 11, 400}, + dictWord{12, 11, 518}, + dictWord{13, 11, 229}, + dictWord{143, 11, 139}, + dictWord{15, 11, 155}, + dictWord{144, 11, 79}, + dictWord{4, 0, 302}, + dictWord{135, 0, 1766}, + dictWord{5, 10, 13}, + dictWord{134, 10, 142}, + dictWord{6, 0, 148}, + dictWord{7, 0, 1313}, + dictWord{ + 7, + 10, + 116, + }, + dictWord{8, 10, 322}, + dictWord{8, 10, 755}, + dictWord{9, 10, 548}, + dictWord{10, 10, 714}, + dictWord{11, 10, 884}, + dictWord{141, 10, 324}, + dictWord{137, 0, 676}, + dictWord{9, 11, 88}, + dictWord{139, 11, 270}, + dictWord{5, 11, 12}, + dictWord{7, 11, 375}, + dictWord{137, 11, 438}, + dictWord{134, 0, 1674}, + dictWord{7, 10, 1472}, + dictWord{135, 10, 1554}, + dictWord{11, 0, 178}, + dictWord{7, 10, 1071}, + dictWord{7, 10, 1541}, + dictWord{7, 10, 1767}, + dictWord{ + 7, + 10, + 1806, + }, + dictWord{11, 10, 162}, + dictWord{11, 10, 242}, + dictWord{12, 10, 605}, + dictWord{15, 10, 26}, + dictWord{144, 10, 44}, + dictWord{6, 0, 389}, + dictWord{ + 7, + 0, + 149, + }, + dictWord{9, 0, 142}, + dictWord{138, 0, 94}, + dictWord{140, 11, 71}, + dictWord{145, 10, 115}, + dictWord{6, 0, 8}, + dictWord{7, 0, 1881}, + dictWord{8, 0, 91}, + dictWord{11, 11, 966}, + dictWord{12, 11, 287}, + dictWord{13, 11, 342}, + dictWord{13, 11, 402}, + dictWord{15, 11, 110}, + dictWord{143, 11, 163}, + dictWord{ + 4, + 11, + 258, + }, + dictWord{136, 11, 639}, + dictWord{6, 11, 22}, + dictWord{7, 11, 903}, + dictWord{138, 11, 577}, + dictWord{133, 11, 681}, + dictWord{135, 10, 1111}, + dictWord{135, 11, 1286}, + dictWord{9, 0, 112}, + dictWord{8, 10, 1}, + dictWord{138, 10, 326}, + dictWord{5, 10, 488}, + dictWord{6, 10, 527}, + dictWord{7, 10, 489}, + dictWord{ + 7, + 10, + 1636, + }, + dictWord{8, 10, 121}, + dictWord{8, 10, 144}, + dictWord{8, 10, 359}, + dictWord{9, 10, 193}, + dictWord{9, 10, 241}, + dictWord{9, 10, 336}, + dictWord{ + 9, + 10, + 882, + }, + dictWord{11, 10, 266}, + dictWord{11, 10, 372}, + dictWord{11, 10, 944}, + dictWord{12, 10, 401}, + dictWord{140, 10, 641}, + dictWord{4, 11, 664}, + dictWord{133, 11, 804}, + dictWord{6, 0, 747}, + dictWord{134, 0, 1015}, + dictWord{135, 0, 1746}, + dictWord{9, 10, 31}, + dictWord{10, 10, 244}, + dictWord{ + 10, + 10, + 699, + }, + dictWord{12, 10, 149}, + dictWord{141, 10, 497}, + dictWord{133, 10, 377}, + dictWord{135, 0, 24}, + dictWord{6, 0, 1352}, + dictWord{5, 11, 32}, + dictWord{ + 145, + 10, + 101, + }, + dictWord{7, 0, 1530}, 
+ dictWord{10, 0, 158}, + dictWord{13, 0, 13}, + dictWord{13, 0, 137}, + dictWord{13, 0, 258}, + dictWord{14, 0, 111}, + dictWord{ + 14, + 0, + 225, + }, + dictWord{14, 0, 253}, + dictWord{14, 0, 304}, + dictWord{14, 0, 339}, + dictWord{14, 0, 417}, + dictWord{146, 0, 33}, + dictWord{4, 0, 503}, + dictWord{ + 135, + 0, + 1661, + }, + dictWord{5, 0, 130}, + dictWord{6, 0, 845}, + dictWord{7, 0, 1314}, + dictWord{9, 0, 610}, + dictWord{10, 0, 718}, + dictWord{11, 0, 601}, + dictWord{11, 0, 819}, + dictWord{11, 0, 946}, + dictWord{140, 0, 536}, + dictWord{10, 0, 149}, + dictWord{11, 0, 280}, + dictWord{142, 0, 336}, + dictWord{134, 0, 1401}, + dictWord{ + 135, + 0, + 1946, + }, + dictWord{8, 0, 663}, + dictWord{144, 0, 8}, + dictWord{134, 0, 1607}, + dictWord{135, 10, 2023}, + dictWord{4, 11, 289}, + dictWord{7, 11, 629}, + dictWord{ + 7, + 11, + 1698, + }, + dictWord{7, 11, 1711}, + dictWord{140, 11, 215}, + dictWord{6, 11, 450}, + dictWord{136, 11, 109}, + dictWord{10, 0, 882}, + dictWord{10, 0, 883}, + dictWord{10, 0, 914}, + dictWord{138, 0, 928}, + dictWord{133, 10, 843}, + dictWord{136, 11, 705}, + dictWord{132, 10, 554}, + dictWord{133, 10, 536}, + dictWord{ + 5, + 0, + 417, + }, + dictWord{9, 10, 79}, + dictWord{11, 10, 625}, + dictWord{145, 10, 7}, + dictWord{7, 11, 1238}, + dictWord{142, 11, 37}, + dictWord{4, 0, 392}, + dictWord{ + 135, + 0, + 1597, + }, + dictWord{5, 0, 433}, + dictWord{9, 0, 633}, + dictWord{11, 0, 629}, + dictWord{132, 10, 424}, + dictWord{7, 10, 336}, + dictWord{136, 10, 785}, + dictWord{ + 134, + 11, + 355, + }, + dictWord{6, 0, 234}, + dictWord{7, 0, 769}, + dictWord{9, 0, 18}, + dictWord{138, 0, 358}, + dictWord{4, 10, 896}, + dictWord{134, 10, 1777}, + dictWord{ + 138, + 11, + 323, + }, + dictWord{7, 0, 140}, + dictWord{7, 0, 1950}, + dictWord{8, 0, 680}, + dictWord{11, 0, 817}, + dictWord{147, 0, 88}, + dictWord{7, 0, 1222}, + dictWord{ + 138, + 0, + 386, + }, + dictWord{139, 11, 908}, + dictWord{11, 0, 249}, + dictWord{12, 0, 313}, + dictWord{16, 0, 66}, + dictWord{145, 0, 26}, + dictWord{134, 0, 5}, + dictWord{7, 10, 750}, + dictWord{9, 10, 223}, + dictWord{11, 10, 27}, + dictWord{11, 10, 466}, + dictWord{12, 10, 624}, + dictWord{14, 10, 265}, + dictWord{146, 10, 61}, + dictWord{ + 134, + 11, + 26, + }, + dictWord{134, 0, 1216}, + dictWord{5, 0, 963}, + dictWord{134, 0, 1773}, + dictWord{4, 11, 414}, + dictWord{5, 11, 467}, + dictWord{9, 11, 654}, + dictWord{ + 10, + 11, + 451, + }, + dictWord{12, 11, 59}, + dictWord{141, 11, 375}, + dictWord{135, 11, 17}, + dictWord{4, 10, 603}, + dictWord{133, 10, 661}, + dictWord{4, 10, 11}, + dictWord{ + 6, + 10, + 128, + }, + dictWord{7, 10, 231}, + dictWord{7, 10, 1533}, + dictWord{138, 10, 725}, + dictWord{135, 11, 955}, + dictWord{7, 0, 180}, + dictWord{8, 0, 509}, + dictWord{ + 136, + 0, + 792, + }, + dictWord{132, 10, 476}, + dictWord{132, 0, 1002}, + dictWord{133, 11, 538}, + dictWord{135, 10, 1807}, + dictWord{132, 0, 931}, + dictWord{7, 0, 943}, + dictWord{11, 0, 614}, + dictWord{140, 0, 747}, + dictWord{135, 0, 1837}, + dictWord{9, 10, 20}, + dictWord{10, 10, 324}, + dictWord{10, 10, 807}, + dictWord{ + 139, + 10, + 488, + }, + dictWord{134, 0, 641}, + dictWord{6, 11, 280}, + dictWord{10, 11, 502}, + dictWord{11, 11, 344}, + dictWord{140, 11, 38}, + dictWord{5, 11, 45}, + dictWord{ + 7, + 11, + 1161, + }, + dictWord{11, 11, 448}, + dictWord{11, 11, 880}, + dictWord{13, 11, 139}, + dictWord{13, 11, 407}, + dictWord{15, 11, 16}, + dictWord{17, 11, 95}, + dictWord{ + 18, + 11, + 66, + }, + dictWord{18, 11, 
88}, + dictWord{18, 11, 123}, + dictWord{149, 11, 7}, + dictWord{9, 0, 280}, + dictWord{138, 0, 134}, + dictWord{22, 0, 22}, + dictWord{23, 0, 5}, + dictWord{151, 0, 29}, + dictWord{136, 11, 777}, + dictWord{4, 0, 90}, + dictWord{5, 0, 545}, + dictWord{7, 0, 754}, + dictWord{9, 0, 186}, + dictWord{10, 0, 72}, + dictWord{ + 10, + 0, + 782, + }, + dictWord{11, 0, 577}, + dictWord{11, 0, 610}, + dictWord{11, 0, 960}, + dictWord{12, 0, 354}, + dictWord{12, 0, 362}, + dictWord{12, 0, 595}, + dictWord{ + 4, + 11, + 410, + }, + dictWord{135, 11, 521}, + dictWord{135, 11, 1778}, + dictWord{5, 10, 112}, + dictWord{6, 10, 103}, + dictWord{134, 10, 150}, + dictWord{138, 10, 356}, + dictWord{132, 0, 742}, + dictWord{7, 0, 151}, + dictWord{9, 0, 329}, + dictWord{139, 0, 254}, + dictWord{8, 0, 853}, + dictWord{8, 0, 881}, + dictWord{8, 0, 911}, + dictWord{ + 8, + 0, + 912, + }, + dictWord{10, 0, 872}, + dictWord{12, 0, 741}, + dictWord{12, 0, 742}, + dictWord{152, 0, 18}, + dictWord{4, 11, 573}, + dictWord{136, 11, 655}, + dictWord{ + 6, + 0, + 921, + }, + dictWord{134, 0, 934}, + dictWord{9, 0, 187}, + dictWord{10, 0, 36}, + dictWord{11, 0, 1016}, + dictWord{17, 0, 44}, + dictWord{146, 0, 64}, + dictWord{7, 0, 833}, + dictWord{136, 0, 517}, + dictWord{4, 0, 506}, + dictWord{5, 0, 295}, + dictWord{135, 0, 1680}, + dictWord{4, 10, 708}, + dictWord{8, 10, 15}, + dictWord{9, 10, 50}, + dictWord{ + 9, + 10, + 386, + }, + dictWord{11, 10, 18}, + dictWord{11, 10, 529}, + dictWord{140, 10, 228}, + dictWord{7, 0, 251}, + dictWord{7, 0, 1701}, + dictWord{8, 0, 436}, + dictWord{ + 4, + 10, + 563, + }, + dictWord{7, 10, 592}, + dictWord{7, 10, 637}, + dictWord{7, 10, 770}, + dictWord{8, 10, 463}, + dictWord{9, 10, 60}, + dictWord{9, 10, 335}, + dictWord{9, 10, 904}, + dictWord{10, 10, 73}, + dictWord{11, 10, 434}, + dictWord{12, 10, 585}, + dictWord{13, 10, 331}, + dictWord{18, 10, 110}, + dictWord{148, 10, 60}, + dictWord{ + 132, + 10, + 502, + }, + dictWord{136, 0, 584}, + dictWord{6, 10, 347}, + dictWord{138, 10, 161}, + dictWord{7, 0, 987}, + dictWord{9, 0, 688}, + dictWord{10, 0, 522}, + dictWord{ + 11, + 0, + 788, + }, + dictWord{12, 0, 137}, + dictWord{12, 0, 566}, + dictWord{14, 0, 9}, + dictWord{14, 0, 24}, + dictWord{14, 0, 64}, + dictWord{7, 11, 899}, + dictWord{142, 11, 325}, + dictWord{4, 0, 214}, + dictWord{5, 0, 500}, + dictWord{5, 10, 102}, + dictWord{6, 10, 284}, + dictWord{7, 10, 1079}, + dictWord{7, 10, 1423}, + dictWord{7, 10, 1702}, + dictWord{ + 8, + 10, + 470, + }, + dictWord{9, 10, 554}, + dictWord{9, 10, 723}, + dictWord{139, 10, 333}, + dictWord{7, 10, 246}, + dictWord{135, 10, 840}, + dictWord{6, 10, 10}, + dictWord{ + 8, + 10, + 571, + }, + dictWord{9, 10, 739}, + dictWord{143, 10, 91}, + dictWord{133, 10, 626}, + dictWord{146, 0, 195}, + dictWord{134, 0, 1775}, + dictWord{7, 0, 389}, + dictWord{7, 0, 700}, + dictWord{7, 0, 940}, + dictWord{8, 0, 514}, + dictWord{9, 0, 116}, + dictWord{9, 0, 535}, + dictWord{10, 0, 118}, + dictWord{11, 0, 107}, + dictWord{ + 11, + 0, + 148, + }, + dictWord{11, 0, 922}, + dictWord{12, 0, 254}, + dictWord{12, 0, 421}, + dictWord{142, 0, 238}, + dictWord{5, 10, 18}, + dictWord{6, 10, 526}, + dictWord{13, 10, 24}, + dictWord{13, 10, 110}, + dictWord{19, 10, 5}, + dictWord{147, 10, 44}, + dictWord{132, 0, 743}, + dictWord{11, 0, 292}, + dictWord{4, 10, 309}, + dictWord{5, 10, 462}, + dictWord{7, 10, 970}, + dictWord{135, 10, 1097}, + dictWord{22, 10, 30}, + dictWord{150, 10, 33}, + dictWord{139, 11, 338}, + dictWord{135, 11, 1598}, + dictWord{ + 7, + 0, + 
1283, + }, + dictWord{9, 0, 227}, + dictWord{11, 0, 325}, + dictWord{11, 0, 408}, + dictWord{14, 0, 180}, + dictWord{146, 0, 47}, + dictWord{4, 0, 953}, + dictWord{6, 0, 1805}, + dictWord{6, 0, 1814}, + dictWord{6, 0, 1862}, + dictWord{140, 0, 774}, + dictWord{6, 11, 611}, + dictWord{135, 11, 1733}, + dictWord{135, 11, 1464}, + dictWord{ + 5, + 0, + 81, + }, + dictWord{7, 0, 146}, + dictWord{7, 0, 1342}, + dictWord{8, 0, 53}, + dictWord{8, 0, 561}, + dictWord{8, 0, 694}, + dictWord{8, 0, 754}, + dictWord{9, 0, 115}, + dictWord{ + 9, + 0, + 179, + }, + dictWord{9, 0, 894}, + dictWord{10, 0, 462}, + dictWord{10, 0, 813}, + dictWord{11, 0, 230}, + dictWord{11, 0, 657}, + dictWord{11, 0, 699}, + dictWord{11, 0, 748}, + dictWord{12, 0, 119}, + dictWord{12, 0, 200}, + dictWord{12, 0, 283}, + dictWord{142, 0, 273}, + dictWord{5, 0, 408}, + dictWord{6, 0, 789}, + dictWord{6, 0, 877}, + dictWord{ + 6, + 0, + 1253, + }, + dictWord{6, 0, 1413}, + dictWord{137, 0, 747}, + dictWord{134, 10, 1704}, + dictWord{135, 11, 663}, + dictWord{6, 0, 1910}, + dictWord{6, 0, 1915}, + dictWord{6, 0, 1923}, + dictWord{9, 0, 913}, + dictWord{9, 0, 928}, + dictWord{9, 0, 950}, + dictWord{9, 0, 954}, + dictWord{9, 0, 978}, + dictWord{9, 0, 993}, + dictWord{12, 0, 812}, + dictWord{12, 0, 819}, + dictWord{12, 0, 831}, + dictWord{12, 0, 833}, + dictWord{12, 0, 838}, + dictWord{12, 0, 909}, + dictWord{12, 0, 928}, + dictWord{12, 0, 931}, + dictWord{12, 0, 950}, + dictWord{15, 0, 186}, + dictWord{15, 0, 187}, + dictWord{15, 0, 195}, + dictWord{15, 0, 196}, + dictWord{15, 0, 209}, + dictWord{15, 0, 215}, + dictWord{ + 15, + 0, + 236, + }, + dictWord{15, 0, 241}, + dictWord{15, 0, 249}, + dictWord{15, 0, 253}, + dictWord{18, 0, 180}, + dictWord{18, 0, 221}, + dictWord{18, 0, 224}, + dictWord{ + 18, + 0, + 227, + }, + dictWord{18, 0, 229}, + dictWord{149, 0, 60}, + dictWord{7, 0, 1826}, + dictWord{135, 0, 1938}, + dictWord{11, 0, 490}, + dictWord{18, 0, 143}, + dictWord{ + 5, + 10, + 86, + }, + dictWord{7, 10, 743}, + dictWord{9, 10, 85}, + dictWord{10, 10, 281}, + dictWord{10, 10, 432}, + dictWord{12, 10, 251}, + dictWord{13, 10, 118}, + dictWord{ + 142, + 10, + 378, + }, + dictWord{5, 10, 524}, + dictWord{133, 10, 744}, + dictWord{141, 11, 442}, + dictWord{10, 10, 107}, + dictWord{140, 10, 436}, + dictWord{135, 11, 503}, + dictWord{134, 0, 1162}, + dictWord{132, 10, 927}, + dictWord{7, 0, 30}, + dictWord{8, 0, 86}, + dictWord{8, 0, 315}, + dictWord{8, 0, 700}, + dictWord{9, 0, 576}, + dictWord{ + 9, + 0, + 858, + }, + dictWord{10, 0, 414}, + dictWord{11, 0, 310}, + dictWord{11, 0, 888}, + dictWord{11, 0, 904}, + dictWord{12, 0, 361}, + dictWord{13, 0, 248}, + dictWord{13, 0, 371}, + dictWord{14, 0, 142}, + dictWord{12, 10, 670}, + dictWord{146, 10, 94}, + dictWord{134, 0, 721}, + dictWord{4, 11, 113}, + dictWord{5, 11, 163}, + dictWord{5, 11, 735}, + dictWord{7, 11, 1009}, + dictWord{7, 10, 1149}, + dictWord{9, 11, 9}, + dictWord{9, 10, 156}, + dictWord{9, 11, 771}, + dictWord{12, 11, 90}, + dictWord{13, 11, 138}, + dictWord{13, 11, 410}, + dictWord{143, 11, 128}, + dictWord{138, 0, 839}, + dictWord{133, 10, 778}, + dictWord{137, 0, 617}, + dictWord{133, 10, 502}, + dictWord{ + 8, + 10, + 196, + }, + dictWord{10, 10, 283}, + dictWord{139, 10, 406}, + dictWord{6, 0, 428}, + dictWord{7, 0, 524}, + dictWord{8, 0, 169}, + dictWord{8, 0, 234}, + dictWord{9, 0, 480}, + dictWord{138, 0, 646}, + dictWord{133, 10, 855}, + dictWord{134, 0, 1648}, + dictWord{7, 0, 1205}, + dictWord{138, 0, 637}, + dictWord{7, 0, 1596}, + dictWord{ + 4, 
+ 11, + 935, + }, + dictWord{133, 11, 823}, + dictWord{5, 11, 269}, + dictWord{7, 11, 434}, + dictWord{7, 11, 891}, + dictWord{8, 11, 339}, + dictWord{9, 11, 702}, + dictWord{ + 11, + 11, + 594, + }, + dictWord{11, 11, 718}, + dictWord{145, 11, 100}, + dictWord{7, 11, 878}, + dictWord{9, 11, 485}, + dictWord{141, 11, 264}, + dictWord{4, 0, 266}, + dictWord{ + 8, + 0, + 4, + }, + dictWord{9, 0, 39}, + dictWord{10, 0, 166}, + dictWord{11, 0, 918}, + dictWord{12, 0, 635}, + dictWord{20, 0, 10}, + dictWord{22, 0, 27}, + dictWord{22, 0, 43}, + dictWord{ + 22, + 0, + 52, + }, + dictWord{134, 11, 1713}, + dictWord{7, 10, 1400}, + dictWord{9, 10, 446}, + dictWord{138, 10, 45}, + dictWord{135, 11, 900}, + dictWord{132, 0, 862}, + dictWord{134, 0, 1554}, + dictWord{135, 11, 1033}, + dictWord{19, 0, 16}, + dictWord{147, 11, 16}, + dictWord{135, 11, 1208}, + dictWord{7, 0, 157}, + dictWord{ + 136, + 0, + 279, + }, + dictWord{6, 0, 604}, + dictWord{136, 0, 391}, + dictWord{13, 10, 455}, + dictWord{15, 10, 99}, + dictWord{15, 10, 129}, + dictWord{144, 10, 68}, + dictWord{ + 135, + 10, + 172, + }, + dictWord{7, 0, 945}, + dictWord{11, 0, 713}, + dictWord{139, 0, 744}, + dictWord{4, 0, 973}, + dictWord{10, 0, 877}, + dictWord{10, 0, 937}, + dictWord{ + 10, + 0, + 938, + }, + dictWord{140, 0, 711}, + dictWord{139, 0, 1022}, + dictWord{132, 10, 568}, + dictWord{142, 11, 143}, + dictWord{4, 0, 567}, + dictWord{9, 0, 859}, + dictWord{ + 132, + 10, + 732, + }, + dictWord{7, 0, 1846}, + dictWord{136, 0, 628}, + dictWord{136, 10, 733}, + dictWord{133, 0, 762}, + dictWord{4, 10, 428}, + dictWord{135, 10, 1789}, + dictWord{10, 0, 784}, + dictWord{13, 0, 191}, + dictWord{7, 10, 2015}, + dictWord{140, 10, 665}, + dictWord{133, 0, 298}, + dictWord{7, 0, 633}, + dictWord{7, 0, 905}, + dictWord{7, 0, 909}, + dictWord{7, 0, 1538}, + dictWord{9, 0, 767}, + dictWord{140, 0, 636}, + dictWord{138, 10, 806}, + dictWord{132, 0, 795}, + dictWord{139, 0, 301}, + dictWord{135, 0, 1970}, + dictWord{5, 11, 625}, + dictWord{135, 11, 1617}, + dictWord{135, 11, 275}, + dictWord{7, 11, 37}, + dictWord{8, 11, 425}, + dictWord{ + 8, + 11, + 693, + }, + dictWord{9, 11, 720}, + dictWord{10, 11, 380}, + dictWord{10, 11, 638}, + dictWord{11, 11, 273}, + dictWord{11, 11, 307}, + dictWord{11, 11, 473}, + dictWord{ + 12, + 11, + 61, + }, + dictWord{143, 11, 43}, + dictWord{135, 11, 198}, + dictWord{134, 0, 1236}, + dictWord{7, 0, 369}, + dictWord{12, 0, 644}, + dictWord{12, 0, 645}, + dictWord{144, 0, 90}, + dictWord{19, 0, 15}, + dictWord{149, 0, 27}, + dictWord{6, 0, 71}, + dictWord{7, 0, 845}, + dictWord{8, 0, 160}, + dictWord{9, 0, 318}, + dictWord{6, 10, 1623}, + dictWord{134, 10, 1681}, + dictWord{134, 0, 1447}, + dictWord{134, 0, 1255}, + dictWord{138, 0, 735}, + dictWord{8, 0, 76}, + dictWord{132, 11, 168}, + dictWord{ + 6, + 10, + 1748, + }, + dictWord{8, 10, 715}, + dictWord{9, 10, 802}, + dictWord{10, 10, 46}, + dictWord{10, 10, 819}, + dictWord{13, 10, 308}, + dictWord{14, 10, 351}, + dictWord{14, 10, 363}, + dictWord{146, 10, 67}, + dictWord{135, 11, 91}, + dictWord{6, 0, 474}, + dictWord{4, 10, 63}, + dictWord{133, 10, 347}, + dictWord{133, 10, 749}, + dictWord{138, 0, 841}, + dictWord{133, 10, 366}, + dictWord{6, 0, 836}, + dictWord{132, 11, 225}, + dictWord{135, 0, 1622}, + dictWord{135, 10, 89}, + dictWord{ + 140, + 0, + 735, + }, + dictWord{134, 0, 1601}, + dictWord{138, 11, 145}, + dictWord{6, 0, 1390}, + dictWord{137, 0, 804}, + dictWord{142, 0, 394}, + dictWord{6, 11, 15}, + dictWord{ + 7, + 11, + 70, + }, + dictWord{10, 
11, 240}, + dictWord{147, 11, 93}, + dictWord{6, 0, 96}, + dictWord{135, 0, 1426}, + dictWord{4, 0, 651}, + dictWord{133, 0, 289}, + dictWord{ + 7, + 11, + 956, + }, + dictWord{7, 10, 977}, + dictWord{7, 11, 1157}, + dictWord{7, 11, 1506}, + dictWord{7, 11, 1606}, + dictWord{7, 11, 1615}, + dictWord{7, 11, 1619}, + dictWord{ + 7, + 11, + 1736, + }, + dictWord{7, 11, 1775}, + dictWord{8, 11, 590}, + dictWord{9, 11, 324}, + dictWord{9, 11, 736}, + dictWord{9, 11, 774}, + dictWord{9, 11, 776}, + dictWord{ + 9, + 11, + 784, + }, + dictWord{10, 11, 567}, + dictWord{10, 11, 708}, + dictWord{11, 11, 518}, + dictWord{11, 11, 613}, + dictWord{11, 11, 695}, + dictWord{11, 11, 716}, + dictWord{11, 11, 739}, + dictWord{11, 11, 770}, + dictWord{11, 11, 771}, + dictWord{11, 11, 848}, + dictWord{11, 11, 857}, + dictWord{11, 11, 931}, + dictWord{ + 11, + 11, + 947, + }, + dictWord{12, 11, 326}, + dictWord{12, 11, 387}, + dictWord{12, 11, 484}, + dictWord{12, 11, 528}, + dictWord{12, 11, 552}, + dictWord{12, 11, 613}, + dictWord{ + 13, + 11, + 189, + }, + dictWord{13, 11, 256}, + dictWord{13, 11, 340}, + dictWord{13, 11, 432}, + dictWord{13, 11, 436}, + dictWord{13, 11, 440}, + dictWord{13, 11, 454}, + dictWord{14, 11, 174}, + dictWord{14, 11, 220}, + dictWord{14, 11, 284}, + dictWord{14, 11, 390}, + dictWord{145, 11, 121}, + dictWord{7, 0, 688}, + dictWord{8, 0, 35}, + dictWord{9, 0, 511}, + dictWord{10, 0, 767}, + dictWord{147, 0, 118}, + dictWord{134, 0, 667}, + dictWord{4, 0, 513}, + dictWord{5, 10, 824}, + dictWord{133, 10, 941}, + dictWord{7, 10, 440}, + dictWord{8, 10, 230}, + dictWord{139, 10, 106}, + dictWord{134, 0, 2034}, + dictWord{135, 11, 1399}, + dictWord{143, 11, 66}, + dictWord{ + 135, + 11, + 1529, + }, + dictWord{4, 11, 145}, + dictWord{6, 11, 176}, + dictWord{7, 11, 395}, + dictWord{9, 11, 562}, + dictWord{144, 11, 28}, + dictWord{132, 11, 501}, + dictWord{132, 0, 704}, + dictWord{134, 0, 1524}, + dictWord{7, 0, 1078}, + dictWord{134, 11, 464}, + dictWord{6, 11, 509}, + dictWord{10, 11, 82}, + dictWord{20, 11, 91}, + dictWord{151, 11, 13}, + dictWord{4, 0, 720}, + dictWord{133, 0, 306}, + dictWord{133, 0, 431}, + dictWord{7, 0, 1196}, + dictWord{4, 10, 914}, + dictWord{5, 10, 800}, + dictWord{133, 10, 852}, + dictWord{135, 11, 1189}, + dictWord{10, 0, 54}, + dictWord{141, 10, 115}, + dictWord{7, 10, 564}, + dictWord{142, 10, 168}, + dictWord{ + 5, + 0, + 464, + }, + dictWord{6, 0, 236}, + dictWord{7, 0, 696}, + dictWord{7, 0, 914}, + dictWord{7, 0, 1108}, + dictWord{7, 0, 1448}, + dictWord{9, 0, 15}, + dictWord{9, 0, 564}, + dictWord{ + 10, + 0, + 14, + }, + dictWord{12, 0, 565}, + dictWord{13, 0, 449}, + dictWord{14, 0, 53}, + dictWord{15, 0, 13}, + dictWord{16, 0, 64}, + dictWord{17, 0, 41}, + dictWord{4, 10, 918}, + dictWord{133, 10, 876}, + dictWord{6, 0, 1418}, + dictWord{134, 10, 1764}, + dictWord{4, 10, 92}, + dictWord{133, 10, 274}, + dictWord{134, 0, 907}, + dictWord{ + 4, + 11, + 114, + }, + dictWord{8, 10, 501}, + dictWord{9, 11, 492}, + dictWord{13, 11, 462}, + dictWord{142, 11, 215}, + dictWord{4, 11, 77}, + dictWord{5, 11, 361}, + dictWord{ + 6, + 11, + 139, + }, + dictWord{6, 11, 401}, + dictWord{6, 11, 404}, + dictWord{7, 11, 413}, + dictWord{7, 11, 715}, + dictWord{7, 11, 1716}, + dictWord{11, 11, 279}, + dictWord{ + 12, + 11, + 179, + }, + dictWord{12, 11, 258}, + dictWord{13, 11, 244}, + dictWord{142, 11, 358}, + dictWord{6, 0, 1767}, + dictWord{12, 0, 194}, + dictWord{145, 0, 107}, + dictWord{ + 134, + 11, + 1717, + }, + dictWord{5, 10, 743}, + dictWord{142, 11, 
329}, + dictWord{4, 10, 49}, + dictWord{7, 10, 280}, + dictWord{135, 10, 1633}, + dictWord{5, 0, 840}, + dictWord{7, 11, 1061}, + dictWord{8, 11, 82}, + dictWord{11, 11, 250}, + dictWord{12, 11, 420}, + dictWord{141, 11, 184}, + dictWord{135, 11, 724}, + dictWord{ + 134, + 0, + 900, + }, + dictWord{136, 10, 47}, + dictWord{134, 0, 1436}, + dictWord{144, 11, 0}, + dictWord{6, 0, 675}, + dictWord{7, 0, 1008}, + dictWord{7, 0, 1560}, + dictWord{ + 9, + 0, + 642, + }, + dictWord{11, 0, 236}, + dictWord{14, 0, 193}, + dictWord{5, 10, 272}, + dictWord{5, 10, 908}, + dictWord{5, 10, 942}, + dictWord{8, 10, 197}, + dictWord{9, 10, 47}, + dictWord{11, 10, 538}, + dictWord{139, 10, 742}, + dictWord{4, 0, 68}, + dictWord{5, 0, 628}, + dictWord{5, 0, 634}, + dictWord{6, 0, 386}, + dictWord{7, 0, 794}, + dictWord{ + 8, + 0, + 273, + }, + dictWord{9, 0, 563}, + dictWord{10, 0, 105}, + dictWord{10, 0, 171}, + dictWord{11, 0, 94}, + dictWord{139, 0, 354}, + dictWord{135, 10, 1911}, + dictWord{ + 137, + 10, + 891, + }, + dictWord{4, 0, 95}, + dictWord{6, 0, 1297}, + dictWord{6, 0, 1604}, + dictWord{7, 0, 416}, + dictWord{139, 0, 830}, + dictWord{6, 11, 513}, + dictWord{ + 135, + 11, + 1052, + }, + dictWord{7, 0, 731}, + dictWord{13, 0, 20}, + dictWord{143, 0, 11}, + dictWord{137, 11, 899}, + dictWord{10, 0, 850}, + dictWord{140, 0, 697}, + dictWord{ + 4, + 0, + 662, + }, + dictWord{7, 11, 1417}, + dictWord{12, 11, 382}, + dictWord{17, 11, 48}, + dictWord{152, 11, 12}, + dictWord{133, 0, 736}, + dictWord{132, 0, 861}, + dictWord{ + 4, + 10, + 407, + }, + dictWord{132, 10, 560}, + dictWord{141, 10, 490}, + dictWord{6, 11, 545}, + dictWord{7, 11, 565}, + dictWord{7, 11, 1669}, + dictWord{10, 11, 114}, + dictWord{11, 11, 642}, + dictWord{140, 11, 618}, + dictWord{6, 0, 871}, + dictWord{134, 0, 1000}, + dictWord{5, 0, 864}, + dictWord{10, 0, 648}, + dictWord{11, 0, 671}, + dictWord{15, 0, 46}, + dictWord{133, 11, 5}, + dictWord{133, 0, 928}, + dictWord{11, 0, 90}, + dictWord{13, 0, 7}, + dictWord{4, 10, 475}, + dictWord{11, 10, 35}, + dictWord{ + 13, + 10, + 71, + }, + dictWord{13, 10, 177}, + dictWord{142, 10, 422}, + dictWord{136, 0, 332}, + dictWord{135, 11, 192}, + dictWord{134, 0, 1055}, + dictWord{136, 11, 763}, + dictWord{11, 0, 986}, + dictWord{140, 0, 682}, + dictWord{7, 0, 76}, + dictWord{8, 0, 44}, + dictWord{9, 0, 884}, + dictWord{10, 0, 580}, + dictWord{11, 0, 399}, + dictWord{ + 11, + 0, + 894, + }, + dictWord{143, 0, 122}, + dictWord{135, 11, 1237}, + dictWord{135, 10, 636}, + dictWord{11, 0, 300}, + dictWord{6, 10, 222}, + dictWord{7, 10, 1620}, + dictWord{ + 8, + 10, + 409, + }, + dictWord{137, 10, 693}, + dictWord{4, 11, 87}, + dictWord{5, 11, 250}, + dictWord{10, 11, 601}, + dictWord{13, 11, 298}, + dictWord{13, 11, 353}, + dictWord{141, 11, 376}, + dictWord{5, 0, 518}, + dictWord{10, 0, 340}, + dictWord{11, 0, 175}, + dictWord{149, 0, 16}, + dictWord{140, 0, 771}, + dictWord{6, 0, 1108}, + dictWord{137, 0, 831}, + dictWord{132, 0, 836}, + dictWord{135, 0, 1852}, + dictWord{4, 0, 957}, + dictWord{6, 0, 1804}, + dictWord{8, 0, 842}, + dictWord{8, 0, 843}, + dictWord{ + 8, + 0, + 851, + }, + dictWord{8, 0, 855}, + dictWord{140, 0, 767}, + dictWord{135, 11, 814}, + dictWord{4, 11, 57}, + dictWord{7, 11, 1195}, + dictWord{7, 11, 1438}, + dictWord{ + 7, + 11, + 1548, + }, + dictWord{7, 11, 1835}, + dictWord{7, 11, 1904}, + dictWord{9, 11, 757}, + dictWord{10, 11, 604}, + dictWord{139, 11, 519}, + dictWord{133, 10, 882}, + dictWord{138, 0, 246}, + dictWord{4, 0, 934}, + dictWord{5, 0, 202}, + 
dictWord{8, 0, 610}, + dictWord{7, 11, 1897}, + dictWord{12, 11, 290}, + dictWord{13, 11, 80}, + dictWord{13, 11, 437}, + dictWord{145, 11, 74}, + dictWord{8, 0, 96}, + dictWord{9, 0, 36}, + dictWord{10, 0, 607}, + dictWord{10, 0, 804}, + dictWord{10, 0, 832}, + dictWord{ + 11, + 0, + 423, + }, + dictWord{11, 0, 442}, + dictWord{12, 0, 309}, + dictWord{14, 0, 199}, + dictWord{15, 0, 90}, + dictWord{145, 0, 110}, + dictWord{132, 10, 426}, + dictWord{ + 7, + 0, + 654, + }, + dictWord{8, 0, 240}, + dictWord{6, 10, 58}, + dictWord{7, 10, 745}, + dictWord{7, 10, 1969}, + dictWord{8, 10, 675}, + dictWord{9, 10, 479}, + dictWord{9, 10, 731}, + dictWord{10, 10, 330}, + dictWord{10, 10, 593}, + dictWord{10, 10, 817}, + dictWord{11, 10, 32}, + dictWord{11, 10, 133}, + dictWord{11, 10, 221}, + dictWord{ + 145, + 10, + 68, + }, + dictWord{9, 0, 13}, + dictWord{9, 0, 398}, + dictWord{9, 0, 727}, + dictWord{10, 0, 75}, + dictWord{10, 0, 184}, + dictWord{10, 0, 230}, + dictWord{10, 0, 564}, + dictWord{ + 10, + 0, + 569, + }, + dictWord{11, 0, 973}, + dictWord{12, 0, 70}, + dictWord{12, 0, 189}, + dictWord{13, 0, 57}, + dictWord{141, 0, 257}, + dictWord{4, 11, 209}, + dictWord{ + 135, + 11, + 902, + }, + dictWord{7, 0, 391}, + dictWord{137, 10, 538}, + dictWord{134, 0, 403}, + dictWord{6, 11, 303}, + dictWord{7, 11, 335}, + dictWord{7, 11, 1437}, + dictWord{ + 7, + 11, + 1668, + }, + dictWord{8, 11, 553}, + dictWord{8, 11, 652}, + dictWord{8, 11, 656}, + dictWord{9, 11, 558}, + dictWord{11, 11, 743}, + dictWord{149, 11, 18}, + dictWord{ + 132, + 11, + 559, + }, + dictWord{11, 0, 75}, + dictWord{142, 0, 267}, + dictWord{6, 0, 815}, + dictWord{141, 11, 2}, + dictWord{141, 0, 366}, + dictWord{137, 0, 631}, + dictWord{ + 133, + 11, + 1017, + }, + dictWord{5, 0, 345}, + dictWord{135, 0, 1016}, + dictWord{133, 11, 709}, + dictWord{134, 11, 1745}, + dictWord{133, 10, 566}, + dictWord{7, 0, 952}, + dictWord{6, 10, 48}, + dictWord{9, 10, 139}, + dictWord{10, 10, 399}, + dictWord{11, 10, 469}, + dictWord{12, 10, 634}, + dictWord{141, 10, 223}, + dictWord{ + 133, + 0, + 673, + }, + dictWord{9, 0, 850}, + dictWord{7, 11, 8}, + dictWord{136, 11, 206}, + dictWord{6, 0, 662}, + dictWord{149, 0, 35}, + dictWord{4, 0, 287}, + dictWord{133, 0, 1018}, + dictWord{6, 10, 114}, + dictWord{7, 10, 1224}, + dictWord{7, 10, 1556}, + dictWord{136, 10, 3}, + dictWord{8, 10, 576}, + dictWord{137, 10, 267}, + dictWord{4, 0, 884}, + dictWord{5, 0, 34}, + dictWord{10, 0, 724}, + dictWord{12, 0, 444}, + dictWord{13, 0, 354}, + dictWord{18, 0, 32}, + dictWord{23, 0, 24}, + dictWord{23, 0, 31}, + dictWord{ + 152, + 0, + 5, + }, + dictWord{133, 10, 933}, + dictWord{132, 11, 776}, + dictWord{138, 0, 151}, + dictWord{136, 0, 427}, + dictWord{134, 0, 382}, + dictWord{132, 0, 329}, + dictWord{ + 9, + 0, + 846, + }, + dictWord{10, 0, 827}, + dictWord{138, 11, 33}, + dictWord{9, 0, 279}, + dictWord{10, 0, 407}, + dictWord{14, 0, 84}, + dictWord{22, 0, 18}, + dictWord{ + 135, + 11, + 1297, + }, + dictWord{136, 11, 406}, + dictWord{132, 0, 906}, + dictWord{136, 0, 366}, + dictWord{134, 0, 843}, + dictWord{134, 0, 1443}, + dictWord{135, 0, 1372}, + dictWord{138, 0, 992}, + dictWord{4, 0, 123}, + dictWord{5, 0, 605}, + dictWord{7, 0, 1509}, + dictWord{136, 0, 36}, + dictWord{132, 0, 649}, + dictWord{8, 11, 175}, + dictWord{10, 11, 168}, + dictWord{138, 11, 573}, + dictWord{133, 0, 767}, + dictWord{134, 0, 1018}, + dictWord{135, 11, 1305}, + dictWord{12, 10, 30}, + dictWord{ + 13, + 10, + 148, + }, + dictWord{14, 10, 87}, + dictWord{14, 10, 182}, + 
dictWord{16, 10, 42}, + dictWord{148, 10, 70}, + dictWord{134, 11, 607}, + dictWord{4, 0, 273}, + dictWord{ + 5, + 0, + 658, + }, + dictWord{133, 0, 995}, + dictWord{6, 0, 72}, + dictWord{139, 11, 174}, + dictWord{10, 0, 483}, + dictWord{12, 0, 368}, + dictWord{7, 10, 56}, + dictWord{ + 7, + 10, + 1989, + }, + dictWord{8, 10, 337}, + dictWord{8, 10, 738}, + dictWord{9, 10, 600}, + dictWord{13, 10, 447}, + dictWord{142, 10, 92}, + dictWord{5, 11, 784}, + dictWord{ + 138, + 10, + 666, + }, + dictWord{135, 0, 1345}, + dictWord{139, 11, 882}, + dictWord{134, 0, 1293}, + dictWord{133, 0, 589}, + dictWord{134, 0, 1988}, + dictWord{5, 0, 117}, + dictWord{6, 0, 514}, + dictWord{6, 0, 541}, + dictWord{7, 0, 1164}, + dictWord{7, 0, 1436}, + dictWord{8, 0, 220}, + dictWord{8, 0, 648}, + dictWord{10, 0, 688}, + dictWord{ + 139, + 0, + 560, + }, + dictWord{136, 0, 379}, + dictWord{5, 0, 686}, + dictWord{7, 10, 866}, + dictWord{135, 10, 1163}, + dictWord{132, 10, 328}, + dictWord{9, 11, 14}, + dictWord{ + 9, + 11, + 441, + }, + dictWord{10, 11, 306}, + dictWord{139, 11, 9}, + dictWord{4, 10, 101}, + dictWord{135, 10, 1171}, + dictWord{5, 10, 833}, + dictWord{136, 10, 744}, + dictWord{5, 11, 161}, + dictWord{7, 11, 839}, + dictWord{135, 11, 887}, + dictWord{7, 0, 196}, + dictWord{10, 0, 765}, + dictWord{11, 0, 347}, + dictWord{11, 0, 552}, + dictWord{11, 0, 790}, + dictWord{12, 0, 263}, + dictWord{13, 0, 246}, + dictWord{13, 0, 270}, + dictWord{13, 0, 395}, + dictWord{14, 0, 176}, + dictWord{14, 0, 190}, + dictWord{ + 14, + 0, + 398, + }, + dictWord{14, 0, 412}, + dictWord{15, 0, 32}, + dictWord{15, 0, 63}, + dictWord{16, 0, 88}, + dictWord{147, 0, 105}, + dictWord{6, 10, 9}, + dictWord{6, 10, 397}, + dictWord{7, 10, 53}, + dictWord{7, 10, 1742}, + dictWord{10, 10, 632}, + dictWord{11, 10, 828}, + dictWord{140, 10, 146}, + dictWord{5, 0, 381}, + dictWord{135, 0, 1792}, + dictWord{134, 0, 1452}, + dictWord{135, 11, 429}, + dictWord{8, 0, 367}, + dictWord{10, 0, 760}, + dictWord{14, 0, 79}, + dictWord{20, 0, 17}, + dictWord{152, 0, 0}, + dictWord{7, 0, 616}, + dictWord{138, 0, 413}, + dictWord{11, 10, 417}, + dictWord{12, 10, 223}, + dictWord{140, 10, 265}, + dictWord{7, 11, 1611}, + dictWord{13, 11, 14}, + dictWord{15, 11, 44}, + dictWord{19, 11, 13}, + dictWord{148, 11, 76}, + dictWord{135, 0, 1229}, + dictWord{6, 0, 120}, + dictWord{7, 0, 1188}, + dictWord{7, 0, 1710}, + dictWord{8, 0, 286}, + dictWord{9, 0, 667}, + dictWord{11, 0, 592}, + dictWord{139, 0, 730}, + dictWord{135, 11, 1814}, + dictWord{135, 0, 1146}, + dictWord{4, 10, 186}, + dictWord{5, 10, 157}, + dictWord{8, 10, 168}, + dictWord{138, 10, 6}, + dictWord{4, 0, 352}, + dictWord{135, 0, 687}, + dictWord{4, 0, 192}, + dictWord{5, 0, 49}, + dictWord{ + 6, + 0, + 200, + }, + dictWord{6, 0, 293}, + dictWord{6, 0, 1696}, + dictWord{135, 0, 1151}, + dictWord{133, 10, 875}, + dictWord{5, 10, 773}, + dictWord{5, 10, 991}, + dictWord{ + 6, + 10, + 1635, + }, + dictWord{134, 10, 1788}, + dictWord{7, 10, 111}, + dictWord{136, 10, 581}, + dictWord{6, 0, 935}, + dictWord{134, 0, 1151}, + dictWord{134, 0, 1050}, + dictWord{132, 0, 650}, + dictWord{132, 0, 147}, + dictWord{11, 0, 194}, + dictWord{12, 0, 62}, + dictWord{12, 0, 88}, + dictWord{11, 11, 194}, + dictWord{12, 11, 62}, + dictWord{140, 11, 88}, + dictWord{6, 0, 339}, + dictWord{135, 0, 923}, + dictWord{134, 10, 1747}, + dictWord{7, 11, 643}, + dictWord{136, 11, 236}, + dictWord{ + 133, + 0, + 934, + }, + dictWord{7, 10, 1364}, + dictWord{7, 10, 1907}, + dictWord{141, 10, 158}, + dictWord{132, 10, 
659}, + dictWord{4, 10, 404}, + dictWord{135, 10, 675}, + dictWord{7, 11, 581}, + dictWord{9, 11, 644}, + dictWord{137, 11, 699}, + dictWord{13, 0, 211}, + dictWord{14, 0, 133}, + dictWord{14, 0, 204}, + dictWord{15, 0, 64}, + dictWord{ + 15, + 0, + 69, + }, + dictWord{15, 0, 114}, + dictWord{16, 0, 10}, + dictWord{19, 0, 23}, + dictWord{19, 0, 35}, + dictWord{19, 0, 39}, + dictWord{19, 0, 51}, + dictWord{19, 0, 71}, + dictWord{19, 0, 75}, + dictWord{152, 0, 15}, + dictWord{133, 10, 391}, + dictWord{5, 11, 54}, + dictWord{135, 11, 1513}, + dictWord{7, 0, 222}, + dictWord{8, 0, 341}, + dictWord{ + 5, + 10, + 540, + }, + dictWord{134, 10, 1697}, + dictWord{134, 10, 78}, + dictWord{132, 11, 744}, + dictWord{136, 0, 293}, + dictWord{137, 11, 701}, + dictWord{ + 7, + 11, + 930, + }, + dictWord{10, 11, 402}, + dictWord{10, 11, 476}, + dictWord{13, 11, 452}, + dictWord{18, 11, 55}, + dictWord{147, 11, 104}, + dictWord{132, 0, 637}, + dictWord{133, 10, 460}, + dictWord{8, 11, 50}, + dictWord{137, 11, 624}, + dictWord{132, 11, 572}, + dictWord{134, 0, 1159}, + dictWord{4, 10, 199}, + dictWord{ + 139, + 10, + 34, + }, + dictWord{134, 0, 847}, + dictWord{134, 10, 388}, + dictWord{6, 11, 43}, + dictWord{7, 11, 38}, + dictWord{8, 11, 248}, + dictWord{9, 11, 504}, + dictWord{ + 138, + 11, + 513, + }, + dictWord{9, 0, 683}, + dictWord{4, 10, 511}, + dictWord{6, 10, 608}, + dictWord{9, 10, 333}, + dictWord{10, 10, 602}, + dictWord{11, 10, 441}, + dictWord{ + 11, + 10, + 723, + }, + dictWord{11, 10, 976}, + dictWord{140, 10, 357}, + dictWord{9, 0, 867}, + dictWord{138, 0, 837}, + dictWord{6, 0, 944}, + dictWord{135, 11, 326}, + dictWord{ + 135, + 0, + 1809, + }, + dictWord{5, 10, 938}, + dictWord{7, 11, 783}, + dictWord{136, 10, 707}, + dictWord{133, 11, 766}, + dictWord{133, 11, 363}, + dictWord{6, 0, 170}, + dictWord{7, 0, 1080}, + dictWord{8, 0, 395}, + dictWord{8, 0, 487}, + dictWord{141, 0, 147}, + dictWord{6, 11, 258}, + dictWord{140, 11, 409}, + dictWord{4, 0, 535}, + dictWord{ + 8, + 0, + 618, + }, + dictWord{5, 11, 249}, + dictWord{148, 11, 82}, + dictWord{6, 0, 1379}, + dictWord{149, 11, 15}, + dictWord{135, 0, 1625}, + dictWord{150, 0, 23}, + dictWord{ + 5, + 11, + 393, + }, + dictWord{6, 11, 378}, + dictWord{7, 11, 1981}, + dictWord{9, 11, 32}, + dictWord{9, 11, 591}, + dictWord{10, 11, 685}, + dictWord{10, 11, 741}, + dictWord{ + 142, + 11, + 382, + }, + dictWord{133, 11, 788}, + dictWord{7, 11, 1968}, + dictWord{10, 11, 19}, + dictWord{139, 11, 911}, + dictWord{7, 11, 1401}, + dictWord{ + 135, + 11, + 1476, + }, + dictWord{4, 11, 61}, + dictWord{5, 11, 58}, + dictWord{5, 11, 171}, + dictWord{5, 11, 635}, + dictWord{5, 11, 683}, + dictWord{5, 11, 700}, + dictWord{6, 11, 291}, + dictWord{6, 11, 566}, + dictWord{7, 11, 1650}, + dictWord{11, 11, 523}, + dictWord{12, 11, 273}, + dictWord{12, 11, 303}, + dictWord{15, 11, 39}, + dictWord{ + 143, + 11, + 111, + }, + dictWord{6, 10, 469}, + dictWord{7, 10, 1709}, + dictWord{138, 10, 515}, + dictWord{4, 0, 778}, + dictWord{134, 11, 589}, + dictWord{132, 0, 46}, + dictWord{ + 5, + 0, + 811, + }, + dictWord{6, 0, 1679}, + dictWord{6, 0, 1714}, + dictWord{135, 0, 2032}, + dictWord{7, 0, 1458}, + dictWord{9, 0, 407}, + dictWord{11, 0, 15}, + dictWord{12, 0, 651}, + dictWord{149, 0, 37}, + dictWord{7, 0, 938}, + dictWord{132, 10, 500}, + dictWord{6, 0, 34}, + dictWord{7, 0, 69}, + dictWord{7, 0, 1089}, + dictWord{7, 0, 1281}, + dictWord{ + 8, + 0, + 708, + }, + dictWord{8, 0, 721}, + dictWord{9, 0, 363}, + dictWord{148, 0, 98}, + dictWord{10, 11, 231}, + 
dictWord{147, 11, 124}, + dictWord{7, 11, 726}, + dictWord{ + 152, + 11, + 9, + }, + dictWord{5, 10, 68}, + dictWord{134, 10, 383}, + dictWord{136, 11, 583}, + dictWord{4, 11, 917}, + dictWord{133, 11, 1005}, + dictWord{11, 10, 216}, + dictWord{139, 10, 340}, + dictWord{135, 11, 1675}, + dictWord{8, 0, 441}, + dictWord{10, 0, 314}, + dictWord{143, 0, 3}, + dictWord{132, 11, 919}, + dictWord{4, 10, 337}, + dictWord{6, 10, 353}, + dictWord{7, 10, 1934}, + dictWord{8, 10, 488}, + dictWord{137, 10, 429}, + dictWord{7, 0, 889}, + dictWord{7, 10, 1795}, + dictWord{8, 10, 259}, + dictWord{9, 10, 135}, + dictWord{9, 10, 177}, + dictWord{9, 10, 860}, + dictWord{10, 10, 825}, + dictWord{11, 10, 115}, + dictWord{11, 10, 370}, + dictWord{11, 10, 405}, + dictWord{11, 10, 604}, + dictWord{12, 10, 10}, + dictWord{12, 10, 667}, + dictWord{12, 10, 669}, + dictWord{13, 10, 76}, + dictWord{14, 10, 310}, + dictWord{ + 15, + 10, + 76, + }, + dictWord{15, 10, 147}, + dictWord{148, 10, 23}, + dictWord{4, 10, 15}, + dictWord{4, 11, 255}, + dictWord{5, 10, 22}, + dictWord{5, 11, 302}, + dictWord{6, 11, 132}, + dictWord{6, 10, 244}, + dictWord{7, 10, 40}, + dictWord{7, 11, 128}, + dictWord{7, 10, 200}, + dictWord{7, 11, 283}, + dictWord{7, 10, 906}, + dictWord{7, 10, 1199}, + dictWord{ + 7, + 11, + 1299, + }, + dictWord{9, 10, 616}, + dictWord{10, 11, 52}, + dictWord{10, 11, 514}, + dictWord{10, 10, 716}, + dictWord{11, 10, 635}, + dictWord{11, 10, 801}, + dictWord{11, 11, 925}, + dictWord{12, 10, 458}, + dictWord{13, 11, 92}, + dictWord{142, 11, 309}, + dictWord{132, 0, 462}, + dictWord{137, 11, 173}, + dictWord{ + 135, + 10, + 1735, + }, + dictWord{8, 0, 525}, + dictWord{5, 10, 598}, + dictWord{7, 10, 791}, + dictWord{8, 10, 108}, + dictWord{137, 10, 123}, + dictWord{5, 0, 73}, + dictWord{6, 0, 23}, + dictWord{134, 0, 338}, + dictWord{132, 0, 676}, + dictWord{132, 10, 683}, + dictWord{7, 0, 725}, + dictWord{8, 0, 498}, + dictWord{139, 0, 268}, + dictWord{12, 0, 21}, + dictWord{151, 0, 7}, + dictWord{135, 0, 773}, + dictWord{4, 10, 155}, + dictWord{135, 10, 1689}, + dictWord{4, 0, 164}, + dictWord{5, 0, 730}, + dictWord{5, 10, 151}, + dictWord{ + 5, + 10, + 741, + }, + dictWord{6, 11, 210}, + dictWord{7, 10, 498}, + dictWord{7, 10, 870}, + dictWord{7, 10, 1542}, + dictWord{12, 10, 213}, + dictWord{14, 10, 36}, + dictWord{ + 14, + 10, + 391, + }, + dictWord{17, 10, 111}, + dictWord{18, 10, 6}, + dictWord{18, 10, 46}, + dictWord{18, 10, 151}, + dictWord{19, 10, 36}, + dictWord{20, 10, 32}, + dictWord{ + 20, + 10, + 56, + }, + dictWord{20, 10, 69}, + dictWord{20, 10, 102}, + dictWord{21, 10, 4}, + dictWord{22, 10, 8}, + dictWord{22, 10, 10}, + dictWord{22, 10, 14}, + dictWord{ + 150, + 10, + 31, + }, + dictWord{4, 10, 624}, + dictWord{135, 10, 1752}, + dictWord{4, 0, 583}, + dictWord{9, 0, 936}, + dictWord{15, 0, 214}, + dictWord{18, 0, 199}, + dictWord{24, 0, 26}, + dictWord{134, 11, 588}, + dictWord{7, 0, 1462}, + dictWord{11, 0, 659}, + dictWord{4, 11, 284}, + dictWord{134, 11, 223}, + dictWord{133, 0, 220}, + dictWord{ + 139, + 0, + 803, + }, + dictWord{132, 0, 544}, + dictWord{4, 10, 492}, + dictWord{133, 10, 451}, + dictWord{16, 0, 98}, + dictWord{148, 0, 119}, + dictWord{4, 11, 218}, + dictWord{ + 7, + 11, + 526, + }, + dictWord{143, 11, 137}, + dictWord{135, 10, 835}, + dictWord{4, 11, 270}, + dictWord{5, 11, 192}, + dictWord{6, 11, 332}, + dictWord{7, 11, 1322}, + dictWord{ + 13, + 11, + 9, + }, + dictWord{13, 10, 70}, + dictWord{14, 11, 104}, + dictWord{142, 11, 311}, + dictWord{132, 10, 539}, + 
dictWord{140, 11, 661}, + dictWord{5, 0, 176}, + dictWord{ + 6, + 0, + 437, + }, + dictWord{6, 0, 564}, + dictWord{11, 0, 181}, + dictWord{141, 0, 183}, + dictWord{135, 0, 1192}, + dictWord{6, 10, 113}, + dictWord{135, 10, 436}, + dictWord{136, 10, 718}, + dictWord{135, 10, 520}, + dictWord{135, 0, 1878}, + dictWord{140, 11, 196}, + dictWord{7, 11, 379}, + dictWord{8, 11, 481}, + dictWord{ + 137, + 11, + 377, + }, + dictWord{5, 11, 1003}, + dictWord{6, 11, 149}, + dictWord{137, 11, 746}, + dictWord{8, 11, 262}, + dictWord{9, 11, 627}, + dictWord{10, 11, 18}, + dictWord{ + 11, + 11, + 214, + }, + dictWord{11, 11, 404}, + dictWord{11, 11, 457}, + dictWord{11, 11, 780}, + dictWord{11, 11, 849}, + dictWord{11, 11, 913}, + dictWord{13, 11, 330}, + dictWord{13, 11, 401}, + dictWord{142, 11, 200}, + dictWord{149, 0, 26}, + dictWord{136, 11, 304}, + dictWord{132, 11, 142}, + dictWord{135, 0, 944}, + dictWord{ + 4, + 0, + 790, + }, + dictWord{5, 0, 273}, + dictWord{134, 0, 394}, + dictWord{134, 0, 855}, + dictWord{4, 0, 135}, + dictWord{6, 0, 127}, + dictWord{7, 0, 1185}, + dictWord{7, 0, 1511}, + dictWord{8, 0, 613}, + dictWord{11, 0, 5}, + dictWord{12, 0, 336}, + dictWord{12, 0, 495}, + dictWord{12, 0, 586}, + dictWord{12, 0, 660}, + dictWord{12, 0, 668}, + dictWord{ + 14, + 0, + 385, + }, + dictWord{15, 0, 118}, + dictWord{17, 0, 20}, + dictWord{146, 0, 98}, + dictWord{6, 0, 230}, + dictWord{9, 0, 752}, + dictWord{18, 0, 109}, + dictWord{12, 10, 610}, + dictWord{13, 10, 431}, + dictWord{144, 10, 59}, + dictWord{7, 0, 1954}, + dictWord{135, 11, 925}, + dictWord{4, 11, 471}, + dictWord{5, 11, 51}, + dictWord{6, 11, 602}, + dictWord{8, 11, 484}, + dictWord{10, 11, 195}, + dictWord{140, 11, 159}, + dictWord{132, 10, 307}, + dictWord{136, 11, 688}, + dictWord{132, 11, 697}, + dictWord{ + 7, + 11, + 812, + }, + dictWord{7, 11, 1261}, + dictWord{7, 11, 1360}, + dictWord{9, 11, 632}, + dictWord{140, 11, 352}, + dictWord{5, 0, 162}, + dictWord{8, 0, 68}, + dictWord{ + 133, + 10, + 964, + }, + dictWord{4, 0, 654}, + dictWord{136, 11, 212}, + dictWord{4, 0, 156}, + dictWord{7, 0, 998}, + dictWord{7, 0, 1045}, + dictWord{7, 0, 1860}, + dictWord{9, 0, 48}, + dictWord{9, 0, 692}, + dictWord{11, 0, 419}, + dictWord{139, 0, 602}, + dictWord{133, 11, 221}, + dictWord{4, 11, 373}, + dictWord{5, 11, 283}, + dictWord{6, 11, 480}, + dictWord{135, 11, 609}, + dictWord{142, 11, 216}, + dictWord{132, 0, 240}, + dictWord{6, 11, 192}, + dictWord{9, 11, 793}, + dictWord{145, 11, 55}, + dictWord{ + 4, + 10, + 75, + }, + dictWord{5, 10, 180}, + dictWord{6, 10, 500}, + dictWord{7, 10, 58}, + dictWord{7, 10, 710}, + dictWord{138, 10, 645}, + dictWord{4, 11, 132}, + dictWord{5, 11, 69}, + dictWord{5, 10, 649}, + dictWord{135, 11, 1242}, + dictWord{6, 10, 276}, + dictWord{7, 10, 282}, + dictWord{7, 10, 879}, + dictWord{7, 10, 924}, + dictWord{8, 10, 459}, + dictWord{9, 10, 599}, + dictWord{9, 10, 754}, + dictWord{11, 10, 574}, + dictWord{12, 10, 128}, + dictWord{12, 10, 494}, + dictWord{13, 10, 52}, + dictWord{13, 10, 301}, + dictWord{15, 10, 30}, + dictWord{143, 10, 132}, + dictWord{132, 10, 200}, + dictWord{4, 11, 111}, + dictWord{135, 11, 302}, + dictWord{9, 0, 197}, + dictWord{ + 10, + 0, + 300, + }, + dictWord{12, 0, 473}, + dictWord{13, 0, 90}, + dictWord{141, 0, 405}, + dictWord{132, 11, 767}, + dictWord{6, 11, 42}, + dictWord{7, 11, 1416}, + dictWord{ + 7, + 11, + 1590, + }, + dictWord{7, 11, 2005}, + dictWord{8, 11, 131}, + dictWord{8, 11, 466}, + dictWord{9, 11, 672}, + dictWord{13, 11, 252}, + dictWord{148, 11, 103}, 
+ dictWord{ + 8, + 0, + 958, + }, + dictWord{8, 0, 999}, + dictWord{10, 0, 963}, + dictWord{138, 0, 1001}, + dictWord{135, 10, 1621}, + dictWord{135, 0, 858}, + dictWord{4, 0, 606}, + dictWord{ + 137, + 11, + 444, + }, + dictWord{6, 11, 44}, + dictWord{136, 11, 368}, + dictWord{139, 11, 172}, + dictWord{4, 11, 570}, + dictWord{133, 11, 120}, + dictWord{139, 11, 624}, + dictWord{7, 0, 1978}, + dictWord{8, 0, 676}, + dictWord{6, 10, 225}, + dictWord{137, 10, 211}, + dictWord{7, 0, 972}, + dictWord{11, 0, 102}, + dictWord{136, 10, 687}, + dictWord{6, 11, 227}, + dictWord{135, 11, 1589}, + dictWord{8, 10, 58}, + dictWord{9, 10, 724}, + dictWord{11, 10, 809}, + dictWord{13, 10, 113}, + dictWord{ + 145, + 10, + 72, + }, + dictWord{4, 0, 361}, + dictWord{133, 0, 315}, + dictWord{132, 0, 461}, + dictWord{6, 10, 345}, + dictWord{135, 10, 1247}, + dictWord{132, 0, 472}, + dictWord{ + 8, + 10, + 767, + }, + dictWord{8, 10, 803}, + dictWord{9, 10, 301}, + dictWord{137, 10, 903}, + dictWord{135, 11, 1333}, + dictWord{135, 11, 477}, + dictWord{7, 10, 1949}, + dictWord{136, 10, 674}, + dictWord{6, 0, 905}, + dictWord{138, 0, 747}, + dictWord{133, 0, 155}, + dictWord{134, 10, 259}, + dictWord{7, 0, 163}, + dictWord{8, 0, 319}, + dictWord{9, 0, 402}, + dictWord{10, 0, 24}, + dictWord{10, 0, 681}, + dictWord{11, 0, 200}, + dictWord{12, 0, 253}, + dictWord{12, 0, 410}, + dictWord{142, 0, 219}, + dictWord{ + 5, + 0, + 475, + }, + dictWord{7, 0, 1780}, + dictWord{9, 0, 230}, + dictWord{11, 0, 297}, + dictWord{11, 0, 558}, + dictWord{14, 0, 322}, + dictWord{19, 0, 76}, + dictWord{6, 11, 1667}, + dictWord{7, 11, 2036}, + dictWord{138, 11, 600}, + dictWord{136, 10, 254}, + dictWord{6, 0, 848}, + dictWord{135, 0, 1956}, + dictWord{6, 11, 511}, + dictWord{ + 140, + 11, + 132, + }, + dictWord{5, 11, 568}, + dictWord{6, 11, 138}, + dictWord{135, 11, 1293}, + dictWord{6, 0, 631}, + dictWord{137, 0, 838}, + dictWord{149, 0, 36}, + dictWord{ + 4, + 11, + 565, + }, + dictWord{8, 11, 23}, + dictWord{136, 11, 827}, + dictWord{5, 0, 944}, + dictWord{134, 0, 1769}, + dictWord{4, 0, 144}, + dictWord{6, 0, 842}, + dictWord{ + 6, + 0, + 1400, + }, + dictWord{4, 11, 922}, + dictWord{133, 11, 1023}, + dictWord{133, 10, 248}, + dictWord{9, 10, 800}, + dictWord{10, 10, 693}, + dictWord{11, 10, 482}, + dictWord{11, 10, 734}, + dictWord{139, 10, 789}, + dictWord{7, 11, 1002}, + dictWord{139, 11, 145}, + dictWord{4, 10, 116}, + dictWord{5, 10, 95}, + dictWord{5, 10, 445}, + dictWord{7, 10, 1688}, + dictWord{8, 10, 29}, + dictWord{9, 10, 272}, + dictWord{11, 10, 509}, + dictWord{139, 10, 915}, + dictWord{14, 0, 369}, + dictWord{146, 0, 72}, + dictWord{135, 10, 1641}, + dictWord{132, 11, 740}, + dictWord{133, 10, 543}, + dictWord{140, 11, 116}, + dictWord{6, 0, 247}, + dictWord{9, 0, 555}, + dictWord{ + 5, + 10, + 181, + }, + dictWord{136, 10, 41}, + dictWord{133, 10, 657}, + dictWord{136, 0, 996}, + dictWord{138, 10, 709}, + dictWord{7, 0, 189}, + dictWord{8, 10, 202}, + dictWord{ + 138, + 10, + 536, + }, + dictWord{136, 11, 402}, + dictWord{4, 11, 716}, + dictWord{141, 11, 31}, + dictWord{10, 0, 280}, + dictWord{138, 0, 797}, + dictWord{9, 10, 423}, + dictWord{140, 10, 89}, + dictWord{8, 10, 113}, + dictWord{9, 10, 877}, + dictWord{10, 10, 554}, + dictWord{11, 10, 83}, + dictWord{12, 10, 136}, + dictWord{147, 10, 109}, + dictWord{133, 10, 976}, + dictWord{7, 0, 746}, + dictWord{132, 10, 206}, + dictWord{136, 0, 526}, + dictWord{139, 0, 345}, + dictWord{136, 0, 1017}, + dictWord{ + 8, + 11, + 152, + }, + dictWord{9, 11, 53}, + 
dictWord{9, 11, 268}, + dictWord{9, 11, 901}, + dictWord{10, 11, 518}, + dictWord{10, 11, 829}, + dictWord{11, 11, 188}, + dictWord{ + 13, + 11, + 74, + }, + dictWord{14, 11, 46}, + dictWord{15, 11, 17}, + dictWord{15, 11, 33}, + dictWord{17, 11, 40}, + dictWord{18, 11, 36}, + dictWord{19, 11, 20}, + dictWord{22, 11, 1}, + dictWord{152, 11, 2}, + dictWord{133, 11, 736}, + dictWord{136, 11, 532}, + dictWord{5, 0, 428}, + dictWord{138, 0, 651}, + dictWord{135, 11, 681}, + dictWord{ + 135, + 0, + 1162, + }, + dictWord{7, 0, 327}, + dictWord{13, 0, 230}, + dictWord{17, 0, 113}, + dictWord{8, 10, 226}, + dictWord{10, 10, 537}, + dictWord{11, 10, 570}, + dictWord{ + 11, + 10, + 605, + }, + dictWord{11, 10, 799}, + dictWord{11, 10, 804}, + dictWord{12, 10, 85}, + dictWord{12, 10, 516}, + dictWord{12, 10, 623}, + dictWord{12, 11, 677}, + dictWord{ + 13, + 10, + 361, + }, + dictWord{14, 10, 77}, + dictWord{14, 10, 78}, + dictWord{147, 10, 110}, + dictWord{4, 0, 792}, + dictWord{7, 0, 1717}, + dictWord{10, 0, 546}, + dictWord{ + 132, + 10, + 769, + }, + dictWord{4, 11, 684}, + dictWord{136, 11, 384}, + dictWord{132, 10, 551}, + dictWord{134, 0, 1203}, + dictWord{9, 10, 57}, + dictWord{9, 10, 459}, + dictWord{10, 10, 425}, + dictWord{11, 10, 119}, + dictWord{12, 10, 184}, + dictWord{12, 10, 371}, + dictWord{13, 10, 358}, + dictWord{145, 10, 51}, + dictWord{5, 0, 672}, + dictWord{5, 10, 814}, + dictWord{8, 10, 10}, + dictWord{9, 10, 421}, + dictWord{9, 10, 729}, + dictWord{10, 10, 609}, + dictWord{139, 10, 689}, + dictWord{138, 0, 189}, + dictWord{134, 10, 624}, + dictWord{7, 11, 110}, + dictWord{7, 11, 188}, + dictWord{8, 11, 290}, + dictWord{8, 11, 591}, + dictWord{9, 11, 382}, + dictWord{9, 11, 649}, + dictWord{11, 11, 71}, + dictWord{11, 11, 155}, + dictWord{11, 11, 313}, + dictWord{12, 11, 5}, + dictWord{13, 11, 325}, + dictWord{142, 11, 287}, + dictWord{133, 0, 99}, + dictWord{6, 0, 1053}, + dictWord{135, 0, 298}, + dictWord{7, 11, 360}, + dictWord{7, 11, 425}, + dictWord{9, 11, 66}, + dictWord{9, 11, 278}, + dictWord{138, 11, 644}, + dictWord{4, 0, 397}, + dictWord{136, 0, 555}, + dictWord{137, 10, 269}, + dictWord{132, 10, 528}, + dictWord{4, 11, 900}, + dictWord{133, 11, 861}, + dictWord{ + 6, + 0, + 1157, + }, + dictWord{5, 11, 254}, + dictWord{7, 11, 985}, + dictWord{136, 11, 73}, + dictWord{7, 11, 1959}, + dictWord{136, 11, 683}, + dictWord{12, 0, 398}, + dictWord{ + 20, + 0, + 39, + }, + dictWord{21, 0, 11}, + dictWord{150, 0, 41}, + dictWord{4, 0, 485}, + dictWord{7, 0, 353}, + dictWord{135, 0, 1523}, + dictWord{6, 0, 366}, + dictWord{7, 0, 1384}, + dictWord{135, 0, 1601}, + dictWord{138, 0, 787}, + dictWord{137, 0, 282}, + dictWord{5, 10, 104}, + dictWord{6, 10, 173}, + dictWord{135, 10, 1631}, + dictWord{ + 139, + 11, + 146, + }, + dictWord{4, 0, 157}, + dictWord{133, 0, 471}, + dictWord{134, 0, 941}, + dictWord{132, 11, 725}, + dictWord{7, 0, 1336}, + dictWord{8, 10, 138}, + dictWord{ + 8, + 10, + 342, + }, + dictWord{9, 10, 84}, + dictWord{10, 10, 193}, + dictWord{11, 10, 883}, + dictWord{140, 10, 359}, + dictWord{134, 11, 196}, + dictWord{136, 0, 116}, + dictWord{133, 11, 831}, + dictWord{134, 0, 787}, + dictWord{134, 10, 95}, + dictWord{6, 10, 406}, + dictWord{10, 10, 409}, + dictWord{10, 10, 447}, + dictWord{ + 11, + 10, + 44, + }, + dictWord{140, 10, 100}, + dictWord{5, 0, 160}, + dictWord{7, 0, 363}, + dictWord{7, 0, 589}, + dictWord{10, 0, 170}, + dictWord{141, 0, 55}, + dictWord{134, 0, 1815}, + dictWord{132, 0, 866}, + dictWord{6, 0, 889}, + dictWord{6, 0, 1067}, + 
dictWord{6, 0, 1183}, + dictWord{4, 11, 321}, + dictWord{134, 11, 569}, + dictWord{5, 11, 848}, + dictWord{134, 11, 66}, + dictWord{4, 11, 36}, + dictWord{6, 10, 1636}, + dictWord{7, 11, 1387}, + dictWord{10, 11, 205}, + dictWord{11, 11, 755}, + dictWord{ + 141, + 11, + 271, + }, + dictWord{132, 0, 689}, + dictWord{9, 0, 820}, + dictWord{4, 10, 282}, + dictWord{7, 10, 1034}, + dictWord{11, 10, 398}, + dictWord{11, 10, 634}, + dictWord{ + 12, + 10, + 1, + }, + dictWord{12, 10, 79}, + dictWord{12, 10, 544}, + dictWord{14, 10, 237}, + dictWord{17, 10, 10}, + dictWord{146, 10, 20}, + dictWord{4, 0, 108}, + dictWord{7, 0, 804}, + dictWord{139, 0, 498}, + dictWord{132, 11, 887}, + dictWord{6, 0, 1119}, + dictWord{135, 11, 620}, + dictWord{6, 11, 165}, + dictWord{138, 11, 388}, + dictWord{ + 5, + 0, + 244, + }, + dictWord{5, 10, 499}, + dictWord{6, 10, 476}, + dictWord{7, 10, 600}, + dictWord{7, 10, 888}, + dictWord{135, 10, 1096}, + dictWord{140, 0, 609}, + dictWord{ + 135, + 0, + 1005, + }, + dictWord{4, 0, 412}, + dictWord{133, 0, 581}, + dictWord{4, 11, 719}, + dictWord{135, 11, 155}, + dictWord{7, 10, 296}, + dictWord{7, 10, 596}, + dictWord{ + 8, + 10, + 560, + }, + dictWord{8, 10, 586}, + dictWord{9, 10, 612}, + dictWord{11, 10, 304}, + dictWord{12, 10, 46}, + dictWord{13, 10, 89}, + dictWord{14, 10, 112}, + dictWord{ + 145, + 10, + 122, + }, + dictWord{4, 0, 895}, + dictWord{133, 0, 772}, + dictWord{142, 11, 307}, + dictWord{135, 0, 1898}, + dictWord{4, 0, 926}, + dictWord{133, 0, 983}, + dictWord{4, 11, 353}, + dictWord{6, 11, 146}, + dictWord{6, 11, 1789}, + dictWord{7, 11, 288}, + dictWord{7, 11, 990}, + dictWord{7, 11, 1348}, + dictWord{9, 11, 665}, + dictWord{ + 9, + 11, + 898, + }, + dictWord{11, 11, 893}, + dictWord{142, 11, 212}, + dictWord{132, 0, 538}, + dictWord{133, 11, 532}, + dictWord{6, 0, 294}, + dictWord{7, 0, 1267}, + dictWord{8, 0, 624}, + dictWord{141, 0, 496}, + dictWord{7, 0, 1325}, + dictWord{4, 11, 45}, + dictWord{135, 11, 1257}, + dictWord{138, 0, 301}, + dictWord{9, 0, 298}, + dictWord{12, 0, 291}, + dictWord{13, 0, 276}, + dictWord{14, 0, 6}, + dictWord{17, 0, 18}, + dictWord{21, 0, 32}, + dictWord{7, 10, 1599}, + dictWord{7, 10, 1723}, + dictWord{ + 8, + 10, + 79, + }, + dictWord{8, 10, 106}, + dictWord{8, 10, 190}, + dictWord{8, 10, 302}, + dictWord{8, 10, 383}, + dictWord{8, 10, 713}, + dictWord{9, 10, 119}, + dictWord{9, 10, 233}, + dictWord{9, 10, 419}, + dictWord{9, 10, 471}, + dictWord{10, 10, 181}, + dictWord{10, 10, 406}, + dictWord{11, 10, 57}, + dictWord{11, 10, 85}, + dictWord{11, 10, 120}, + dictWord{11, 10, 177}, + dictWord{11, 10, 296}, + dictWord{11, 10, 382}, + dictWord{11, 10, 454}, + dictWord{11, 10, 758}, + dictWord{11, 10, 999}, + dictWord{ + 12, + 10, + 27, + }, + dictWord{12, 10, 131}, + dictWord{12, 10, 245}, + dictWord{12, 10, 312}, + dictWord{12, 10, 446}, + dictWord{12, 10, 454}, + dictWord{13, 10, 98}, + dictWord{ + 13, + 10, + 426, + }, + dictWord{13, 10, 508}, + dictWord{14, 10, 163}, + dictWord{14, 10, 272}, + dictWord{14, 10, 277}, + dictWord{14, 10, 370}, + dictWord{15, 10, 95}, + dictWord{15, 10, 138}, + dictWord{15, 10, 167}, + dictWord{17, 10, 38}, + dictWord{148, 10, 96}, + dictWord{132, 0, 757}, + dictWord{134, 0, 1263}, + dictWord{4, 0, 820}, + dictWord{134, 10, 1759}, + dictWord{133, 0, 722}, + dictWord{136, 11, 816}, + dictWord{138, 10, 372}, + dictWord{145, 10, 16}, + dictWord{134, 0, 1039}, + dictWord{ + 4, + 0, + 991, + }, + dictWord{134, 0, 2028}, + dictWord{133, 10, 258}, + dictWord{7, 0, 1875}, + dictWord{139, 0, 
124}, + dictWord{6, 11, 559}, + dictWord{6, 11, 1691}, + dictWord{135, 11, 586}, + dictWord{5, 0, 324}, + dictWord{7, 0, 881}, + dictWord{8, 10, 134}, + dictWord{9, 10, 788}, + dictWord{140, 10, 438}, + dictWord{7, 11, 1823}, + dictWord{139, 11, 693}, + dictWord{6, 0, 1348}, + dictWord{134, 0, 1545}, + dictWord{134, 0, 911}, + dictWord{132, 0, 954}, + dictWord{8, 0, 329}, + dictWord{8, 0, 414}, + dictWord{7, 10, 1948}, + dictWord{135, 10, 2004}, + dictWord{5, 0, 517}, + dictWord{6, 10, 439}, + dictWord{7, 10, 780}, + dictWord{135, 10, 1040}, + dictWord{ + 132, + 0, + 816, + }, + dictWord{5, 10, 1}, + dictWord{6, 10, 81}, + dictWord{138, 10, 520}, + dictWord{9, 0, 713}, + dictWord{10, 0, 222}, + dictWord{5, 10, 482}, + dictWord{8, 10, 98}, + dictWord{10, 10, 700}, + dictWord{10, 10, 822}, + dictWord{11, 10, 302}, + dictWord{11, 10, 778}, + dictWord{12, 10, 50}, + dictWord{12, 10, 127}, + dictWord{12, 10, 396}, + dictWord{13, 10, 62}, + dictWord{13, 10, 328}, + dictWord{14, 10, 122}, + dictWord{147, 10, 72}, + dictWord{137, 0, 33}, + dictWord{5, 10, 2}, + dictWord{7, 10, 1494}, + dictWord{136, 10, 589}, + dictWord{6, 10, 512}, + dictWord{7, 10, 797}, + dictWord{8, 10, 253}, + dictWord{9, 10, 77}, + dictWord{10, 10, 1}, + dictWord{10, 11, 108}, + dictWord{10, 10, 129}, + dictWord{10, 10, 225}, + dictWord{11, 11, 116}, + dictWord{11, 10, 118}, + dictWord{11, 10, 226}, + dictWord{11, 10, 251}, + dictWord{ + 11, + 10, + 430, + }, + dictWord{11, 10, 701}, + dictWord{11, 10, 974}, + dictWord{11, 10, 982}, + dictWord{12, 10, 64}, + dictWord{12, 10, 260}, + dictWord{12, 10, 488}, + dictWord{ + 140, + 10, + 690, + }, + dictWord{134, 11, 456}, + dictWord{133, 11, 925}, + dictWord{5, 0, 150}, + dictWord{7, 0, 106}, + dictWord{7, 0, 774}, + dictWord{8, 0, 603}, + dictWord{ + 9, + 0, + 593, + }, + dictWord{9, 0, 634}, + dictWord{10, 0, 44}, + dictWord{10, 0, 173}, + dictWord{11, 0, 462}, + dictWord{11, 0, 515}, + dictWord{13, 0, 216}, + dictWord{13, 0, 288}, + dictWord{142, 0, 400}, + dictWord{137, 10, 347}, + dictWord{5, 0, 748}, + dictWord{134, 0, 553}, + dictWord{12, 0, 108}, + dictWord{141, 0, 291}, + dictWord{7, 0, 420}, + dictWord{4, 10, 12}, + dictWord{7, 10, 522}, + dictWord{7, 10, 809}, + dictWord{8, 10, 797}, + dictWord{141, 10, 88}, + dictWord{6, 11, 193}, + dictWord{7, 11, 240}, + dictWord{ + 7, + 11, + 1682, + }, + dictWord{10, 11, 51}, + dictWord{10, 11, 640}, + dictWord{11, 11, 410}, + dictWord{13, 11, 82}, + dictWord{14, 11, 247}, + dictWord{14, 11, 331}, + dictWord{142, 11, 377}, + dictWord{133, 10, 528}, + dictWord{135, 0, 1777}, + dictWord{4, 0, 493}, + dictWord{144, 0, 55}, + dictWord{136, 11, 633}, + dictWord{ + 139, + 0, + 81, + }, + dictWord{6, 0, 980}, + dictWord{136, 0, 321}, + dictWord{148, 10, 109}, + dictWord{5, 10, 266}, + dictWord{9, 10, 290}, + dictWord{9, 10, 364}, + dictWord{ + 10, + 10, + 293, + }, + dictWord{11, 10, 606}, + dictWord{142, 10, 45}, + dictWord{6, 0, 568}, + dictWord{7, 0, 112}, + dictWord{7, 0, 1804}, + dictWord{8, 0, 362}, + dictWord{8, 0, 410}, + dictWord{8, 0, 830}, + dictWord{9, 0, 514}, + dictWord{11, 0, 649}, + dictWord{142, 0, 157}, + dictWord{4, 0, 74}, + dictWord{6, 0, 510}, + dictWord{6, 10, 594}, + dictWord{ + 9, + 10, + 121, + }, + dictWord{10, 10, 49}, + dictWord{10, 10, 412}, + dictWord{139, 10, 834}, + dictWord{134, 0, 838}, + dictWord{136, 10, 748}, + dictWord{132, 10, 466}, + dictWord{132, 0, 625}, + dictWord{135, 11, 1443}, + dictWord{4, 11, 237}, + dictWord{135, 11, 514}, + dictWord{9, 10, 378}, + dictWord{141, 10, 162}, + dictWord{6, 0, 
16}, + dictWord{6, 0, 158}, + dictWord{7, 0, 43}, + dictWord{7, 0, 129}, + dictWord{7, 0, 181}, + dictWord{8, 0, 276}, + dictWord{8, 0, 377}, + dictWord{10, 0, 523}, + dictWord{ + 11, + 0, + 816, + }, + dictWord{12, 0, 455}, + dictWord{13, 0, 303}, + dictWord{142, 0, 135}, + dictWord{135, 0, 281}, + dictWord{4, 0, 1}, + dictWord{7, 0, 1143}, + dictWord{7, 0, 1463}, + dictWord{8, 0, 61}, + dictWord{9, 0, 207}, + dictWord{9, 0, 390}, + dictWord{9, 0, 467}, + dictWord{139, 0, 836}, + dictWord{6, 11, 392}, + dictWord{7, 11, 65}, + dictWord{ + 135, + 11, + 2019, + }, + dictWord{132, 10, 667}, + dictWord{4, 0, 723}, + dictWord{5, 0, 895}, + dictWord{7, 0, 1031}, + dictWord{8, 0, 199}, + dictWord{8, 0, 340}, + dictWord{9, 0, 153}, + dictWord{9, 0, 215}, + dictWord{10, 0, 21}, + dictWord{10, 0, 59}, + dictWord{10, 0, 80}, + dictWord{10, 0, 224}, + dictWord{10, 0, 838}, + dictWord{11, 0, 229}, + dictWord{ + 11, + 0, + 652, + }, + dictWord{12, 0, 192}, + dictWord{13, 0, 146}, + dictWord{142, 0, 91}, + dictWord{132, 0, 295}, + dictWord{137, 0, 51}, + dictWord{9, 11, 222}, + dictWord{ + 10, + 11, + 43, + }, + dictWord{139, 11, 900}, + dictWord{5, 0, 309}, + dictWord{140, 0, 211}, + dictWord{5, 0, 125}, + dictWord{8, 0, 77}, + dictWord{138, 0, 15}, + dictWord{136, 11, 604}, + dictWord{138, 0, 789}, + dictWord{5, 0, 173}, + dictWord{4, 10, 39}, + dictWord{7, 10, 1843}, + dictWord{8, 10, 407}, + dictWord{11, 10, 144}, + dictWord{140, 10, 523}, + dictWord{138, 11, 265}, + dictWord{133, 0, 439}, + dictWord{132, 10, 510}, + dictWord{7, 0, 648}, + dictWord{7, 0, 874}, + dictWord{11, 0, 164}, + dictWord{12, 0, 76}, + dictWord{18, 0, 9}, + dictWord{7, 10, 1980}, + dictWord{10, 10, 487}, + dictWord{138, 10, 809}, + dictWord{12, 0, 111}, + dictWord{14, 0, 294}, + dictWord{19, 0, 45}, + dictWord{13, 10, 260}, + dictWord{146, 10, 63}, + dictWord{133, 11, 549}, + dictWord{134, 10, 570}, + dictWord{4, 0, 8}, + dictWord{7, 0, 1152}, + dictWord{7, 0, 1153}, + dictWord{7, 0, 1715}, + dictWord{9, 0, 374}, + dictWord{10, 0, 478}, + dictWord{139, 0, 648}, + dictWord{135, 0, 1099}, + dictWord{5, 0, 575}, + dictWord{6, 0, 354}, + dictWord{ + 135, + 0, + 701, + }, + dictWord{7, 11, 36}, + dictWord{8, 11, 201}, + dictWord{136, 11, 605}, + dictWord{4, 10, 787}, + dictWord{136, 11, 156}, + dictWord{6, 0, 518}, + dictWord{ + 149, + 11, + 13, + }, + dictWord{140, 11, 224}, + dictWord{134, 0, 702}, + dictWord{132, 10, 516}, + dictWord{5, 11, 724}, + dictWord{10, 11, 305}, + dictWord{11, 11, 151}, + dictWord{12, 11, 33}, + dictWord{12, 11, 121}, + dictWord{12, 11, 381}, + dictWord{17, 11, 3}, + dictWord{17, 11, 27}, + dictWord{17, 11, 78}, + dictWord{18, 11, 18}, + dictWord{19, 11, 54}, + dictWord{149, 11, 5}, + dictWord{8, 0, 87}, + dictWord{4, 11, 523}, + dictWord{5, 11, 638}, + dictWord{11, 10, 887}, + dictWord{14, 10, 365}, + dictWord{ + 142, + 10, + 375, + }, + dictWord{138, 0, 438}, + dictWord{136, 10, 821}, + dictWord{135, 11, 1908}, + dictWord{6, 11, 242}, + dictWord{7, 11, 227}, + dictWord{7, 11, 1581}, + dictWord{8, 11, 104}, + dictWord{9, 11, 113}, + dictWord{9, 11, 220}, + dictWord{9, 11, 427}, + dictWord{10, 11, 74}, + dictWord{10, 11, 239}, + dictWord{11, 11, 579}, + dictWord{11, 11, 1023}, + dictWord{13, 11, 4}, + dictWord{13, 11, 204}, + dictWord{13, 11, 316}, + dictWord{18, 11, 95}, + dictWord{148, 11, 86}, + dictWord{4, 0, 69}, + dictWord{5, 0, 122}, + dictWord{5, 0, 849}, + dictWord{6, 0, 1633}, + dictWord{9, 0, 656}, + dictWord{138, 0, 464}, + dictWord{7, 0, 1802}, + dictWord{4, 10, 10}, + dictWord{ + 139, + 10, 
+ 786, + }, + dictWord{135, 11, 861}, + dictWord{139, 0, 499}, + dictWord{7, 0, 476}, + dictWord{7, 0, 1592}, + dictWord{138, 0, 87}, + dictWord{133, 10, 684}, + dictWord{ + 4, + 0, + 840, + }, + dictWord{134, 10, 27}, + dictWord{142, 0, 283}, + dictWord{6, 0, 1620}, + dictWord{7, 11, 1328}, + dictWord{136, 11, 494}, + dictWord{5, 0, 859}, + dictWord{ + 7, + 0, + 1160, + }, + dictWord{8, 0, 107}, + dictWord{9, 0, 291}, + dictWord{9, 0, 439}, + dictWord{10, 0, 663}, + dictWord{11, 0, 609}, + dictWord{140, 0, 197}, + dictWord{ + 7, + 11, + 1306, + }, + dictWord{8, 11, 505}, + dictWord{9, 11, 482}, + dictWord{10, 11, 126}, + dictWord{11, 11, 225}, + dictWord{12, 11, 347}, + dictWord{12, 11, 449}, + dictWord{ + 13, + 11, + 19, + }, + dictWord{142, 11, 218}, + dictWord{5, 11, 268}, + dictWord{10, 11, 764}, + dictWord{12, 11, 120}, + dictWord{13, 11, 39}, + dictWord{145, 11, 127}, + dictWord{145, 10, 56}, + dictWord{7, 11, 1672}, + dictWord{10, 11, 472}, + dictWord{11, 11, 189}, + dictWord{143, 11, 51}, + dictWord{6, 10, 342}, + dictWord{6, 10, 496}, + dictWord{8, 10, 275}, + dictWord{137, 10, 206}, + dictWord{133, 0, 600}, + dictWord{4, 0, 117}, + dictWord{6, 0, 372}, + dictWord{7, 0, 1905}, + dictWord{142, 0, 323}, + dictWord{4, 10, 909}, + dictWord{5, 10, 940}, + dictWord{135, 11, 1471}, + dictWord{132, 10, 891}, + dictWord{4, 0, 722}, + dictWord{139, 0, 471}, + dictWord{4, 11, 384}, + dictWord{135, 11, 1022}, + dictWord{132, 10, 687}, + dictWord{9, 0, 5}, + dictWord{12, 0, 216}, + dictWord{12, 0, 294}, + dictWord{12, 0, 298}, + dictWord{12, 0, 400}, + dictWord{12, 0, 518}, + dictWord{13, 0, 229}, + dictWord{143, 0, 139}, + dictWord{135, 11, 1703}, + dictWord{7, 11, 1602}, + dictWord{10, 11, 698}, + dictWord{ + 12, + 11, + 212, + }, + dictWord{141, 11, 307}, + dictWord{6, 10, 41}, + dictWord{141, 10, 160}, + dictWord{135, 11, 1077}, + dictWord{9, 11, 159}, + dictWord{11, 11, 28}, + dictWord{140, 11, 603}, + dictWord{4, 0, 514}, + dictWord{7, 0, 1304}, + dictWord{138, 0, 477}, + dictWord{134, 0, 1774}, + dictWord{9, 0, 88}, + dictWord{139, 0, 270}, + dictWord{5, 0, 12}, + dictWord{7, 0, 375}, + dictWord{9, 0, 438}, + dictWord{134, 10, 1718}, + dictWord{132, 11, 515}, + dictWord{136, 10, 778}, + dictWord{8, 11, 632}, + dictWord{8, 11, 697}, + dictWord{137, 11, 854}, + dictWord{6, 0, 362}, + dictWord{6, 0, 997}, + dictWord{146, 0, 51}, + dictWord{7, 0, 816}, + dictWord{7, 0, 1241}, + dictWord{ + 9, + 0, + 283, + }, + dictWord{9, 0, 520}, + dictWord{10, 0, 213}, + dictWord{10, 0, 307}, + dictWord{10, 0, 463}, + dictWord{10, 0, 671}, + dictWord{10, 0, 746}, + dictWord{11, 0, 401}, + dictWord{11, 0, 794}, + dictWord{12, 0, 517}, + dictWord{18, 0, 107}, + dictWord{147, 0, 115}, + dictWord{133, 10, 115}, + dictWord{150, 11, 28}, + dictWord{4, 11, 136}, + dictWord{133, 11, 551}, + dictWord{142, 10, 314}, + dictWord{132, 0, 258}, + dictWord{6, 0, 22}, + dictWord{7, 0, 903}, + dictWord{7, 0, 1963}, + dictWord{8, 0, 639}, + dictWord{138, 0, 577}, + dictWord{5, 0, 681}, + dictWord{8, 0, 782}, + dictWord{13, 0, 130}, + dictWord{17, 0, 84}, + dictWord{5, 10, 193}, + dictWord{140, 10, 178}, + dictWord{ + 9, + 11, + 17, + }, + dictWord{138, 11, 291}, + dictWord{7, 11, 1287}, + dictWord{9, 11, 44}, + dictWord{10, 11, 552}, + dictWord{10, 11, 642}, + dictWord{11, 11, 839}, + dictWord{12, 11, 274}, + dictWord{12, 11, 275}, + dictWord{12, 11, 372}, + dictWord{13, 11, 91}, + dictWord{142, 11, 125}, + dictWord{135, 10, 174}, + dictWord{4, 0, 664}, + dictWord{5, 0, 804}, + dictWord{139, 0, 1013}, + dictWord{134, 0, 
942}, + dictWord{6, 0, 1349}, + dictWord{6, 0, 1353}, + dictWord{6, 0, 1450}, + dictWord{7, 11, 1518}, + dictWord{139, 11, 694}, + dictWord{11, 0, 356}, + dictWord{4, 10, 122}, + dictWord{5, 10, 796}, + dictWord{5, 10, 952}, + dictWord{6, 10, 1660}, + dictWord{ + 6, + 10, + 1671, + }, + dictWord{8, 10, 567}, + dictWord{9, 10, 687}, + dictWord{9, 10, 742}, + dictWord{10, 10, 686}, + dictWord{11, 10, 682}, + dictWord{140, 10, 281}, + dictWord{ + 5, + 0, + 32, + }, + dictWord{6, 11, 147}, + dictWord{7, 11, 886}, + dictWord{9, 11, 753}, + dictWord{138, 11, 268}, + dictWord{5, 10, 179}, + dictWord{7, 10, 1095}, + dictWord{ + 135, + 10, + 1213, + }, + dictWord{4, 10, 66}, + dictWord{7, 10, 722}, + dictWord{135, 10, 904}, + dictWord{135, 10, 352}, + dictWord{9, 11, 245}, + dictWord{138, 11, 137}, + dictWord{4, 0, 289}, + dictWord{7, 0, 629}, + dictWord{7, 0, 1698}, + dictWord{7, 0, 1711}, + dictWord{12, 0, 215}, + dictWord{133, 11, 414}, + dictWord{6, 0, 1975}, + dictWord{135, 11, 1762}, + dictWord{6, 0, 450}, + dictWord{136, 0, 109}, + dictWord{141, 10, 35}, + dictWord{134, 11, 599}, + dictWord{136, 0, 705}, + dictWord{ + 133, + 0, + 664, + }, + dictWord{134, 11, 1749}, + dictWord{11, 11, 402}, + dictWord{12, 11, 109}, + dictWord{12, 11, 431}, + dictWord{13, 11, 179}, + dictWord{13, 11, 206}, + dictWord{14, 11, 175}, + dictWord{14, 11, 217}, + dictWord{16, 11, 3}, + dictWord{148, 11, 53}, + dictWord{135, 0, 1238}, + dictWord{134, 11, 1627}, + dictWord{ + 132, + 11, + 488, + }, + dictWord{13, 0, 318}, + dictWord{10, 10, 592}, + dictWord{10, 10, 753}, + dictWord{12, 10, 317}, + dictWord{12, 10, 355}, + dictWord{12, 10, 465}, + dictWord{ + 12, + 10, + 469, + }, + dictWord{12, 10, 560}, + dictWord{140, 10, 578}, + dictWord{133, 10, 564}, + dictWord{132, 11, 83}, + dictWord{140, 11, 676}, + dictWord{6, 0, 1872}, + dictWord{6, 0, 1906}, + dictWord{6, 0, 1907}, + dictWord{9, 0, 934}, + dictWord{9, 0, 956}, + dictWord{9, 0, 960}, + dictWord{9, 0, 996}, + dictWord{12, 0, 794}, + dictWord{ + 12, + 0, + 876, + }, + dictWord{12, 0, 880}, + dictWord{12, 0, 918}, + dictWord{15, 0, 230}, + dictWord{18, 0, 234}, + dictWord{18, 0, 238}, + dictWord{21, 0, 38}, + dictWord{149, 0, 62}, + dictWord{134, 10, 556}, + dictWord{134, 11, 278}, + dictWord{137, 0, 103}, + dictWord{7, 10, 544}, + dictWord{8, 10, 719}, + dictWord{138, 10, 61}, + dictWord{ + 4, + 10, + 5, + }, + dictWord{5, 10, 498}, + dictWord{8, 10, 637}, + dictWord{137, 10, 521}, + dictWord{7, 0, 777}, + dictWord{12, 0, 229}, + dictWord{12, 0, 239}, + dictWord{15, 0, 12}, + dictWord{12, 11, 229}, + dictWord{12, 11, 239}, + dictWord{143, 11, 12}, + dictWord{6, 0, 26}, + dictWord{7, 11, 388}, + dictWord{7, 11, 644}, + dictWord{139, 11, 781}, + dictWord{7, 11, 229}, + dictWord{8, 11, 59}, + dictWord{9, 11, 190}, + dictWord{9, 11, 257}, + dictWord{10, 11, 378}, + dictWord{140, 11, 191}, + dictWord{133, 10, 927}, + dictWord{135, 10, 1441}, + dictWord{4, 10, 893}, + dictWord{5, 10, 780}, + dictWord{133, 10, 893}, + dictWord{4, 0, 414}, + dictWord{5, 0, 467}, + dictWord{9, 0, 654}, + dictWord{10, 0, 451}, + dictWord{12, 0, 59}, + dictWord{141, 0, 375}, + dictWord{142, 0, 173}, + dictWord{135, 0, 17}, + dictWord{7, 0, 1350}, + dictWord{133, 10, 238}, + dictWord{135, 0, 955}, + dictWord{4, 0, 960}, + dictWord{10, 0, 887}, + dictWord{12, 0, 753}, + dictWord{18, 0, 161}, + dictWord{18, 0, 162}, + dictWord{152, 0, 19}, + dictWord{136, 11, 344}, + dictWord{6, 10, 1729}, + dictWord{137, 11, 288}, + dictWord{132, 11, 660}, + dictWord{4, 0, 217}, + dictWord{5, 0, 710}, + 
dictWord{7, 0, 760}, + dictWord{7, 0, 1926}, + dictWord{9, 0, 428}, + dictWord{9, 0, 708}, + dictWord{10, 0, 254}, + dictWord{10, 0, 296}, + dictWord{10, 0, 720}, + dictWord{11, 0, 109}, + dictWord{ + 11, + 0, + 255, + }, + dictWord{12, 0, 165}, + dictWord{12, 0, 315}, + dictWord{13, 0, 107}, + dictWord{13, 0, 203}, + dictWord{14, 0, 54}, + dictWord{14, 0, 99}, + dictWord{14, 0, 114}, + dictWord{14, 0, 388}, + dictWord{16, 0, 85}, + dictWord{17, 0, 9}, + dictWord{17, 0, 33}, + dictWord{20, 0, 25}, + dictWord{20, 0, 28}, + dictWord{20, 0, 29}, + dictWord{21, 0, 9}, + dictWord{21, 0, 10}, + dictWord{21, 0, 34}, + dictWord{22, 0, 17}, + dictWord{4, 10, 60}, + dictWord{7, 10, 1800}, + dictWord{8, 10, 314}, + dictWord{9, 10, 700}, + dictWord{ + 139, + 10, + 487, + }, + dictWord{7, 11, 1035}, + dictWord{138, 11, 737}, + dictWord{7, 11, 690}, + dictWord{9, 11, 217}, + dictWord{9, 11, 587}, + dictWord{140, 11, 521}, + dictWord{6, 0, 919}, + dictWord{7, 11, 706}, + dictWord{7, 11, 1058}, + dictWord{138, 11, 538}, + dictWord{7, 10, 1853}, + dictWord{138, 10, 437}, + dictWord{ + 136, + 10, + 419, + }, + dictWord{6, 0, 280}, + dictWord{10, 0, 502}, + dictWord{11, 0, 344}, + dictWord{140, 0, 38}, + dictWord{5, 0, 45}, + dictWord{7, 0, 1161}, + dictWord{11, 0, 448}, + dictWord{11, 0, 880}, + dictWord{13, 0, 139}, + dictWord{13, 0, 407}, + dictWord{15, 0, 16}, + dictWord{17, 0, 95}, + dictWord{18, 0, 66}, + dictWord{18, 0, 88}, + dictWord{ + 18, + 0, + 123, + }, + dictWord{149, 0, 7}, + dictWord{11, 11, 92}, + dictWord{11, 11, 196}, + dictWord{11, 11, 409}, + dictWord{11, 11, 450}, + dictWord{11, 11, 666}, + dictWord{ + 11, + 11, + 777, + }, + dictWord{12, 11, 262}, + dictWord{13, 11, 385}, + dictWord{13, 11, 393}, + dictWord{15, 11, 115}, + dictWord{16, 11, 45}, + dictWord{145, 11, 82}, + dictWord{136, 0, 777}, + dictWord{134, 11, 1744}, + dictWord{4, 0, 410}, + dictWord{7, 0, 521}, + dictWord{133, 10, 828}, + dictWord{134, 0, 673}, + dictWord{7, 0, 1110}, + dictWord{7, 0, 1778}, + dictWord{7, 10, 176}, + dictWord{135, 10, 178}, + dictWord{5, 10, 806}, + dictWord{7, 11, 268}, + dictWord{7, 10, 1976}, + dictWord{ + 136, + 11, + 569, + }, + dictWord{4, 11, 733}, + dictWord{9, 11, 194}, + dictWord{10, 11, 92}, + dictWord{11, 11, 198}, + dictWord{12, 11, 84}, + dictWord{12, 11, 87}, + dictWord{ + 13, + 11, + 128, + }, + dictWord{144, 11, 74}, + dictWord{5, 0, 341}, + dictWord{7, 0, 1129}, + dictWord{11, 0, 414}, + dictWord{4, 10, 51}, + dictWord{6, 10, 4}, + dictWord{7, 10, 591}, + dictWord{7, 10, 849}, + dictWord{7, 10, 951}, + dictWord{7, 10, 1613}, + dictWord{7, 10, 1760}, + dictWord{7, 10, 1988}, + dictWord{9, 10, 434}, + dictWord{10, 10, 754}, + dictWord{11, 10, 25}, + dictWord{139, 10, 37}, + dictWord{133, 10, 902}, + dictWord{135, 10, 928}, + dictWord{135, 0, 787}, + dictWord{132, 0, 436}, + dictWord{ + 134, + 10, + 270, + }, + dictWord{7, 0, 1587}, + dictWord{135, 0, 1707}, + dictWord{6, 0, 377}, + dictWord{7, 0, 1025}, + dictWord{9, 0, 613}, + dictWord{145, 0, 104}, + dictWord{ + 7, + 11, + 982, + }, + dictWord{7, 11, 1361}, + dictWord{10, 11, 32}, + dictWord{143, 11, 56}, + dictWord{139, 0, 96}, + dictWord{132, 0, 451}, + dictWord{132, 10, 416}, + dictWord{ + 142, + 10, + 372, + }, + dictWord{5, 10, 152}, + dictWord{5, 10, 197}, + dictWord{7, 11, 306}, + dictWord{7, 10, 340}, + dictWord{7, 10, 867}, + dictWord{10, 10, 548}, + dictWord{ + 10, + 10, + 581, + }, + dictWord{11, 10, 6}, + dictWord{12, 10, 3}, + dictWord{12, 10, 19}, + dictWord{14, 10, 110}, + dictWord{142, 10, 289}, + dictWord{134, 0, 
680}, + dictWord{ + 134, + 11, + 609, + }, + dictWord{7, 0, 483}, + dictWord{7, 10, 190}, + dictWord{8, 10, 28}, + dictWord{8, 10, 141}, + dictWord{8, 10, 444}, + dictWord{8, 10, 811}, + dictWord{ + 9, + 10, + 468, + }, + dictWord{11, 10, 334}, + dictWord{12, 10, 24}, + dictWord{12, 10, 386}, + dictWord{140, 10, 576}, + dictWord{10, 0, 916}, + dictWord{133, 10, 757}, + dictWord{ + 5, + 10, + 721, + }, + dictWord{135, 10, 1553}, + dictWord{133, 11, 178}, + dictWord{134, 0, 937}, + dictWord{132, 10, 898}, + dictWord{133, 0, 739}, + dictWord{ + 147, + 0, + 82, + }, + dictWord{135, 0, 663}, + dictWord{146, 0, 128}, + dictWord{5, 10, 277}, + dictWord{141, 10, 247}, + dictWord{134, 0, 1087}, + dictWord{132, 10, 435}, + dictWord{ + 6, + 11, + 381, + }, + dictWord{7, 11, 645}, + dictWord{7, 11, 694}, + dictWord{136, 11, 546}, + dictWord{7, 0, 503}, + dictWord{135, 0, 1885}, + dictWord{6, 0, 1965}, + dictWord{ + 8, + 0, + 925, + }, + dictWord{138, 0, 955}, + dictWord{4, 0, 113}, + dictWord{5, 0, 163}, + dictWord{5, 0, 735}, + dictWord{7, 0, 1009}, + dictWord{9, 0, 9}, + dictWord{9, 0, 771}, + dictWord{12, 0, 90}, + dictWord{13, 0, 138}, + dictWord{13, 0, 410}, + dictWord{143, 0, 128}, + dictWord{4, 0, 324}, + dictWord{138, 0, 104}, + dictWord{7, 0, 460}, + dictWord{ + 5, + 10, + 265, + }, + dictWord{134, 10, 212}, + dictWord{133, 11, 105}, + dictWord{7, 11, 261}, + dictWord{7, 11, 1107}, + dictWord{7, 11, 1115}, + dictWord{7, 11, 1354}, + dictWord{7, 11, 1588}, + dictWord{7, 11, 1705}, + dictWord{7, 11, 1902}, + dictWord{9, 11, 465}, + dictWord{10, 11, 248}, + dictWord{10, 11, 349}, + dictWord{10, 11, 647}, + dictWord{11, 11, 527}, + dictWord{11, 11, 660}, + dictWord{11, 11, 669}, + dictWord{12, 11, 529}, + dictWord{141, 11, 305}, + dictWord{5, 11, 438}, + dictWord{ + 9, + 11, + 694, + }, + dictWord{12, 11, 627}, + dictWord{141, 11, 210}, + dictWord{152, 11, 11}, + dictWord{4, 0, 935}, + dictWord{133, 0, 823}, + dictWord{132, 10, 702}, + dictWord{ + 5, + 0, + 269, + }, + dictWord{7, 0, 434}, + dictWord{7, 0, 891}, + dictWord{8, 0, 339}, + dictWord{9, 0, 702}, + dictWord{11, 0, 594}, + dictWord{11, 0, 718}, + dictWord{17, 0, 100}, + dictWord{5, 10, 808}, + dictWord{135, 10, 2045}, + dictWord{7, 0, 1014}, + dictWord{9, 0, 485}, + dictWord{141, 0, 264}, + dictWord{134, 0, 1713}, + dictWord{7, 0, 1810}, + dictWord{11, 0, 866}, + dictWord{12, 0, 103}, + dictWord{13, 0, 495}, + dictWord{140, 11, 233}, + dictWord{4, 0, 423}, + dictWord{10, 0, 949}, + dictWord{138, 0, 1013}, + dictWord{135, 0, 900}, + dictWord{8, 11, 25}, + dictWord{138, 11, 826}, + dictWord{5, 10, 166}, + dictWord{8, 10, 739}, + dictWord{140, 10, 511}, + dictWord{ + 134, + 0, + 2018, + }, + dictWord{7, 11, 1270}, + dictWord{139, 11, 612}, + dictWord{4, 10, 119}, + dictWord{5, 10, 170}, + dictWord{5, 10, 447}, + dictWord{7, 10, 1708}, + dictWord{ + 7, + 10, + 1889, + }, + dictWord{9, 10, 357}, + dictWord{9, 10, 719}, + dictWord{12, 10, 486}, + dictWord{140, 10, 596}, + dictWord{12, 0, 574}, + dictWord{140, 11, 574}, + dictWord{132, 11, 308}, + dictWord{6, 0, 964}, + dictWord{6, 0, 1206}, + dictWord{134, 0, 1302}, + dictWord{4, 10, 450}, + dictWord{135, 10, 1158}, + dictWord{ + 135, + 11, + 150, + }, + dictWord{136, 11, 649}, + dictWord{14, 0, 213}, + dictWord{148, 0, 38}, + dictWord{9, 11, 45}, + dictWord{9, 11, 311}, + dictWord{141, 11, 42}, + dictWord{ + 134, + 11, + 521, + }, + dictWord{7, 10, 1375}, + dictWord{7, 10, 1466}, + dictWord{138, 10, 331}, + dictWord{132, 10, 754}, + dictWord{5, 11, 339}, + dictWord{7, 11, 1442}, + 
dictWord{14, 11, 3}, + dictWord{15, 11, 41}, + dictWord{147, 11, 66}, + dictWord{136, 11, 378}, + dictWord{134, 0, 1022}, + dictWord{5, 10, 850}, + dictWord{136, 10, 799}, + dictWord{142, 0, 143}, + dictWord{135, 0, 2029}, + dictWord{134, 11, 1628}, + dictWord{8, 0, 523}, + dictWord{150, 0, 34}, + dictWord{5, 0, 625}, + dictWord{ + 135, + 0, + 1617, + }, + dictWord{7, 0, 275}, + dictWord{7, 10, 238}, + dictWord{7, 10, 2033}, + dictWord{8, 10, 120}, + dictWord{8, 10, 188}, + dictWord{8, 10, 659}, + dictWord{ + 9, + 10, + 598, + }, + dictWord{10, 10, 466}, + dictWord{12, 10, 342}, + dictWord{12, 10, 588}, + dictWord{13, 10, 503}, + dictWord{14, 10, 246}, + dictWord{143, 10, 92}, + dictWord{ + 7, + 0, + 37, + }, + dictWord{8, 0, 425}, + dictWord{8, 0, 693}, + dictWord{9, 0, 720}, + dictWord{10, 0, 380}, + dictWord{10, 0, 638}, + dictWord{11, 0, 273}, + dictWord{11, 0, 473}, + dictWord{12, 0, 61}, + dictWord{143, 0, 43}, + dictWord{135, 11, 829}, + dictWord{135, 0, 1943}, + dictWord{132, 0, 765}, + dictWord{5, 11, 486}, + dictWord{ + 135, + 11, + 1349, + }, + dictWord{7, 11, 1635}, + dictWord{8, 11, 17}, + dictWord{10, 11, 217}, + dictWord{138, 11, 295}, + dictWord{4, 10, 201}, + dictWord{7, 10, 1744}, + dictWord{ + 8, + 10, + 602, + }, + dictWord{11, 10, 247}, + dictWord{11, 10, 826}, + dictWord{145, 10, 65}, + dictWord{138, 11, 558}, + dictWord{11, 0, 551}, + dictWord{142, 0, 159}, + dictWord{8, 10, 164}, + dictWord{146, 10, 62}, + dictWord{139, 11, 176}, + dictWord{132, 0, 168}, + dictWord{136, 0, 1010}, + dictWord{134, 0, 1994}, + dictWord{ + 135, + 0, + 91, + }, + dictWord{138, 0, 532}, + dictWord{135, 10, 1243}, + dictWord{135, 0, 1884}, + dictWord{132, 10, 907}, + dictWord{5, 10, 100}, + dictWord{10, 10, 329}, + dictWord{12, 10, 416}, + dictWord{149, 10, 29}, + dictWord{134, 11, 447}, + dictWord{132, 10, 176}, + dictWord{5, 10, 636}, + dictWord{5, 10, 998}, + dictWord{7, 10, 9}, + dictWord{7, 10, 1508}, + dictWord{8, 10, 26}, + dictWord{9, 10, 317}, + dictWord{9, 10, 358}, + dictWord{10, 10, 210}, + dictWord{10, 10, 292}, + dictWord{10, 10, 533}, + dictWord{11, 10, 555}, + dictWord{12, 10, 526}, + dictWord{12, 10, 607}, + dictWord{13, 10, 263}, + dictWord{13, 10, 459}, + dictWord{142, 10, 271}, + dictWord{ + 4, + 11, + 609, + }, + dictWord{135, 11, 756}, + dictWord{6, 0, 15}, + dictWord{7, 0, 70}, + dictWord{10, 0, 240}, + dictWord{147, 0, 93}, + dictWord{4, 11, 930}, + dictWord{133, 11, 947}, + dictWord{134, 0, 1227}, + dictWord{134, 0, 1534}, + dictWord{133, 11, 939}, + dictWord{133, 11, 962}, + dictWord{5, 11, 651}, + dictWord{8, 11, 170}, + dictWord{ + 9, + 11, + 61, + }, + dictWord{9, 11, 63}, + dictWord{10, 11, 23}, + dictWord{10, 11, 37}, + dictWord{10, 11, 834}, + dictWord{11, 11, 4}, + dictWord{11, 11, 187}, + dictWord{ + 11, + 11, + 281, + }, + dictWord{11, 11, 503}, + dictWord{11, 11, 677}, + dictWord{12, 11, 96}, + dictWord{12, 11, 130}, + dictWord{12, 11, 244}, + dictWord{14, 11, 5}, + dictWord{ + 14, + 11, + 40, + }, + dictWord{14, 11, 162}, + dictWord{14, 11, 202}, + dictWord{146, 11, 133}, + dictWord{4, 11, 406}, + dictWord{5, 11, 579}, + dictWord{12, 11, 492}, + dictWord{ + 150, + 11, + 15, + }, + dictWord{139, 0, 392}, + dictWord{6, 10, 610}, + dictWord{10, 10, 127}, + dictWord{141, 10, 27}, + dictWord{7, 0, 655}, + dictWord{7, 0, 1844}, + dictWord{ + 136, + 10, + 119, + }, + dictWord{4, 0, 145}, + dictWord{6, 0, 176}, + dictWord{7, 0, 395}, + dictWord{137, 0, 562}, + dictWord{132, 0, 501}, + dictWord{140, 11, 145}, + dictWord{ + 136, + 0, + 1019, + }, + dictWord{134, 
0, 509}, + dictWord{139, 0, 267}, + dictWord{6, 11, 17}, + dictWord{7, 11, 16}, + dictWord{7, 11, 1001}, + dictWord{7, 11, 1982}, + dictWord{ + 9, + 11, + 886, + }, + dictWord{10, 11, 489}, + dictWord{10, 11, 800}, + dictWord{11, 11, 782}, + dictWord{12, 11, 320}, + dictWord{13, 11, 467}, + dictWord{14, 11, 145}, + dictWord{14, 11, 387}, + dictWord{143, 11, 119}, + dictWord{145, 11, 17}, + dictWord{6, 0, 1099}, + dictWord{133, 11, 458}, + dictWord{7, 11, 1983}, + dictWord{8, 11, 0}, + dictWord{8, 11, 171}, + dictWord{9, 11, 120}, + dictWord{9, 11, 732}, + dictWord{10, 11, 473}, + dictWord{11, 11, 656}, + dictWord{11, 11, 998}, + dictWord{18, 11, 0}, + dictWord{18, 11, 2}, + dictWord{147, 11, 21}, + dictWord{12, 11, 427}, + dictWord{146, 11, 38}, + dictWord{10, 0, 948}, + dictWord{138, 0, 968}, + dictWord{7, 10, 126}, + dictWord{136, 10, 84}, + dictWord{136, 10, 790}, + dictWord{4, 0, 114}, + dictWord{9, 0, 492}, + dictWord{13, 0, 462}, + dictWord{142, 0, 215}, + dictWord{6, 10, 64}, + dictWord{12, 10, 377}, + dictWord{141, 10, 309}, + dictWord{4, 0, 77}, + dictWord{5, 0, 361}, + dictWord{6, 0, 139}, + dictWord{6, 0, 401}, + dictWord{6, 0, 404}, + dictWord{ + 7, + 0, + 413, + }, + dictWord{7, 0, 715}, + dictWord{7, 0, 1716}, + dictWord{11, 0, 279}, + dictWord{12, 0, 179}, + dictWord{12, 0, 258}, + dictWord{13, 0, 244}, + dictWord{142, 0, 358}, + dictWord{134, 0, 1717}, + dictWord{7, 0, 772}, + dictWord{7, 0, 1061}, + dictWord{7, 0, 1647}, + dictWord{8, 0, 82}, + dictWord{11, 0, 250}, + dictWord{11, 0, 607}, + dictWord{12, 0, 311}, + dictWord{12, 0, 420}, + dictWord{13, 0, 184}, + dictWord{13, 0, 367}, + dictWord{7, 10, 1104}, + dictWord{11, 10, 269}, + dictWord{11, 10, 539}, + dictWord{11, 10, 627}, + dictWord{11, 10, 706}, + dictWord{11, 10, 975}, + dictWord{12, 10, 248}, + dictWord{12, 10, 434}, + dictWord{12, 10, 600}, + dictWord{ + 12, + 10, + 622, + }, + dictWord{13, 10, 297}, + dictWord{13, 10, 485}, + dictWord{14, 10, 69}, + dictWord{14, 10, 409}, + dictWord{143, 10, 108}, + dictWord{135, 0, 724}, + dictWord{ + 4, + 11, + 512, + }, + dictWord{4, 11, 519}, + dictWord{133, 11, 342}, + dictWord{134, 0, 1133}, + dictWord{145, 11, 29}, + dictWord{11, 10, 977}, + dictWord{141, 10, 507}, + dictWord{6, 0, 841}, + dictWord{6, 0, 1042}, + dictWord{6, 0, 1194}, + dictWord{10, 0, 993}, + dictWord{140, 0, 1021}, + dictWord{6, 11, 31}, + dictWord{7, 11, 491}, + dictWord{7, 11, 530}, + dictWord{8, 11, 592}, + dictWord{9, 10, 34}, + dictWord{11, 11, 53}, + dictWord{11, 10, 484}, + dictWord{11, 11, 779}, + dictWord{12, 11, 167}, + dictWord{12, 11, 411}, + dictWord{14, 11, 14}, + dictWord{14, 11, 136}, + dictWord{15, 11, 72}, + dictWord{16, 11, 17}, + dictWord{144, 11, 72}, + dictWord{4, 0, 1021}, + dictWord{6, 0, 2037}, + dictWord{133, 11, 907}, + dictWord{7, 0, 373}, + dictWord{8, 0, 335}, + dictWord{8, 0, 596}, + dictWord{9, 0, 488}, + dictWord{6, 10, 1700}, + dictWord{ + 7, + 10, + 293, + }, + dictWord{7, 10, 382}, + dictWord{7, 10, 1026}, + dictWord{7, 10, 1087}, + dictWord{7, 10, 2027}, + dictWord{8, 10, 252}, + dictWord{8, 10, 727}, + dictWord{ + 8, + 10, + 729, + }, + dictWord{9, 10, 30}, + dictWord{9, 10, 199}, + dictWord{9, 10, 231}, + dictWord{9, 10, 251}, + dictWord{9, 10, 334}, + dictWord{9, 10, 361}, + dictWord{9, 10, 712}, + dictWord{10, 10, 55}, + dictWord{10, 10, 60}, + dictWord{10, 10, 232}, + dictWord{10, 10, 332}, + dictWord{10, 10, 384}, + dictWord{10, 10, 396}, + dictWord{ + 10, + 10, + 504, + }, + dictWord{10, 10, 542}, + dictWord{10, 10, 652}, + dictWord{11, 10, 20}, + 
dictWord{11, 10, 48}, + dictWord{11, 10, 207}, + dictWord{11, 10, 291}, + dictWord{ + 11, + 10, + 298, + }, + dictWord{11, 10, 342}, + dictWord{11, 10, 365}, + dictWord{11, 10, 394}, + dictWord{11, 10, 620}, + dictWord{11, 10, 705}, + dictWord{11, 10, 1017}, + dictWord{12, 10, 123}, + dictWord{12, 10, 340}, + dictWord{12, 10, 406}, + dictWord{12, 10, 643}, + dictWord{13, 10, 61}, + dictWord{13, 10, 269}, + dictWord{ + 13, + 10, + 311, + }, + dictWord{13, 10, 319}, + dictWord{13, 10, 486}, + dictWord{14, 10, 234}, + dictWord{15, 10, 62}, + dictWord{15, 10, 85}, + dictWord{16, 10, 71}, + dictWord{ + 18, + 10, + 119, + }, + dictWord{148, 10, 105}, + dictWord{150, 0, 37}, + dictWord{4, 11, 208}, + dictWord{5, 11, 106}, + dictWord{6, 11, 531}, + dictWord{8, 11, 408}, + dictWord{ + 9, + 11, + 188, + }, + dictWord{138, 11, 572}, + dictWord{132, 0, 564}, + dictWord{6, 0, 513}, + dictWord{135, 0, 1052}, + dictWord{132, 0, 825}, + dictWord{9, 0, 899}, + dictWord{ + 140, + 11, + 441, + }, + dictWord{134, 0, 778}, + dictWord{133, 11, 379}, + dictWord{7, 0, 1417}, + dictWord{12, 0, 382}, + dictWord{17, 0, 48}, + dictWord{152, 0, 12}, + dictWord{ + 132, + 11, + 241, + }, + dictWord{7, 0, 1116}, + dictWord{6, 10, 379}, + dictWord{7, 10, 270}, + dictWord{8, 10, 176}, + dictWord{8, 10, 183}, + dictWord{9, 10, 432}, + dictWord{ + 9, + 10, + 661, + }, + dictWord{12, 10, 247}, + dictWord{12, 10, 617}, + dictWord{146, 10, 125}, + dictWord{5, 10, 792}, + dictWord{133, 10, 900}, + dictWord{6, 0, 545}, + dictWord{ + 7, + 0, + 565, + }, + dictWord{7, 0, 1669}, + dictWord{10, 0, 114}, + dictWord{11, 0, 642}, + dictWord{140, 0, 618}, + dictWord{133, 0, 5}, + dictWord{138, 11, 7}, + dictWord{ + 132, + 11, + 259, + }, + dictWord{135, 0, 192}, + dictWord{134, 0, 701}, + dictWord{136, 0, 763}, + dictWord{135, 10, 1979}, + dictWord{4, 10, 901}, + dictWord{133, 10, 776}, + dictWord{10, 0, 755}, + dictWord{147, 0, 29}, + dictWord{133, 0, 759}, + dictWord{4, 11, 173}, + dictWord{5, 11, 312}, + dictWord{5, 11, 512}, + dictWord{135, 11, 1285}, + dictWord{7, 11, 1603}, + dictWord{7, 11, 1691}, + dictWord{9, 11, 464}, + dictWord{11, 11, 195}, + dictWord{12, 11, 279}, + dictWord{12, 11, 448}, + dictWord{ + 14, + 11, + 11, + }, + dictWord{147, 11, 102}, + dictWord{7, 0, 370}, + dictWord{7, 0, 1007}, + dictWord{7, 0, 1177}, + dictWord{135, 0, 1565}, + dictWord{135, 0, 1237}, + dictWord{ + 4, + 0, + 87, + }, + dictWord{5, 0, 250}, + dictWord{141, 0, 298}, + dictWord{4, 11, 452}, + dictWord{5, 11, 583}, + dictWord{5, 11, 817}, + dictWord{6, 11, 433}, + dictWord{7, 11, 593}, + dictWord{7, 11, 720}, + dictWord{7, 11, 1378}, + dictWord{8, 11, 161}, + dictWord{9, 11, 284}, + dictWord{10, 11, 313}, + dictWord{139, 11, 886}, + dictWord{4, 11, 547}, + dictWord{135, 11, 1409}, + dictWord{136, 11, 722}, + dictWord{4, 10, 37}, + dictWord{5, 10, 334}, + dictWord{135, 10, 1253}, + dictWord{132, 10, 508}, + dictWord{ + 12, + 0, + 107, + }, + dictWord{146, 0, 31}, + dictWord{8, 11, 420}, + dictWord{139, 11, 193}, + dictWord{135, 0, 814}, + dictWord{135, 11, 409}, + dictWord{140, 0, 991}, + dictWord{4, 0, 57}, + dictWord{7, 0, 1195}, + dictWord{7, 0, 1438}, + dictWord{7, 0, 1548}, + dictWord{7, 0, 1835}, + dictWord{7, 0, 1904}, + dictWord{9, 0, 757}, + dictWord{ + 10, + 0, + 604, + }, + dictWord{139, 0, 519}, + dictWord{132, 0, 540}, + dictWord{138, 11, 308}, + dictWord{132, 10, 533}, + dictWord{136, 0, 608}, + dictWord{144, 11, 65}, + dictWord{4, 0, 1014}, + dictWord{134, 0, 2029}, + dictWord{4, 0, 209}, + dictWord{7, 0, 902}, + dictWord{5, 11, 
1002}, + dictWord{136, 11, 745}, + dictWord{134, 0, 2030}, + dictWord{6, 0, 303}, + dictWord{7, 0, 335}, + dictWord{7, 0, 1437}, + dictWord{7, 0, 1668}, + dictWord{8, 0, 553}, + dictWord{8, 0, 652}, + dictWord{8, 0, 656}, + dictWord{ + 9, + 0, + 558, + }, + dictWord{11, 0, 743}, + dictWord{149, 0, 18}, + dictWord{5, 11, 575}, + dictWord{6, 11, 354}, + dictWord{135, 11, 701}, + dictWord{4, 11, 239}, + dictWord{ + 6, + 11, + 477, + }, + dictWord{7, 11, 1607}, + dictWord{11, 11, 68}, + dictWord{139, 11, 617}, + dictWord{132, 0, 559}, + dictWord{8, 0, 527}, + dictWord{18, 0, 60}, + dictWord{ + 147, + 0, + 24, + }, + dictWord{133, 10, 920}, + dictWord{138, 0, 511}, + dictWord{133, 0, 1017}, + dictWord{133, 0, 675}, + dictWord{138, 10, 391}, + dictWord{11, 0, 156}, + dictWord{135, 10, 1952}, + dictWord{138, 11, 369}, + dictWord{132, 11, 367}, + dictWord{133, 0, 709}, + dictWord{6, 0, 698}, + dictWord{134, 0, 887}, + dictWord{ + 142, + 10, + 126, + }, + dictWord{134, 0, 1745}, + dictWord{132, 10, 483}, + dictWord{13, 11, 299}, + dictWord{142, 11, 75}, + dictWord{133, 0, 714}, + dictWord{7, 0, 8}, + dictWord{ + 136, + 0, + 206, + }, + dictWord{138, 10, 480}, + dictWord{4, 11, 694}, + dictWord{9, 10, 495}, + dictWord{146, 10, 104}, + dictWord{7, 11, 1248}, + dictWord{11, 11, 621}, + dictWord{139, 11, 702}, + dictWord{140, 11, 687}, + dictWord{132, 0, 776}, + dictWord{139, 10, 1009}, + dictWord{135, 0, 1272}, + dictWord{134, 0, 1059}, + dictWord{ + 8, + 10, + 653, + }, + dictWord{13, 10, 93}, + dictWord{147, 10, 14}, + dictWord{135, 11, 213}, + dictWord{136, 0, 406}, + dictWord{133, 10, 172}, + dictWord{132, 0, 947}, + dictWord{8, 0, 175}, + dictWord{10, 0, 168}, + dictWord{138, 0, 573}, + dictWord{132, 0, 870}, + dictWord{6, 0, 1567}, + dictWord{151, 11, 28}, + dictWord{ + 134, + 11, + 472, + }, + dictWord{5, 10, 260}, + dictWord{136, 11, 132}, + dictWord{4, 11, 751}, + dictWord{11, 11, 390}, + dictWord{140, 11, 32}, + dictWord{4, 11, 409}, + dictWord{ + 133, + 11, + 78, + }, + dictWord{12, 0, 554}, + dictWord{6, 11, 473}, + dictWord{145, 11, 105}, + dictWord{133, 0, 784}, + dictWord{8, 0, 908}, + dictWord{136, 11, 306}, + dictWord{139, 0, 882}, + dictWord{6, 0, 358}, + dictWord{7, 0, 1393}, + dictWord{8, 0, 396}, + dictWord{10, 0, 263}, + dictWord{14, 0, 154}, + dictWord{16, 0, 48}, + dictWord{ + 17, + 0, + 8, + }, + dictWord{7, 11, 1759}, + dictWord{8, 11, 396}, + dictWord{10, 11, 263}, + dictWord{14, 11, 154}, + dictWord{16, 11, 48}, + dictWord{145, 11, 8}, + dictWord{ + 13, + 11, + 163, + }, + dictWord{13, 11, 180}, + dictWord{18, 11, 78}, + dictWord{148, 11, 35}, + dictWord{14, 0, 32}, + dictWord{18, 0, 85}, + dictWord{20, 0, 2}, + dictWord{152, 0, 16}, + dictWord{7, 0, 228}, + dictWord{10, 0, 770}, + dictWord{8, 10, 167}, + dictWord{8, 10, 375}, + dictWord{9, 10, 82}, + dictWord{9, 10, 561}, + dictWord{138, 10, 620}, + dictWord{132, 0, 845}, + dictWord{9, 0, 14}, + dictWord{9, 0, 441}, + dictWord{10, 0, 306}, + dictWord{139, 0, 9}, + dictWord{11, 0, 966}, + dictWord{12, 0, 287}, + dictWord{ + 13, + 0, + 342, + }, + dictWord{13, 0, 402}, + dictWord{15, 0, 110}, + dictWord{15, 0, 163}, + dictWord{8, 10, 194}, + dictWord{136, 10, 756}, + dictWord{134, 0, 1578}, + dictWord{ + 4, + 0, + 967, + }, + dictWord{6, 0, 1820}, + dictWord{6, 0, 1847}, + dictWord{140, 0, 716}, + dictWord{136, 0, 594}, + dictWord{7, 0, 1428}, + dictWord{7, 0, 1640}, + dictWord{ + 7, + 0, + 1867, + }, + dictWord{9, 0, 169}, + dictWord{9, 0, 182}, + dictWord{9, 0, 367}, + dictWord{9, 0, 478}, + dictWord{9, 0, 506}, + 
dictWord{9, 0, 551}, + dictWord{9, 0, 557}, + dictWord{ + 9, + 0, + 648, + }, + dictWord{9, 0, 697}, + dictWord{9, 0, 705}, + dictWord{9, 0, 725}, + dictWord{9, 0, 787}, + dictWord{9, 0, 794}, + dictWord{10, 0, 198}, + dictWord{10, 0, 214}, + dictWord{10, 0, 267}, + dictWord{10, 0, 275}, + dictWord{10, 0, 456}, + dictWord{10, 0, 551}, + dictWord{10, 0, 561}, + dictWord{10, 0, 613}, + dictWord{10, 0, 627}, + dictWord{ + 10, + 0, + 668, + }, + dictWord{10, 0, 675}, + dictWord{10, 0, 691}, + dictWord{10, 0, 695}, + dictWord{10, 0, 707}, + dictWord{10, 0, 715}, + dictWord{11, 0, 183}, + dictWord{ + 11, + 0, + 201, + }, + dictWord{11, 0, 244}, + dictWord{11, 0, 262}, + dictWord{11, 0, 352}, + dictWord{11, 0, 439}, + dictWord{11, 0, 493}, + dictWord{11, 0, 572}, + dictWord{11, 0, 591}, + dictWord{11, 0, 608}, + dictWord{11, 0, 611}, + dictWord{11, 0, 646}, + dictWord{11, 0, 674}, + dictWord{11, 0, 711}, + dictWord{11, 0, 751}, + dictWord{11, 0, 761}, + dictWord{11, 0, 776}, + dictWord{11, 0, 785}, + dictWord{11, 0, 850}, + dictWord{11, 0, 853}, + dictWord{11, 0, 862}, + dictWord{11, 0, 865}, + dictWord{11, 0, 868}, + dictWord{ + 11, + 0, + 875, + }, + dictWord{11, 0, 898}, + dictWord{11, 0, 902}, + dictWord{11, 0, 903}, + dictWord{11, 0, 910}, + dictWord{11, 0, 932}, + dictWord{11, 0, 942}, + dictWord{ + 11, + 0, + 957, + }, + dictWord{11, 0, 967}, + dictWord{11, 0, 972}, + dictWord{12, 0, 148}, + dictWord{12, 0, 195}, + dictWord{12, 0, 220}, + dictWord{12, 0, 237}, + dictWord{12, 0, 318}, + dictWord{12, 0, 339}, + dictWord{12, 0, 393}, + dictWord{12, 0, 445}, + dictWord{12, 0, 450}, + dictWord{12, 0, 474}, + dictWord{12, 0, 505}, + dictWord{12, 0, 509}, + dictWord{12, 0, 533}, + dictWord{12, 0, 591}, + dictWord{12, 0, 594}, + dictWord{12, 0, 597}, + dictWord{12, 0, 621}, + dictWord{12, 0, 633}, + dictWord{12, 0, 642}, + dictWord{ + 13, + 0, + 59, + }, + dictWord{13, 0, 60}, + dictWord{13, 0, 145}, + dictWord{13, 0, 239}, + dictWord{13, 0, 250}, + dictWord{13, 0, 329}, + dictWord{13, 0, 344}, + dictWord{13, 0, 365}, + dictWord{13, 0, 372}, + dictWord{13, 0, 387}, + dictWord{13, 0, 403}, + dictWord{13, 0, 414}, + dictWord{13, 0, 456}, + dictWord{13, 0, 470}, + dictWord{13, 0, 478}, + dictWord{13, 0, 483}, + dictWord{13, 0, 489}, + dictWord{14, 0, 55}, + dictWord{14, 0, 57}, + dictWord{14, 0, 81}, + dictWord{14, 0, 90}, + dictWord{14, 0, 148}, + dictWord{ + 14, + 0, + 239, + }, + dictWord{14, 0, 266}, + dictWord{14, 0, 321}, + dictWord{14, 0, 326}, + dictWord{14, 0, 327}, + dictWord{14, 0, 330}, + dictWord{14, 0, 347}, + dictWord{14, 0, 355}, + dictWord{14, 0, 401}, + dictWord{14, 0, 404}, + dictWord{14, 0, 411}, + dictWord{14, 0, 414}, + dictWord{14, 0, 416}, + dictWord{14, 0, 420}, + dictWord{15, 0, 61}, + dictWord{15, 0, 74}, + dictWord{15, 0, 87}, + dictWord{15, 0, 88}, + dictWord{15, 0, 94}, + dictWord{15, 0, 96}, + dictWord{15, 0, 116}, + dictWord{15, 0, 149}, + dictWord{15, 0, 154}, + dictWord{16, 0, 50}, + dictWord{16, 0, 63}, + dictWord{16, 0, 73}, + dictWord{17, 0, 2}, + dictWord{17, 0, 66}, + dictWord{17, 0, 92}, + dictWord{17, 0, 103}, + dictWord{ + 17, + 0, + 112, + }, + dictWord{17, 0, 120}, + dictWord{18, 0, 50}, + dictWord{18, 0, 54}, + dictWord{18, 0, 82}, + dictWord{18, 0, 86}, + dictWord{18, 0, 90}, + dictWord{18, 0, 111}, + dictWord{ + 18, + 0, + 115, + }, + dictWord{18, 0, 156}, + dictWord{19, 0, 40}, + dictWord{19, 0, 79}, + dictWord{20, 0, 78}, + dictWord{21, 0, 22}, + dictWord{135, 11, 883}, + dictWord{5, 0, 161}, + dictWord{135, 0, 839}, + dictWord{4, 0, 782}, + 
dictWord{13, 11, 293}, + dictWord{142, 11, 56}, + dictWord{133, 11, 617}, + dictWord{139, 11, 50}, + dictWord{ + 135, + 10, + 22, + }, + dictWord{145, 0, 64}, + dictWord{5, 10, 639}, + dictWord{7, 10, 1249}, + dictWord{139, 10, 896}, + dictWord{138, 0, 998}, + dictWord{135, 11, 2042}, + dictWord{ + 4, + 11, + 546, + }, + dictWord{142, 11, 233}, + dictWord{6, 0, 1043}, + dictWord{134, 0, 1574}, + dictWord{134, 0, 1496}, + dictWord{4, 10, 102}, + dictWord{7, 10, 815}, + dictWord{7, 10, 1699}, + dictWord{139, 10, 964}, + dictWord{12, 0, 781}, + dictWord{142, 0, 461}, + dictWord{4, 11, 313}, + dictWord{133, 11, 577}, + dictWord{ + 6, + 0, + 639, + }, + dictWord{6, 0, 1114}, + dictWord{137, 0, 817}, + dictWord{8, 11, 184}, + dictWord{141, 11, 433}, + dictWord{7, 0, 1814}, + dictWord{135, 11, 935}, + dictWord{ + 10, + 0, + 997, + }, + dictWord{140, 0, 958}, + dictWord{4, 0, 812}, + dictWord{137, 11, 625}, + dictWord{132, 10, 899}, + dictWord{136, 10, 795}, + dictWord{5, 11, 886}, + dictWord{6, 11, 46}, + dictWord{6, 11, 1790}, + dictWord{7, 11, 14}, + dictWord{7, 11, 732}, + dictWord{7, 11, 1654}, + dictWord{8, 11, 95}, + dictWord{8, 11, 327}, + dictWord{ + 8, + 11, + 616, + }, + dictWord{10, 11, 598}, + dictWord{10, 11, 769}, + dictWord{11, 11, 134}, + dictWord{11, 11, 747}, + dictWord{12, 11, 378}, + dictWord{142, 11, 97}, + dictWord{136, 0, 139}, + dictWord{6, 10, 52}, + dictWord{9, 10, 104}, + dictWord{9, 10, 559}, + dictWord{12, 10, 308}, + dictWord{147, 10, 87}, + dictWord{133, 11, 1021}, + dictWord{132, 10, 604}, + dictWord{132, 10, 301}, + dictWord{136, 10, 779}, + dictWord{7, 0, 643}, + dictWord{136, 0, 236}, + dictWord{132, 11, 153}, + dictWord{ + 134, + 0, + 1172, + }, + dictWord{147, 10, 32}, + dictWord{133, 11, 798}, + dictWord{6, 0, 1338}, + dictWord{132, 11, 587}, + dictWord{6, 11, 598}, + dictWord{7, 11, 42}, + dictWord{ + 8, + 11, + 695, + }, + dictWord{10, 11, 212}, + dictWord{11, 11, 158}, + dictWord{14, 11, 196}, + dictWord{145, 11, 85}, + dictWord{135, 10, 508}, + dictWord{5, 11, 957}, + dictWord{5, 11, 1008}, + dictWord{135, 11, 249}, + dictWord{4, 11, 129}, + dictWord{135, 11, 465}, + dictWord{5, 0, 54}, + dictWord{7, 11, 470}, + dictWord{7, 11, 1057}, + dictWord{7, 11, 1201}, + dictWord{9, 11, 755}, + dictWord{11, 11, 906}, + dictWord{140, 11, 527}, + dictWord{7, 11, 908}, + dictWord{146, 11, 7}, + dictWord{ + 5, + 11, + 148, + }, + dictWord{136, 11, 450}, + dictWord{144, 11, 1}, + dictWord{4, 0, 256}, + dictWord{135, 0, 1488}, + dictWord{9, 0, 351}, + dictWord{6, 10, 310}, + dictWord{ + 7, + 10, + 1849, + }, + dictWord{8, 10, 72}, + dictWord{8, 10, 272}, + dictWord{8, 10, 431}, + dictWord{9, 10, 12}, + dictWord{10, 10, 563}, + dictWord{10, 10, 630}, + dictWord{ + 10, + 10, + 796, + }, + dictWord{10, 10, 810}, + dictWord{11, 10, 367}, + dictWord{11, 10, 599}, + dictWord{11, 10, 686}, + dictWord{140, 10, 672}, + dictWord{6, 0, 1885}, + dictWord{ + 6, + 0, + 1898, + }, + dictWord{6, 0, 1899}, + dictWord{140, 0, 955}, + dictWord{4, 0, 714}, + dictWord{133, 0, 469}, + dictWord{6, 0, 1270}, + dictWord{134, 0, 1456}, + dictWord{132, 0, 744}, + dictWord{6, 0, 313}, + dictWord{7, 10, 537}, + dictWord{8, 10, 64}, + dictWord{9, 10, 127}, + dictWord{10, 10, 496}, + dictWord{12, 10, 510}, + dictWord{141, 10, 384}, + dictWord{4, 11, 217}, + dictWord{4, 10, 244}, + dictWord{5, 11, 710}, + dictWord{7, 10, 233}, + dictWord{7, 11, 1926}, + dictWord{9, 11, 428}, + dictWord{9, 11, 708}, + dictWord{10, 11, 254}, + dictWord{10, 11, 296}, + dictWord{10, 11, 720}, + dictWord{11, 11, 109}, + 
dictWord{11, 11, 255}, + dictWord{12, 11, 165}, + dictWord{12, 11, 315}, + dictWord{13, 11, 107}, + dictWord{13, 11, 203}, + dictWord{14, 11, 54}, + dictWord{14, 11, 99}, + dictWord{14, 11, 114}, + dictWord{ + 14, + 11, + 388, + }, + dictWord{16, 11, 85}, + dictWord{17, 11, 9}, + dictWord{17, 11, 33}, + dictWord{20, 11, 25}, + dictWord{20, 11, 28}, + dictWord{20, 11, 29}, + dictWord{21, 11, 9}, + dictWord{21, 11, 10}, + dictWord{21, 11, 34}, + dictWord{150, 11, 17}, + dictWord{138, 0, 402}, + dictWord{7, 0, 969}, + dictWord{146, 0, 55}, + dictWord{8, 0, 50}, + dictWord{ + 137, + 0, + 624, + }, + dictWord{134, 0, 1355}, + dictWord{132, 0, 572}, + dictWord{134, 10, 1650}, + dictWord{10, 10, 702}, + dictWord{139, 10, 245}, + dictWord{ + 10, + 0, + 847, + }, + dictWord{142, 0, 445}, + dictWord{6, 0, 43}, + dictWord{7, 0, 38}, + dictWord{8, 0, 248}, + dictWord{138, 0, 513}, + dictWord{133, 0, 369}, + dictWord{137, 10, 338}, + dictWord{133, 0, 766}, + dictWord{133, 0, 363}, + dictWord{133, 10, 896}, + dictWord{8, 11, 392}, + dictWord{11, 11, 54}, + dictWord{13, 11, 173}, + dictWord{ + 13, + 11, + 294, + }, + dictWord{148, 11, 7}, + dictWord{134, 0, 678}, + dictWord{7, 11, 1230}, + dictWord{136, 11, 531}, + dictWord{6, 0, 258}, + dictWord{140, 0, 409}, + dictWord{ + 5, + 0, + 249, + }, + dictWord{148, 0, 82}, + dictWord{7, 10, 1117}, + dictWord{136, 10, 539}, + dictWord{5, 0, 393}, + dictWord{6, 0, 378}, + dictWord{7, 0, 1981}, + dictWord{9, 0, 32}, + dictWord{9, 0, 591}, + dictWord{10, 0, 685}, + dictWord{10, 0, 741}, + dictWord{142, 0, 382}, + dictWord{133, 0, 788}, + dictWord{134, 0, 1281}, + dictWord{ + 134, + 0, + 1295, + }, + dictWord{7, 0, 1968}, + dictWord{141, 0, 509}, + dictWord{4, 0, 61}, + dictWord{5, 0, 58}, + dictWord{5, 0, 171}, + dictWord{5, 0, 683}, + dictWord{6, 0, 291}, + dictWord{ + 6, + 0, + 566, + }, + dictWord{7, 0, 1650}, + dictWord{11, 0, 523}, + dictWord{12, 0, 273}, + dictWord{12, 0, 303}, + dictWord{15, 0, 39}, + dictWord{143, 0, 111}, + dictWord{ + 6, + 0, + 706, + }, + dictWord{134, 0, 1283}, + dictWord{134, 0, 589}, + dictWord{135, 11, 1433}, + dictWord{133, 11, 435}, + dictWord{7, 0, 1059}, + dictWord{13, 0, 54}, + dictWord{ + 5, + 10, + 4, + }, + dictWord{5, 10, 810}, + dictWord{6, 10, 13}, + dictWord{6, 10, 538}, + dictWord{6, 10, 1690}, + dictWord{6, 10, 1726}, + dictWord{7, 10, 1819}, + dictWord{ + 8, + 10, + 148, + }, + dictWord{8, 10, 696}, + dictWord{8, 10, 791}, + dictWord{12, 10, 125}, + dictWord{143, 10, 9}, + dictWord{135, 10, 1268}, + dictWord{5, 11, 85}, + dictWord{ + 6, + 11, + 419, + }, + dictWord{7, 11, 134}, + dictWord{7, 11, 305}, + dictWord{7, 11, 361}, + dictWord{7, 11, 1337}, + dictWord{8, 11, 71}, + dictWord{140, 11, 519}, + dictWord{ + 137, + 0, + 824, + }, + dictWord{140, 11, 688}, + dictWord{5, 11, 691}, + dictWord{7, 11, 345}, + dictWord{7, 10, 1385}, + dictWord{9, 11, 94}, + dictWord{11, 10, 582}, + dictWord{ + 11, + 10, + 650, + }, + dictWord{11, 10, 901}, + dictWord{11, 10, 949}, + dictWord{12, 11, 169}, + dictWord{12, 10, 232}, + dictWord{12, 10, 236}, + dictWord{13, 10, 413}, + dictWord{13, 10, 501}, + dictWord{146, 10, 116}, + dictWord{4, 0, 917}, + dictWord{133, 0, 1005}, + dictWord{7, 0, 1598}, + dictWord{5, 11, 183}, + dictWord{6, 11, 582}, + dictWord{9, 11, 344}, + dictWord{10, 11, 679}, + dictWord{140, 11, 435}, + dictWord{4, 10, 925}, + dictWord{5, 10, 803}, + dictWord{8, 10, 698}, + dictWord{ + 138, + 10, + 828, + }, + dictWord{132, 0, 919}, + dictWord{135, 11, 511}, + dictWord{139, 10, 992}, + dictWord{4, 0, 255}, + 
dictWord{5, 0, 302}, + dictWord{6, 0, 132}, + dictWord{ + 7, + 0, + 128, + }, + dictWord{7, 0, 283}, + dictWord{7, 0, 1299}, + dictWord{10, 0, 52}, + dictWord{10, 0, 514}, + dictWord{11, 0, 925}, + dictWord{13, 0, 92}, + dictWord{142, 0, 309}, + dictWord{134, 0, 1369}, + dictWord{135, 10, 1847}, + dictWord{134, 0, 328}, + dictWord{7, 11, 1993}, + dictWord{136, 11, 684}, + dictWord{133, 10, 383}, + dictWord{137, 0, 173}, + dictWord{134, 11, 583}, + dictWord{134, 0, 1411}, + dictWord{19, 0, 65}, + dictWord{5, 11, 704}, + dictWord{8, 11, 357}, + dictWord{10, 11, 745}, + dictWord{14, 11, 426}, + dictWord{17, 11, 94}, + dictWord{147, 11, 57}, + dictWord{9, 10, 660}, + dictWord{138, 10, 347}, + dictWord{4, 11, 179}, + dictWord{5, 11, 198}, + dictWord{133, 11, 697}, + dictWord{7, 11, 347}, + dictWord{7, 11, 971}, + dictWord{8, 11, 181}, + dictWord{138, 11, 711}, + dictWord{141, 0, 442}, + dictWord{ + 11, + 0, + 842, + }, + dictWord{11, 0, 924}, + dictWord{13, 0, 317}, + dictWord{13, 0, 370}, + dictWord{13, 0, 469}, + dictWord{13, 0, 471}, + dictWord{14, 0, 397}, + dictWord{18, 0, 69}, + dictWord{18, 0, 145}, + dictWord{7, 10, 572}, + dictWord{9, 10, 592}, + dictWord{11, 10, 680}, + dictWord{12, 10, 356}, + dictWord{140, 10, 550}, + dictWord{14, 11, 19}, + dictWord{14, 11, 28}, + dictWord{144, 11, 29}, + dictWord{136, 0, 534}, + dictWord{4, 11, 243}, + dictWord{5, 11, 203}, + dictWord{7, 11, 19}, + dictWord{7, 11, 71}, + dictWord{7, 11, 113}, + dictWord{10, 11, 405}, + dictWord{11, 11, 357}, + dictWord{142, 11, 240}, + dictWord{6, 0, 210}, + dictWord{10, 0, 845}, + dictWord{138, 0, 862}, + dictWord{7, 11, 1351}, + dictWord{9, 11, 581}, + dictWord{10, 11, 639}, + dictWord{11, 11, 453}, + dictWord{140, 11, 584}, + dictWord{7, 11, 1450}, + dictWord{ + 139, + 11, + 99, + }, + dictWord{10, 0, 892}, + dictWord{12, 0, 719}, + dictWord{144, 0, 105}, + dictWord{4, 0, 284}, + dictWord{6, 0, 223}, + dictWord{134, 11, 492}, + dictWord{5, 11, 134}, + dictWord{6, 11, 408}, + dictWord{6, 11, 495}, + dictWord{135, 11, 1593}, + dictWord{136, 0, 529}, + dictWord{137, 0, 807}, + dictWord{4, 0, 218}, + dictWord{7, 0, 526}, + dictWord{143, 0, 137}, + dictWord{6, 0, 1444}, + dictWord{142, 11, 4}, + dictWord{132, 11, 665}, + dictWord{4, 0, 270}, + dictWord{5, 0, 192}, + dictWord{6, 0, 332}, + dictWord{7, 0, 1322}, + dictWord{4, 11, 248}, + dictWord{7, 11, 137}, + dictWord{137, 11, 349}, + dictWord{140, 0, 661}, + dictWord{7, 0, 1517}, + dictWord{11, 0, 597}, + dictWord{14, 0, 76}, + dictWord{14, 0, 335}, + dictWord{20, 0, 33}, + dictWord{7, 10, 748}, + dictWord{139, 10, 700}, + dictWord{5, 11, 371}, + dictWord{135, 11, 563}, + dictWord{146, 11, 57}, + dictWord{133, 10, 127}, + dictWord{133, 0, 418}, + dictWord{4, 11, 374}, + dictWord{7, 11, 547}, + dictWord{7, 11, 1700}, + dictWord{7, 11, 1833}, + dictWord{139, 11, 858}, + dictWord{6, 10, 198}, + dictWord{140, 10, 83}, + dictWord{7, 11, 1812}, + dictWord{13, 11, 259}, + dictWord{13, 11, 356}, + dictWord{ + 14, + 11, + 242, + }, + dictWord{147, 11, 114}, + dictWord{7, 0, 379}, + dictWord{8, 0, 481}, + dictWord{9, 0, 377}, + dictWord{5, 10, 276}, + dictWord{6, 10, 55}, + dictWord{ + 135, + 10, + 1369, + }, + dictWord{138, 11, 286}, + dictWord{5, 0, 1003}, + dictWord{6, 0, 149}, + dictWord{6, 10, 1752}, + dictWord{136, 10, 726}, + dictWord{8, 0, 262}, + dictWord{ + 9, + 0, + 627, + }, + dictWord{10, 0, 18}, + dictWord{11, 0, 214}, + dictWord{11, 0, 404}, + dictWord{11, 0, 457}, + dictWord{11, 0, 780}, + dictWord{11, 0, 913}, + dictWord{13, 0, 401}, + dictWord{14, 0, 200}, 
+ dictWord{6, 11, 1647}, + dictWord{7, 11, 1552}, + dictWord{7, 11, 2010}, + dictWord{9, 11, 494}, + dictWord{137, 11, 509}, + dictWord{ + 135, + 0, + 742, + }, + dictWord{136, 0, 304}, + dictWord{132, 0, 142}, + dictWord{133, 10, 764}, + dictWord{6, 10, 309}, + dictWord{7, 10, 331}, + dictWord{138, 10, 550}, + dictWord{135, 10, 1062}, + dictWord{6, 11, 123}, + dictWord{7, 11, 214}, + dictWord{7, 10, 986}, + dictWord{9, 11, 728}, + dictWord{10, 11, 157}, + dictWord{11, 11, 346}, + dictWord{11, 11, 662}, + dictWord{143, 11, 106}, + dictWord{135, 10, 1573}, + dictWord{7, 0, 925}, + dictWord{137, 0, 799}, + dictWord{4, 0, 471}, + dictWord{5, 0, 51}, + dictWord{6, 0, 602}, + dictWord{8, 0, 484}, + dictWord{138, 0, 195}, + dictWord{136, 0, 688}, + dictWord{132, 0, 697}, + dictWord{6, 0, 1169}, + dictWord{6, 0, 1241}, + dictWord{6, 10, 194}, + dictWord{7, 10, 133}, + dictWord{10, 10, 493}, + dictWord{10, 10, 570}, + dictWord{139, 10, 664}, + dictWord{140, 0, 751}, + dictWord{7, 0, 929}, + dictWord{10, 0, 452}, + dictWord{11, 0, 878}, + dictWord{16, 0, 33}, + dictWord{5, 10, 24}, + dictWord{5, 10, 569}, + dictWord{6, 10, 3}, + dictWord{6, 10, 119}, + dictWord{ + 6, + 10, + 143, + }, + dictWord{6, 10, 440}, + dictWord{7, 10, 599}, + dictWord{7, 10, 1686}, + dictWord{7, 10, 1854}, + dictWord{8, 10, 424}, + dictWord{9, 10, 43}, + dictWord{ + 9, + 10, + 584, + }, + dictWord{9, 10, 760}, + dictWord{10, 10, 328}, + dictWord{11, 10, 159}, + dictWord{11, 10, 253}, + dictWord{12, 10, 487}, + dictWord{140, 10, 531}, + dictWord{ + 4, + 11, + 707, + }, + dictWord{13, 11, 106}, + dictWord{18, 11, 49}, + dictWord{147, 11, 41}, + dictWord{5, 0, 221}, + dictWord{5, 11, 588}, + dictWord{134, 11, 393}, + dictWord{134, 0, 1437}, + dictWord{6, 11, 211}, + dictWord{7, 11, 1690}, + dictWord{11, 11, 486}, + dictWord{140, 11, 369}, + dictWord{5, 10, 14}, + dictWord{5, 10, 892}, + dictWord{6, 10, 283}, + dictWord{7, 10, 234}, + dictWord{136, 10, 537}, + dictWord{4, 0, 988}, + dictWord{136, 0, 955}, + dictWord{135, 0, 1251}, + dictWord{4, 10, 126}, + dictWord{8, 10, 635}, + dictWord{147, 10, 34}, + dictWord{4, 10, 316}, + dictWord{135, 10, 1561}, + dictWord{137, 10, 861}, + dictWord{4, 10, 64}, + dictWord{ + 5, + 10, + 352, + }, + dictWord{5, 10, 720}, + dictWord{6, 10, 368}, + dictWord{139, 10, 359}, + dictWord{134, 0, 192}, + dictWord{4, 0, 132}, + dictWord{5, 0, 69}, + dictWord{ + 135, + 0, + 1242, + }, + dictWord{7, 10, 1577}, + dictWord{10, 10, 304}, + dictWord{10, 10, 549}, + dictWord{12, 10, 365}, + dictWord{13, 10, 220}, + dictWord{13, 10, 240}, + dictWord{142, 10, 33}, + dictWord{4, 0, 111}, + dictWord{7, 0, 865}, + dictWord{134, 11, 219}, + dictWord{5, 11, 582}, + dictWord{6, 11, 1646}, + dictWord{7, 11, 99}, + dictWord{ + 7, + 11, + 1962, + }, + dictWord{7, 11, 1986}, + dictWord{8, 11, 515}, + dictWord{8, 11, 773}, + dictWord{9, 11, 23}, + dictWord{9, 11, 491}, + dictWord{12, 11, 620}, + dictWord{ + 14, + 11, + 52, + }, + dictWord{145, 11, 50}, + dictWord{132, 0, 767}, + dictWord{7, 11, 568}, + dictWord{148, 11, 21}, + dictWord{6, 0, 42}, + dictWord{7, 0, 1416}, + dictWord{ + 7, + 0, + 2005, + }, + dictWord{8, 0, 131}, + dictWord{8, 0, 466}, + dictWord{9, 0, 672}, + dictWord{13, 0, 252}, + dictWord{20, 0, 103}, + dictWord{133, 11, 851}, + dictWord{ + 135, + 0, + 1050, + }, + dictWord{6, 10, 175}, + dictWord{137, 10, 289}, + dictWord{5, 10, 432}, + dictWord{133, 10, 913}, + dictWord{6, 0, 44}, + dictWord{136, 0, 368}, + dictWord{ + 135, + 11, + 784, + }, + dictWord{132, 0, 570}, + dictWord{133, 0, 120}, + 
dictWord{139, 10, 595}, + dictWord{140, 0, 29}, + dictWord{6, 0, 227}, + dictWord{135, 0, 1589}, + dictWord{4, 11, 98}, + dictWord{7, 11, 1365}, + dictWord{9, 11, 422}, + dictWord{9, 11, 670}, + dictWord{10, 11, 775}, + dictWord{11, 11, 210}, + dictWord{13, 11, 26}, + dictWord{13, 11, 457}, + dictWord{141, 11, 476}, + dictWord{140, 10, 80}, + dictWord{5, 10, 931}, + dictWord{134, 10, 1698}, + dictWord{133, 0, 522}, + dictWord{ + 134, + 0, + 1120, + }, + dictWord{135, 0, 1529}, + dictWord{12, 0, 739}, + dictWord{14, 0, 448}, + dictWord{142, 0, 467}, + dictWord{11, 10, 526}, + dictWord{11, 10, 939}, + dictWord{141, 10, 290}, + dictWord{5, 10, 774}, + dictWord{6, 10, 1637}, + dictWord{6, 10, 1686}, + dictWord{134, 10, 1751}, + dictWord{6, 0, 1667}, + dictWord{ + 135, + 0, + 2036, + }, + dictWord{7, 10, 1167}, + dictWord{11, 10, 934}, + dictWord{13, 10, 391}, + dictWord{145, 10, 76}, + dictWord{137, 11, 147}, + dictWord{6, 10, 260}, + dictWord{ + 7, + 10, + 1484, + }, + dictWord{11, 11, 821}, + dictWord{12, 11, 110}, + dictWord{12, 11, 153}, + dictWord{18, 11, 41}, + dictWord{150, 11, 19}, + dictWord{6, 0, 511}, + dictWord{12, 0, 132}, + dictWord{134, 10, 573}, + dictWord{5, 0, 568}, + dictWord{6, 0, 138}, + dictWord{135, 0, 1293}, + dictWord{132, 0, 1020}, + dictWord{8, 0, 258}, + dictWord{9, 0, 208}, + dictWord{137, 0, 359}, + dictWord{4, 0, 565}, + dictWord{8, 0, 23}, + dictWord{136, 0, 827}, + dictWord{134, 0, 344}, + dictWord{4, 0, 922}, + dictWord{ + 5, + 0, + 1023, + }, + dictWord{13, 11, 477}, + dictWord{14, 11, 120}, + dictWord{148, 11, 61}, + dictWord{134, 0, 240}, + dictWord{5, 11, 209}, + dictWord{6, 11, 30}, + dictWord{ + 11, + 11, + 56, + }, + dictWord{139, 11, 305}, + dictWord{6, 0, 171}, + dictWord{7, 0, 1002}, + dictWord{7, 0, 1324}, + dictWord{9, 0, 415}, + dictWord{14, 0, 230}, + dictWord{ + 18, + 0, + 68, + }, + dictWord{4, 10, 292}, + dictWord{4, 10, 736}, + dictWord{5, 10, 871}, + dictWord{6, 10, 1689}, + dictWord{7, 10, 1944}, + dictWord{137, 10, 580}, + dictWord{ + 9, + 11, + 635, + }, + dictWord{139, 11, 559}, + dictWord{4, 11, 150}, + dictWord{5, 11, 303}, + dictWord{134, 11, 327}, + dictWord{6, 10, 63}, + dictWord{135, 10, 920}, + dictWord{ + 133, + 10, + 793, + }, + dictWord{8, 11, 192}, + dictWord{10, 11, 78}, + dictWord{10, 11, 555}, + dictWord{11, 11, 308}, + dictWord{13, 11, 359}, + dictWord{147, 11, 95}, + dictWord{135, 11, 786}, + dictWord{135, 11, 1712}, + dictWord{136, 0, 402}, + dictWord{6, 0, 754}, + dictWord{6, 11, 1638}, + dictWord{7, 11, 79}, + dictWord{7, 11, 496}, + dictWord{9, 11, 138}, + dictWord{10, 11, 336}, + dictWord{11, 11, 12}, + dictWord{12, 11, 412}, + dictWord{12, 11, 440}, + dictWord{142, 11, 305}, + dictWord{4, 0, 716}, + dictWord{141, 0, 31}, + dictWord{133, 0, 982}, + dictWord{8, 0, 691}, + dictWord{8, 0, 731}, + dictWord{5, 10, 67}, + dictWord{6, 10, 62}, + dictWord{6, 10, 374}, + dictWord{ + 135, + 10, + 1391, + }, + dictWord{9, 10, 790}, + dictWord{140, 10, 47}, + dictWord{139, 11, 556}, + dictWord{151, 11, 1}, + dictWord{7, 11, 204}, + dictWord{7, 11, 415}, + dictWord{8, 11, 42}, + dictWord{10, 11, 85}, + dictWord{11, 11, 33}, + dictWord{11, 11, 564}, + dictWord{12, 11, 571}, + dictWord{149, 11, 1}, + dictWord{8, 0, 888}, + dictWord{ + 7, + 11, + 610, + }, + dictWord{135, 11, 1501}, + dictWord{4, 10, 391}, + dictWord{135, 10, 1169}, + dictWord{5, 0, 847}, + dictWord{9, 0, 840}, + dictWord{138, 0, 803}, + dictWord{137, 0, 823}, + dictWord{134, 0, 785}, + dictWord{8, 0, 152}, + dictWord{9, 0, 53}, + dictWord{9, 0, 268}, + dictWord{9, 
0, 901}, + dictWord{10, 0, 518}, + dictWord{ + 10, + 0, + 829, + }, + dictWord{11, 0, 188}, + dictWord{13, 0, 74}, + dictWord{14, 0, 46}, + dictWord{15, 0, 17}, + dictWord{15, 0, 33}, + dictWord{17, 0, 40}, + dictWord{18, 0, 36}, + dictWord{ + 19, + 0, + 20, + }, + dictWord{22, 0, 1}, + dictWord{152, 0, 2}, + dictWord{4, 11, 3}, + dictWord{5, 11, 247}, + dictWord{5, 11, 644}, + dictWord{7, 11, 744}, + dictWord{7, 11, 1207}, + dictWord{7, 11, 1225}, + dictWord{7, 11, 1909}, + dictWord{146, 11, 147}, + dictWord{136, 0, 532}, + dictWord{135, 0, 681}, + dictWord{132, 10, 271}, + dictWord{ + 140, + 0, + 314, + }, + dictWord{140, 0, 677}, + dictWord{4, 0, 684}, + dictWord{136, 0, 384}, + dictWord{5, 11, 285}, + dictWord{9, 11, 67}, + dictWord{13, 11, 473}, + dictWord{ + 143, + 11, + 82, + }, + dictWord{4, 10, 253}, + dictWord{5, 10, 544}, + dictWord{7, 10, 300}, + dictWord{137, 10, 340}, + dictWord{7, 0, 110}, + dictWord{7, 0, 447}, + dictWord{8, 0, 290}, + dictWord{8, 0, 591}, + dictWord{9, 0, 382}, + dictWord{9, 0, 649}, + dictWord{11, 0, 71}, + dictWord{11, 0, 155}, + dictWord{11, 0, 313}, + dictWord{12, 0, 5}, + dictWord{13, 0, 325}, + dictWord{142, 0, 287}, + dictWord{134, 0, 1818}, + dictWord{136, 0, 1007}, + dictWord{138, 0, 321}, + dictWord{7, 0, 360}, + dictWord{7, 0, 425}, + dictWord{9, 0, 66}, + dictWord{9, 0, 278}, + dictWord{138, 0, 644}, + dictWord{133, 10, 818}, + dictWord{5, 0, 385}, + dictWord{5, 10, 541}, + dictWord{6, 10, 94}, + dictWord{6, 10, 499}, + dictWord{ + 7, + 10, + 230, + }, + dictWord{139, 10, 321}, + dictWord{4, 10, 920}, + dictWord{5, 10, 25}, + dictWord{5, 10, 790}, + dictWord{6, 10, 457}, + dictWord{7, 10, 853}, + dictWord{ + 136, + 10, + 788, + }, + dictWord{4, 0, 900}, + dictWord{133, 0, 861}, + dictWord{5, 0, 254}, + dictWord{7, 0, 985}, + dictWord{136, 0, 73}, + dictWord{7, 0, 1959}, + dictWord{ + 136, + 0, + 683, + }, + dictWord{134, 10, 1765}, + dictWord{133, 10, 822}, + dictWord{132, 10, 634}, + dictWord{4, 11, 29}, + dictWord{6, 11, 532}, + dictWord{7, 11, 1628}, + dictWord{ + 7, + 11, + 1648, + }, + dictWord{9, 11, 303}, + dictWord{9, 11, 350}, + dictWord{10, 11, 433}, + dictWord{11, 11, 97}, + dictWord{11, 11, 557}, + dictWord{11, 11, 745}, + dictWord{12, 11, 289}, + dictWord{12, 11, 335}, + dictWord{12, 11, 348}, + dictWord{12, 11, 606}, + dictWord{13, 11, 116}, + dictWord{13, 11, 233}, + dictWord{ + 13, + 11, + 466, + }, + dictWord{14, 11, 181}, + dictWord{14, 11, 209}, + dictWord{14, 11, 232}, + dictWord{14, 11, 236}, + dictWord{14, 11, 300}, + dictWord{16, 11, 41}, + dictWord{ + 148, + 11, + 97, + }, + dictWord{19, 0, 86}, + dictWord{6, 10, 36}, + dictWord{7, 10, 658}, + dictWord{136, 10, 454}, + dictWord{135, 11, 1692}, + dictWord{132, 0, 725}, + dictWord{ + 5, + 11, + 501, + }, + dictWord{7, 11, 1704}, + dictWord{9, 11, 553}, + dictWord{11, 11, 520}, + dictWord{12, 11, 557}, + dictWord{141, 11, 249}, + dictWord{134, 0, 196}, + dictWord{133, 0, 831}, + dictWord{136, 0, 723}, + dictWord{7, 0, 1897}, + dictWord{13, 0, 80}, + dictWord{13, 0, 437}, + dictWord{145, 0, 74}, + dictWord{4, 0, 992}, + dictWord{ + 6, + 0, + 627, + }, + dictWord{136, 0, 994}, + dictWord{135, 11, 1294}, + dictWord{132, 10, 104}, + dictWord{5, 0, 848}, + dictWord{6, 0, 66}, + dictWord{136, 0, 764}, + dictWord{ + 4, + 0, + 36, + }, + dictWord{7, 0, 1387}, + dictWord{10, 0, 205}, + dictWord{139, 0, 755}, + dictWord{6, 0, 1046}, + dictWord{134, 0, 1485}, + dictWord{134, 0, 950}, + dictWord{132, 0, 887}, + dictWord{14, 0, 450}, + dictWord{148, 0, 111}, + dictWord{7, 0, 620}, + 
dictWord{7, 0, 831}, + dictWord{9, 10, 542}, + dictWord{9, 10, 566}, + dictWord{ + 138, + 10, + 728, + }, + dictWord{6, 0, 165}, + dictWord{138, 0, 388}, + dictWord{139, 10, 263}, + dictWord{4, 0, 719}, + dictWord{135, 0, 155}, + dictWord{138, 10, 468}, + dictWord{6, 11, 453}, + dictWord{144, 11, 36}, + dictWord{134, 11, 129}, + dictWord{5, 0, 533}, + dictWord{7, 0, 755}, + dictWord{138, 0, 780}, + dictWord{134, 0, 1465}, + dictWord{4, 0, 353}, + dictWord{6, 0, 146}, + dictWord{6, 0, 1789}, + dictWord{7, 0, 427}, + dictWord{7, 0, 990}, + dictWord{7, 0, 1348}, + dictWord{9, 0, 665}, + dictWord{9, 0, 898}, + dictWord{11, 0, 893}, + dictWord{142, 0, 212}, + dictWord{7, 10, 87}, + dictWord{142, 10, 288}, + dictWord{4, 0, 45}, + dictWord{135, 0, 1257}, + dictWord{12, 0, 7}, + dictWord{7, 10, 988}, + dictWord{7, 10, 1939}, + dictWord{9, 10, 64}, + dictWord{9, 10, 502}, + dictWord{12, 10, 34}, + dictWord{13, 10, 12}, + dictWord{13, 10, 234}, + dictWord{147, 10, 77}, + dictWord{4, 0, 607}, + dictWord{5, 11, 60}, + dictWord{6, 11, 504}, + dictWord{7, 11, 614}, + dictWord{7, 11, 1155}, + dictWord{140, 11, 0}, + dictWord{ + 135, + 10, + 141, + }, + dictWord{8, 11, 198}, + dictWord{11, 11, 29}, + dictWord{140, 11, 534}, + dictWord{140, 0, 65}, + dictWord{136, 0, 816}, + dictWord{132, 10, 619}, + dictWord{139, 0, 88}, + dictWord{5, 10, 246}, + dictWord{8, 10, 189}, + dictWord{9, 10, 355}, + dictWord{9, 10, 512}, + dictWord{10, 10, 124}, + dictWord{10, 10, 453}, + dictWord{11, 10, 143}, + dictWord{11, 10, 416}, + dictWord{11, 10, 859}, + dictWord{141, 10, 341}, + dictWord{4, 11, 379}, + dictWord{135, 11, 1397}, + dictWord{ + 4, + 0, + 600, + }, + dictWord{137, 0, 621}, + dictWord{133, 0, 367}, + dictWord{134, 0, 561}, + dictWord{6, 0, 559}, + dictWord{134, 0, 1691}, + dictWord{6, 0, 585}, + dictWord{ + 134, + 11, + 585, + }, + dictWord{135, 11, 1228}, + dictWord{4, 11, 118}, + dictWord{5, 10, 678}, + dictWord{6, 11, 274}, + dictWord{6, 11, 361}, + dictWord{7, 11, 75}, + dictWord{ + 141, + 11, + 441, + }, + dictWord{135, 11, 1818}, + dictWord{137, 11, 841}, + dictWord{5, 0, 573}, + dictWord{6, 0, 287}, + dictWord{7, 10, 862}, + dictWord{7, 10, 1886}, + dictWord{138, 10, 179}, + dictWord{132, 10, 517}, + dictWord{140, 11, 693}, + dictWord{5, 11, 314}, + dictWord{6, 11, 221}, + dictWord{7, 11, 419}, + dictWord{ + 10, + 11, + 650, + }, + dictWord{11, 11, 396}, + dictWord{12, 11, 156}, + dictWord{13, 11, 369}, + dictWord{14, 11, 333}, + dictWord{145, 11, 47}, + dictWord{140, 10, 540}, + dictWord{136, 10, 667}, + dictWord{11, 10, 403}, + dictWord{146, 10, 83}, + dictWord{6, 0, 672}, + dictWord{133, 10, 761}, + dictWord{9, 0, 157}, + dictWord{10, 10, 131}, + dictWord{140, 10, 72}, + dictWord{7, 0, 714}, + dictWord{134, 11, 460}, + dictWord{134, 0, 456}, + dictWord{133, 0, 925}, + dictWord{5, 11, 682}, + dictWord{ + 135, + 11, + 1887, + }, + dictWord{136, 11, 510}, + dictWord{136, 11, 475}, + dictWord{133, 11, 1016}, + dictWord{9, 0, 19}, + dictWord{7, 11, 602}, + dictWord{8, 11, 179}, + dictWord{ + 10, + 11, + 781, + }, + dictWord{140, 11, 126}, + dictWord{6, 11, 329}, + dictWord{138, 11, 111}, + dictWord{6, 0, 822}, + dictWord{134, 0, 1473}, + dictWord{144, 11, 86}, + dictWord{11, 0, 113}, + dictWord{139, 11, 113}, + dictWord{5, 11, 821}, + dictWord{134, 11, 1687}, + dictWord{133, 10, 449}, + dictWord{7, 0, 463}, + dictWord{ + 17, + 0, + 69, + }, + dictWord{136, 10, 103}, + dictWord{7, 10, 2028}, + dictWord{138, 10, 641}, + dictWord{6, 0, 193}, + dictWord{7, 0, 240}, + dictWord{7, 0, 1682}, + dictWord{ + 10, 
+ 0, + 51, + }, + dictWord{10, 0, 640}, + dictWord{11, 0, 410}, + dictWord{13, 0, 82}, + dictWord{14, 0, 247}, + dictWord{14, 0, 331}, + dictWord{142, 0, 377}, + dictWord{6, 0, 471}, + dictWord{11, 0, 411}, + dictWord{142, 0, 2}, + dictWord{5, 11, 71}, + dictWord{7, 11, 1407}, + dictWord{9, 11, 388}, + dictWord{9, 11, 704}, + dictWord{10, 11, 261}, + dictWord{ + 10, + 11, + 619, + }, + dictWord{11, 11, 547}, + dictWord{11, 11, 619}, + dictWord{143, 11, 157}, + dictWord{136, 0, 633}, + dictWord{135, 0, 1148}, + dictWord{6, 0, 554}, + dictWord{7, 0, 1392}, + dictWord{12, 0, 129}, + dictWord{7, 10, 1274}, + dictWord{7, 10, 1386}, + dictWord{7, 11, 2008}, + dictWord{9, 11, 337}, + dictWord{10, 11, 517}, + dictWord{146, 10, 87}, + dictWord{7, 0, 803}, + dictWord{8, 0, 542}, + dictWord{6, 10, 187}, + dictWord{7, 10, 1203}, + dictWord{8, 10, 380}, + dictWord{14, 10, 117}, + dictWord{149, 10, 28}, + dictWord{6, 10, 297}, + dictWord{7, 10, 793}, + dictWord{139, 10, 938}, + dictWord{8, 0, 438}, + dictWord{11, 0, 363}, + dictWord{7, 10, 464}, + dictWord{11, 10, 105}, + dictWord{12, 10, 231}, + dictWord{14, 10, 386}, + dictWord{15, 10, 102}, + dictWord{148, 10, 75}, + dictWord{5, 11, 16}, + dictWord{6, 11, 86}, + dictWord{6, 11, 603}, + dictWord{7, 11, 292}, + dictWord{7, 11, 561}, + dictWord{8, 11, 257}, + dictWord{8, 11, 382}, + dictWord{9, 11, 721}, + dictWord{9, 11, 778}, + dictWord{ + 11, + 11, + 581, + }, + dictWord{140, 11, 466}, + dictWord{6, 0, 717}, + dictWord{4, 11, 486}, + dictWord{133, 11, 491}, + dictWord{132, 0, 875}, + dictWord{132, 11, 72}, + dictWord{6, 11, 265}, + dictWord{135, 11, 847}, + dictWord{4, 0, 237}, + dictWord{135, 0, 514}, + dictWord{6, 0, 392}, + dictWord{7, 0, 65}, + dictWord{135, 0, 2019}, + dictWord{140, 11, 261}, + dictWord{135, 11, 922}, + dictWord{137, 11, 404}, + dictWord{12, 0, 563}, + dictWord{14, 0, 101}, + dictWord{18, 0, 129}, + dictWord{ + 7, + 10, + 1010, + }, + dictWord{11, 10, 733}, + dictWord{11, 10, 759}, + dictWord{13, 10, 34}, + dictWord{146, 10, 45}, + dictWord{7, 10, 1656}, + dictWord{9, 10, 369}, + dictWord{ + 10, + 10, + 338, + }, + dictWord{10, 10, 490}, + dictWord{11, 10, 154}, + dictWord{11, 10, 545}, + dictWord{11, 10, 775}, + dictWord{13, 10, 77}, + dictWord{141, 10, 274}, + dictWord{4, 0, 444}, + dictWord{10, 0, 146}, + dictWord{140, 0, 9}, + dictWord{139, 11, 163}, + dictWord{7, 0, 1260}, + dictWord{135, 0, 1790}, + dictWord{9, 0, 222}, + dictWord{10, 0, 43}, + dictWord{139, 0, 900}, + dictWord{137, 11, 234}, + dictWord{138, 0, 971}, + dictWord{137, 0, 761}, + dictWord{134, 0, 699}, + dictWord{ + 136, + 11, + 434, + }, + dictWord{6, 0, 1116}, + dictWord{7, 0, 1366}, + dictWord{5, 10, 20}, + dictWord{6, 11, 197}, + dictWord{6, 10, 298}, + dictWord{7, 10, 659}, + dictWord{8, 11, 205}, + dictWord{137, 10, 219}, + dictWord{132, 11, 490}, + dictWord{11, 11, 820}, + dictWord{150, 11, 51}, + dictWord{7, 10, 1440}, + dictWord{11, 10, 854}, + dictWord{ + 11, + 10, + 872, + }, + dictWord{11, 10, 921}, + dictWord{12, 10, 551}, + dictWord{13, 10, 472}, + dictWord{142, 10, 367}, + dictWord{140, 11, 13}, + dictWord{132, 0, 829}, + dictWord{12, 0, 242}, + dictWord{132, 10, 439}, + dictWord{136, 10, 669}, + dictWord{6, 0, 593}, + dictWord{6, 11, 452}, + dictWord{7, 11, 312}, + dictWord{ + 138, + 11, + 219, + }, + dictWord{4, 11, 333}, + dictWord{9, 11, 176}, + dictWord{12, 11, 353}, + dictWord{141, 11, 187}, + dictWord{7, 0, 36}, + dictWord{8, 0, 201}, + dictWord{ + 136, + 0, + 605, + }, + dictWord{140, 0, 224}, + dictWord{132, 10, 233}, + dictWord{134, 
0, 1430}, + dictWord{134, 0, 1806}, + dictWord{4, 0, 523}, + dictWord{133, 0, 638}, + dictWord{ + 6, + 0, + 1889, + }, + dictWord{9, 0, 958}, + dictWord{9, 0, 971}, + dictWord{9, 0, 976}, + dictWord{12, 0, 796}, + dictWord{12, 0, 799}, + dictWord{12, 0, 808}, + dictWord{ + 12, + 0, + 835, + }, + dictWord{12, 0, 836}, + dictWord{12, 0, 914}, + dictWord{12, 0, 946}, + dictWord{15, 0, 216}, + dictWord{15, 0, 232}, + dictWord{18, 0, 183}, + dictWord{18, 0, 187}, + dictWord{18, 0, 194}, + dictWord{18, 0, 212}, + dictWord{18, 0, 232}, + dictWord{149, 0, 49}, + dictWord{132, 10, 482}, + dictWord{6, 0, 827}, + dictWord{134, 0, 1434}, + dictWord{135, 10, 346}, + dictWord{134, 0, 2043}, + dictWord{6, 0, 242}, + dictWord{7, 0, 227}, + dictWord{7, 0, 1581}, + dictWord{8, 0, 104}, + dictWord{9, 0, 113}, + dictWord{9, 0, 220}, + dictWord{9, 0, 427}, + dictWord{10, 0, 136}, + dictWord{10, 0, 239}, + dictWord{11, 0, 579}, + dictWord{11, 0, 1023}, + dictWord{13, 0, 4}, + dictWord{ + 13, + 0, + 204, + }, + dictWord{13, 0, 316}, + dictWord{148, 0, 86}, + dictWord{134, 11, 1685}, + dictWord{7, 0, 148}, + dictWord{8, 0, 284}, + dictWord{141, 0, 63}, + dictWord{ + 142, + 0, + 10, + }, + dictWord{135, 11, 584}, + dictWord{134, 0, 1249}, + dictWord{7, 0, 861}, + dictWord{135, 10, 334}, + dictWord{5, 10, 795}, + dictWord{6, 10, 1741}, + dictWord{ + 137, + 11, + 70, + }, + dictWord{132, 0, 807}, + dictWord{7, 11, 135}, + dictWord{8, 11, 7}, + dictWord{8, 11, 62}, + dictWord{9, 11, 243}, + dictWord{10, 11, 658}, + dictWord{ + 10, + 11, + 697, + }, + dictWord{11, 11, 456}, + dictWord{139, 11, 756}, + dictWord{9, 11, 395}, + dictWord{138, 11, 79}, + dictWord{137, 11, 108}, + dictWord{147, 0, 94}, + dictWord{136, 0, 494}, + dictWord{135, 11, 631}, + dictWord{135, 10, 622}, + dictWord{7, 0, 1510}, + dictWord{135, 10, 1750}, + dictWord{4, 10, 203}, + dictWord{ + 135, + 10, + 1936, + }, + dictWord{7, 11, 406}, + dictWord{7, 11, 459}, + dictWord{8, 11, 606}, + dictWord{139, 11, 726}, + dictWord{7, 0, 1306}, + dictWord{8, 0, 505}, + dictWord{ + 9, + 0, + 482, + }, + dictWord{10, 0, 126}, + dictWord{11, 0, 225}, + dictWord{12, 0, 347}, + dictWord{12, 0, 449}, + dictWord{13, 0, 19}, + dictWord{14, 0, 218}, + dictWord{142, 0, 435}, + dictWord{5, 0, 268}, + dictWord{10, 0, 764}, + dictWord{12, 0, 120}, + dictWord{13, 0, 39}, + dictWord{145, 0, 127}, + dictWord{142, 11, 68}, + dictWord{11, 10, 678}, + dictWord{140, 10, 307}, + dictWord{12, 11, 268}, + dictWord{12, 11, 640}, + dictWord{142, 11, 119}, + dictWord{135, 10, 2044}, + dictWord{133, 11, 612}, + dictWord{ + 4, + 11, + 372, + }, + dictWord{7, 11, 482}, + dictWord{8, 11, 158}, + dictWord{9, 11, 602}, + dictWord{9, 11, 615}, + dictWord{10, 11, 245}, + dictWord{10, 11, 678}, + dictWord{ + 10, + 11, + 744, + }, + dictWord{11, 11, 248}, + dictWord{139, 11, 806}, + dictWord{7, 10, 311}, + dictWord{9, 10, 308}, + dictWord{140, 10, 255}, + dictWord{4, 0, 384}, + dictWord{135, 0, 1022}, + dictWord{5, 11, 854}, + dictWord{135, 11, 1991}, + dictWord{135, 10, 1266}, + dictWord{4, 10, 400}, + dictWord{5, 10, 267}, + dictWord{ + 135, + 10, + 232, + }, + dictWord{135, 0, 1703}, + dictWord{9, 0, 159}, + dictWord{11, 0, 661}, + dictWord{140, 0, 603}, + dictWord{4, 0, 964}, + dictWord{14, 0, 438}, + dictWord{ + 14, + 0, + 444, + }, + dictWord{14, 0, 456}, + dictWord{22, 0, 60}, + dictWord{22, 0, 63}, + dictWord{9, 11, 106}, + dictWord{9, 11, 163}, + dictWord{9, 11, 296}, + dictWord{10, 11, 167}, + dictWord{10, 11, 172}, + dictWord{10, 11, 777}, + dictWord{139, 11, 16}, + dictWord{136, 0, 
583}, + dictWord{132, 0, 515}, + dictWord{8, 0, 632}, + dictWord{8, 0, 697}, + dictWord{137, 0, 854}, + dictWord{5, 11, 195}, + dictWord{135, 11, 1685}, + dictWord{6, 0, 1123}, + dictWord{134, 0, 1365}, + dictWord{134, 11, 328}, + dictWord{ + 7, + 11, + 1997, + }, + dictWord{8, 11, 730}, + dictWord{139, 11, 1006}, + dictWord{4, 0, 136}, + dictWord{133, 0, 551}, + dictWord{134, 0, 1782}, + dictWord{7, 0, 1287}, + dictWord{ + 9, + 0, + 44, + }, + dictWord{10, 0, 552}, + dictWord{10, 0, 642}, + dictWord{11, 0, 839}, + dictWord{12, 0, 274}, + dictWord{12, 0, 275}, + dictWord{12, 0, 372}, + dictWord{ + 13, + 0, + 91, + }, + dictWord{142, 0, 125}, + dictWord{5, 11, 751}, + dictWord{11, 11, 797}, + dictWord{140, 11, 203}, + dictWord{133, 0, 732}, + dictWord{7, 0, 679}, + dictWord{ + 8, + 0, + 313, + }, + dictWord{4, 10, 100}, + dictWord{135, 11, 821}, + dictWord{10, 0, 361}, + dictWord{142, 0, 316}, + dictWord{134, 0, 595}, + dictWord{6, 0, 147}, + dictWord{ + 7, + 0, + 886, + }, + dictWord{9, 0, 753}, + dictWord{138, 0, 268}, + dictWord{5, 10, 362}, + dictWord{5, 10, 443}, + dictWord{6, 10, 318}, + dictWord{7, 10, 1019}, + dictWord{ + 139, + 10, + 623, + }, + dictWord{5, 10, 463}, + dictWord{136, 10, 296}, + dictWord{4, 10, 454}, + dictWord{5, 11, 950}, + dictWord{5, 11, 994}, + dictWord{134, 11, 351}, + dictWord{ + 138, + 0, + 137, + }, + dictWord{5, 10, 48}, + dictWord{5, 10, 404}, + dictWord{6, 10, 557}, + dictWord{7, 10, 458}, + dictWord{8, 10, 597}, + dictWord{10, 10, 455}, + dictWord{ + 10, + 10, + 606, + }, + dictWord{11, 10, 49}, + dictWord{11, 10, 548}, + dictWord{12, 10, 476}, + dictWord{13, 10, 18}, + dictWord{141, 10, 450}, + dictWord{133, 0, 414}, + dictWord{ + 135, + 0, + 1762, + }, + dictWord{5, 11, 421}, + dictWord{135, 11, 47}, + dictWord{5, 10, 442}, + dictWord{135, 10, 1984}, + dictWord{134, 0, 599}, + dictWord{134, 0, 1749}, + dictWord{134, 0, 1627}, + dictWord{4, 0, 488}, + dictWord{132, 11, 350}, + dictWord{137, 11, 751}, + dictWord{132, 0, 83}, + dictWord{140, 0, 676}, + dictWord{ + 133, + 11, + 967, + }, + dictWord{7, 0, 1639}, + dictWord{5, 10, 55}, + dictWord{140, 10, 161}, + dictWord{4, 11, 473}, + dictWord{7, 11, 623}, + dictWord{8, 11, 808}, + dictWord{ + 9, + 11, + 871, + }, + dictWord{9, 11, 893}, + dictWord{11, 11, 38}, + dictWord{11, 11, 431}, + dictWord{12, 11, 112}, + dictWord{12, 11, 217}, + dictWord{12, 11, 243}, + dictWord{ + 12, + 11, + 562, + }, + dictWord{12, 11, 683}, + dictWord{13, 11, 141}, + dictWord{13, 11, 197}, + dictWord{13, 11, 227}, + dictWord{13, 11, 406}, + dictWord{13, 11, 487}, + dictWord{14, 11, 156}, + dictWord{14, 11, 203}, + dictWord{14, 11, 224}, + dictWord{14, 11, 256}, + dictWord{18, 11, 58}, + dictWord{150, 11, 0}, + dictWord{ + 133, + 10, + 450, + }, + dictWord{7, 11, 736}, + dictWord{139, 11, 264}, + dictWord{134, 0, 278}, + dictWord{4, 11, 222}, + dictWord{7, 11, 286}, + dictWord{136, 11, 629}, + dictWord{ + 135, + 10, + 869, + }, + dictWord{140, 0, 97}, + dictWord{144, 0, 14}, + dictWord{134, 0, 1085}, + dictWord{4, 10, 213}, + dictWord{7, 10, 223}, + dictWord{136, 10, 80}, + dictWord{ + 7, + 0, + 388, + }, + dictWord{7, 0, 644}, + dictWord{139, 0, 781}, + dictWord{132, 0, 849}, + dictWord{7, 0, 229}, + dictWord{8, 0, 59}, + dictWord{9, 0, 190}, + dictWord{10, 0, 378}, + dictWord{140, 0, 191}, + dictWord{7, 10, 381}, + dictWord{7, 10, 806}, + dictWord{7, 10, 820}, + dictWord{8, 10, 354}, + dictWord{8, 10, 437}, + dictWord{8, 10, 787}, + dictWord{9, 10, 657}, + dictWord{10, 10, 58}, + dictWord{10, 10, 339}, + dictWord{10, 10, 
749}, + dictWord{11, 10, 914}, + dictWord{12, 10, 162}, + dictWord{13, 10, 75}, + dictWord{14, 10, 106}, + dictWord{14, 10, 198}, + dictWord{14, 10, 320}, + dictWord{14, 10, 413}, + dictWord{146, 10, 43}, + dictWord{141, 11, 306}, + dictWord{ + 136, + 10, + 747, + }, + dictWord{134, 0, 1115}, + dictWord{16, 0, 94}, + dictWord{16, 0, 108}, + dictWord{136, 11, 146}, + dictWord{6, 0, 700}, + dictWord{6, 0, 817}, + dictWord{ + 134, + 0, + 1002, + }, + dictWord{133, 10, 692}, + dictWord{4, 11, 465}, + dictWord{135, 11, 1663}, + dictWord{134, 10, 191}, + dictWord{6, 0, 1414}, + dictWord{ + 135, + 11, + 913, + }, + dictWord{132, 0, 660}, + dictWord{7, 0, 1035}, + dictWord{138, 0, 737}, + dictWord{6, 10, 162}, + dictWord{7, 10, 1960}, + dictWord{136, 10, 831}, + dictWord{ + 132, + 10, + 706, + }, + dictWord{7, 0, 690}, + dictWord{9, 0, 217}, + dictWord{9, 0, 587}, + dictWord{140, 0, 521}, + dictWord{138, 10, 426}, + dictWord{135, 10, 1235}, + dictWord{ + 6, + 11, + 82, + }, + dictWord{7, 11, 138}, + dictWord{7, 11, 517}, + dictWord{9, 11, 673}, + dictWord{139, 11, 238}, + dictWord{138, 0, 272}, + dictWord{5, 11, 495}, + dictWord{ + 7, + 11, + 834, + }, + dictWord{9, 11, 733}, + dictWord{139, 11, 378}, + dictWord{134, 0, 1744}, + dictWord{132, 0, 1011}, + dictWord{7, 11, 828}, + dictWord{142, 11, 116}, + dictWord{4, 0, 733}, + dictWord{9, 0, 194}, + dictWord{10, 0, 92}, + dictWord{11, 0, 198}, + dictWord{12, 0, 84}, + dictWord{13, 0, 128}, + dictWord{133, 11, 559}, + dictWord{ + 10, + 0, + 57, + }, + dictWord{10, 0, 277}, + dictWord{6, 11, 21}, + dictWord{6, 11, 1737}, + dictWord{7, 11, 1444}, + dictWord{136, 11, 224}, + dictWord{4, 10, 204}, + dictWord{ + 137, + 10, + 902, + }, + dictWord{136, 10, 833}, + dictWord{11, 0, 348}, + dictWord{12, 0, 99}, + dictWord{18, 0, 1}, + dictWord{18, 0, 11}, + dictWord{19, 0, 4}, + dictWord{7, 10, 366}, + dictWord{9, 10, 287}, + dictWord{12, 10, 199}, + dictWord{12, 10, 556}, + dictWord{140, 10, 577}, + dictWord{6, 0, 1981}, + dictWord{136, 0, 936}, + dictWord{ + 21, + 0, + 33, + }, + dictWord{150, 0, 40}, + dictWord{5, 11, 519}, + dictWord{138, 11, 204}, + dictWord{5, 10, 356}, + dictWord{135, 10, 224}, + dictWord{134, 0, 775}, + dictWord{ + 135, + 0, + 306, + }, + dictWord{7, 10, 630}, + dictWord{9, 10, 567}, + dictWord{11, 10, 150}, + dictWord{11, 10, 444}, + dictWord{141, 10, 119}, + dictWord{5, 0, 979}, + dictWord{ + 134, + 10, + 539, + }, + dictWord{133, 0, 611}, + dictWord{4, 11, 402}, + dictWord{135, 11, 1679}, + dictWord{5, 0, 178}, + dictWord{7, 11, 2}, + dictWord{8, 11, 323}, + dictWord{ + 136, + 11, + 479, + }, + dictWord{5, 11, 59}, + dictWord{135, 11, 672}, + dictWord{4, 0, 1010}, + dictWord{6, 0, 1969}, + dictWord{138, 11, 237}, + dictWord{133, 11, 412}, + dictWord{146, 11, 34}, + dictWord{7, 11, 1740}, + dictWord{146, 11, 48}, + dictWord{134, 0, 664}, + dictWord{139, 10, 814}, + dictWord{4, 11, 85}, + dictWord{ + 135, + 11, + 549, + }, + dictWord{133, 11, 94}, + dictWord{133, 11, 457}, + dictWord{132, 0, 390}, + dictWord{134, 0, 1510}, + dictWord{4, 10, 235}, + dictWord{135, 10, 255}, + dictWord{4, 10, 194}, + dictWord{5, 10, 584}, + dictWord{6, 11, 11}, + dictWord{6, 10, 384}, + dictWord{7, 11, 187}, + dictWord{7, 10, 583}, + dictWord{10, 10, 761}, + dictWord{ + 11, + 10, + 760, + }, + dictWord{139, 10, 851}, + dictWord{4, 11, 522}, + dictWord{139, 11, 802}, + dictWord{135, 0, 493}, + dictWord{10, 11, 776}, + dictWord{13, 11, 345}, + dictWord{142, 11, 425}, + dictWord{146, 0, 37}, + dictWord{4, 11, 52}, + dictWord{135, 11, 661}, + dictWord{134, 
0, 724}, + dictWord{134, 0, 829}, + dictWord{ + 133, + 11, + 520, + }, + dictWord{133, 10, 562}, + dictWord{4, 11, 281}, + dictWord{5, 11, 38}, + dictWord{7, 11, 194}, + dictWord{7, 11, 668}, + dictWord{7, 11, 1893}, + dictWord{ + 137, + 11, + 397, + }, + dictWord{5, 10, 191}, + dictWord{137, 10, 271}, + dictWord{7, 0, 1537}, + dictWord{14, 0, 96}, + dictWord{143, 0, 73}, + dictWord{5, 0, 473}, + dictWord{ + 11, + 0, + 168, + }, + dictWord{4, 10, 470}, + dictWord{6, 10, 153}, + dictWord{7, 10, 1503}, + dictWord{7, 10, 1923}, + dictWord{10, 10, 701}, + dictWord{11, 10, 132}, + dictWord{ + 11, + 10, + 227, + }, + dictWord{11, 10, 320}, + dictWord{11, 10, 436}, + dictWord{11, 10, 525}, + dictWord{11, 10, 855}, + dictWord{12, 10, 41}, + dictWord{12, 10, 286}, + dictWord{13, 10, 103}, + dictWord{13, 10, 284}, + dictWord{14, 10, 255}, + dictWord{14, 10, 262}, + dictWord{15, 10, 117}, + dictWord{143, 10, 127}, + dictWord{ + 133, + 0, + 105, + }, + dictWord{5, 0, 438}, + dictWord{9, 0, 694}, + dictWord{12, 0, 627}, + dictWord{141, 0, 210}, + dictWord{133, 10, 327}, + dictWord{6, 10, 552}, + dictWord{ + 7, + 10, + 1754, + }, + dictWord{137, 10, 604}, + dictWord{134, 0, 1256}, + dictWord{152, 0, 11}, + dictWord{5, 11, 448}, + dictWord{11, 11, 98}, + dictWord{139, 11, 524}, + dictWord{ + 7, + 0, + 1626, + }, + dictWord{5, 10, 80}, + dictWord{6, 10, 405}, + dictWord{7, 10, 403}, + dictWord{7, 10, 1502}, + dictWord{8, 10, 456}, + dictWord{9, 10, 487}, + dictWord{ + 9, + 10, + 853, + }, + dictWord{9, 10, 889}, + dictWord{10, 10, 309}, + dictWord{11, 10, 721}, + dictWord{11, 10, 994}, + dictWord{12, 10, 430}, + dictWord{13, 10, 165}, + dictWord{ + 14, + 11, + 16, + }, + dictWord{146, 11, 44}, + dictWord{132, 0, 779}, + dictWord{8, 0, 25}, + dictWord{138, 0, 826}, + dictWord{4, 10, 453}, + dictWord{5, 10, 887}, + dictWord{ + 6, + 10, + 535, + }, + dictWord{8, 10, 6}, + dictWord{8, 10, 543}, + dictWord{136, 10, 826}, + dictWord{137, 11, 461}, + dictWord{140, 11, 632}, + dictWord{132, 0, 308}, + dictWord{135, 0, 741}, + dictWord{132, 0, 671}, + dictWord{7, 0, 150}, + dictWord{8, 0, 649}, + dictWord{136, 0, 1020}, + dictWord{9, 0, 99}, + dictWord{6, 11, 336}, + dictWord{ + 8, + 11, + 552, + }, + dictWord{9, 11, 285}, + dictWord{10, 11, 99}, + dictWord{139, 11, 568}, + dictWord{134, 0, 521}, + dictWord{5, 0, 339}, + dictWord{14, 0, 3}, + dictWord{ + 15, + 0, + 41, + }, + dictWord{15, 0, 166}, + dictWord{147, 0, 66}, + dictWord{6, 11, 423}, + dictWord{7, 11, 665}, + dictWord{7, 11, 1210}, + dictWord{9, 11, 218}, + dictWord{ + 141, + 11, + 222, + }, + dictWord{6, 0, 543}, + dictWord{5, 10, 101}, + dictWord{5, 11, 256}, + dictWord{6, 10, 88}, + dictWord{7, 10, 1677}, + dictWord{9, 10, 100}, + dictWord{10, 10, 677}, + dictWord{14, 10, 169}, + dictWord{14, 10, 302}, + dictWord{14, 10, 313}, + dictWord{15, 10, 48}, + dictWord{143, 10, 84}, + dictWord{4, 10, 310}, + dictWord{ + 7, + 10, + 708, + }, + dictWord{7, 10, 996}, + dictWord{9, 10, 795}, + dictWord{10, 10, 390}, + dictWord{10, 10, 733}, + dictWord{11, 10, 451}, + dictWord{12, 10, 249}, + dictWord{ + 14, + 10, + 115, + }, + dictWord{14, 10, 286}, + dictWord{143, 10, 100}, + dictWord{133, 10, 587}, + dictWord{13, 11, 417}, + dictWord{14, 11, 129}, + dictWord{143, 11, 15}, + dictWord{134, 0, 1358}, + dictWord{136, 11, 554}, + dictWord{132, 10, 498}, + dictWord{7, 10, 217}, + dictWord{8, 10, 140}, + dictWord{138, 10, 610}, + dictWord{ + 135, + 11, + 989, + }, + dictWord{135, 11, 634}, + dictWord{6, 0, 155}, + dictWord{140, 0, 234}, + dictWord{135, 11, 462}, + 
dictWord{132, 11, 618}, + dictWord{ + 134, + 0, + 1628, + }, + dictWord{132, 0, 766}, + dictWord{4, 11, 339}, + dictWord{5, 10, 905}, + dictWord{135, 11, 259}, + dictWord{135, 0, 829}, + dictWord{4, 11, 759}, + dictWord{ + 141, + 11, + 169, + }, + dictWord{7, 0, 1445}, + dictWord{4, 10, 456}, + dictWord{7, 10, 358}, + dictWord{7, 10, 1637}, + dictWord{8, 10, 643}, + dictWord{139, 10, 483}, + dictWord{ + 5, + 0, + 486, + }, + dictWord{135, 0, 1349}, + dictWord{5, 11, 688}, + dictWord{135, 11, 712}, + dictWord{7, 0, 1635}, + dictWord{8, 0, 17}, + dictWord{10, 0, 217}, + dictWord{ + 10, + 0, + 295, + }, + dictWord{12, 0, 2}, + dictWord{140, 11, 2}, + dictWord{138, 0, 558}, + dictWord{150, 10, 56}, + dictWord{4, 11, 278}, + dictWord{5, 11, 465}, + dictWord{ + 135, + 11, + 1367, + }, + dictWord{136, 11, 482}, + dictWord{133, 10, 535}, + dictWord{6, 0, 1362}, + dictWord{6, 0, 1461}, + dictWord{10, 11, 274}, + dictWord{10, 11, 625}, + dictWord{139, 11, 530}, + dictWord{5, 0, 599}, + dictWord{5, 11, 336}, + dictWord{6, 11, 341}, + dictWord{6, 11, 478}, + dictWord{6, 11, 1763}, + dictWord{136, 11, 386}, + dictWord{7, 10, 1748}, + dictWord{137, 11, 151}, + dictWord{134, 0, 1376}, + dictWord{133, 10, 539}, + dictWord{135, 11, 73}, + dictWord{135, 11, 1971}, + dictWord{139, 11, 283}, + dictWord{9, 0, 93}, + dictWord{139, 0, 474}, + dictWord{6, 10, 91}, + dictWord{135, 10, 435}, + dictWord{6, 0, 447}, + dictWord{5, 11, 396}, + dictWord{134, 11, 501}, + dictWord{4, 10, 16}, + dictWord{5, 10, 316}, + dictWord{5, 10, 842}, + dictWord{6, 10, 370}, + dictWord{6, 10, 1778}, + dictWord{8, 10, 166}, + dictWord{11, 10, 812}, + dictWord{12, 10, 206}, + dictWord{12, 10, 351}, + dictWord{14, 10, 418}, + dictWord{16, 10, 15}, + dictWord{16, 10, 34}, + dictWord{18, 10, 3}, + dictWord{19, 10, 3}, + dictWord{19, 10, 7}, + dictWord{20, 10, 4}, + dictWord{149, 10, 21}, + dictWord{7, 0, 577}, + dictWord{7, 0, 1432}, + dictWord{9, 0, 475}, + dictWord{9, 0, 505}, + dictWord{9, 0, 526}, + dictWord{9, 0, 609}, + dictWord{9, 0, 689}, + dictWord{9, 0, 726}, + dictWord{9, 0, 735}, + dictWord{9, 0, 738}, + dictWord{10, 0, 556}, + dictWord{ + 10, + 0, + 674, + }, + dictWord{10, 0, 684}, + dictWord{11, 0, 89}, + dictWord{11, 0, 202}, + dictWord{11, 0, 272}, + dictWord{11, 0, 380}, + dictWord{11, 0, 415}, + dictWord{11, 0, 505}, + dictWord{11, 0, 537}, + dictWord{11, 0, 550}, + dictWord{11, 0, 562}, + dictWord{11, 0, 640}, + dictWord{11, 0, 667}, + dictWord{11, 0, 688}, + dictWord{11, 0, 847}, + dictWord{11, 0, 927}, + dictWord{11, 0, 930}, + dictWord{11, 0, 940}, + dictWord{12, 0, 144}, + dictWord{12, 0, 325}, + dictWord{12, 0, 329}, + dictWord{12, 0, 389}, + dictWord{ + 12, + 0, + 403, + }, + dictWord{12, 0, 451}, + dictWord{12, 0, 515}, + dictWord{12, 0, 604}, + dictWord{12, 0, 616}, + dictWord{12, 0, 626}, + dictWord{13, 0, 66}, + dictWord{ + 13, + 0, + 131, + }, + dictWord{13, 0, 167}, + dictWord{13, 0, 236}, + dictWord{13, 0, 368}, + dictWord{13, 0, 411}, + dictWord{13, 0, 434}, + dictWord{13, 0, 453}, + dictWord{13, 0, 461}, + dictWord{13, 0, 474}, + dictWord{14, 0, 59}, + dictWord{14, 0, 60}, + dictWord{14, 0, 139}, + dictWord{14, 0, 152}, + dictWord{14, 0, 276}, + dictWord{14, 0, 353}, + dictWord{ + 14, + 0, + 402, + }, + dictWord{15, 0, 28}, + dictWord{15, 0, 81}, + dictWord{15, 0, 123}, + dictWord{15, 0, 152}, + dictWord{18, 0, 136}, + dictWord{148, 0, 88}, + dictWord{ + 4, + 11, + 929, + }, + dictWord{133, 11, 799}, + dictWord{136, 11, 46}, + dictWord{142, 0, 307}, + dictWord{4, 0, 609}, + dictWord{7, 0, 756}, + 
dictWord{9, 0, 544}, + dictWord{ + 11, + 0, + 413, + }, + dictWord{144, 0, 25}, + dictWord{10, 0, 687}, + dictWord{7, 10, 619}, + dictWord{10, 10, 547}, + dictWord{11, 10, 122}, + dictWord{140, 10, 601}, + dictWord{ + 4, + 0, + 930, + }, + dictWord{133, 0, 947}, + dictWord{133, 0, 939}, + dictWord{142, 0, 21}, + dictWord{4, 11, 892}, + dictWord{133, 11, 770}, + dictWord{133, 0, 962}, + dictWord{ + 5, + 0, + 651, + }, + dictWord{8, 0, 170}, + dictWord{9, 0, 61}, + dictWord{9, 0, 63}, + dictWord{10, 0, 23}, + dictWord{10, 0, 37}, + dictWord{10, 0, 834}, + dictWord{11, 0, 4}, + dictWord{ + 11, + 0, + 187, + }, + dictWord{11, 0, 281}, + dictWord{11, 0, 503}, + dictWord{11, 0, 677}, + dictWord{12, 0, 96}, + dictWord{12, 0, 130}, + dictWord{12, 0, 244}, + dictWord{14, 0, 5}, + dictWord{14, 0, 40}, + dictWord{14, 0, 162}, + dictWord{14, 0, 202}, + dictWord{146, 0, 133}, + dictWord{4, 0, 406}, + dictWord{5, 0, 579}, + dictWord{12, 0, 492}, + dictWord{ + 150, + 0, + 15, + }, + dictWord{135, 11, 158}, + dictWord{135, 0, 597}, + dictWord{132, 0, 981}, + dictWord{132, 10, 888}, + dictWord{4, 10, 149}, + dictWord{138, 10, 368}, + dictWord{132, 0, 545}, + dictWord{4, 10, 154}, + dictWord{7, 10, 1134}, + dictWord{136, 10, 105}, + dictWord{135, 11, 2001}, + dictWord{134, 0, 1558}, + dictWord{ + 4, + 10, + 31, + }, + dictWord{6, 10, 429}, + dictWord{7, 10, 962}, + dictWord{9, 10, 458}, + dictWord{139, 10, 691}, + dictWord{132, 10, 312}, + dictWord{135, 10, 1642}, + dictWord{ + 6, + 0, + 17, + }, + dictWord{6, 0, 1304}, + dictWord{7, 0, 16}, + dictWord{7, 0, 1001}, + dictWord{9, 0, 886}, + dictWord{10, 0, 489}, + dictWord{10, 0, 800}, + dictWord{11, 0, 782}, + dictWord{12, 0, 320}, + dictWord{13, 0, 467}, + dictWord{14, 0, 145}, + dictWord{14, 0, 387}, + dictWord{143, 0, 119}, + dictWord{135, 0, 1982}, + dictWord{17, 0, 17}, + dictWord{7, 11, 1461}, + dictWord{140, 11, 91}, + dictWord{4, 10, 236}, + dictWord{132, 11, 602}, + dictWord{138, 0, 907}, + dictWord{136, 0, 110}, + dictWord{7, 0, 272}, + dictWord{19, 0, 53}, + dictWord{5, 10, 836}, + dictWord{5, 10, 857}, + dictWord{134, 10, 1680}, + dictWord{5, 0, 458}, + dictWord{7, 11, 1218}, + dictWord{136, 11, 303}, + dictWord{7, 0, 1983}, + dictWord{8, 0, 0}, + dictWord{8, 0, 171}, + dictWord{9, 0, 120}, + dictWord{9, 0, 732}, + dictWord{10, 0, 473}, + dictWord{11, 0, 656}, + dictWord{ + 11, + 0, + 998, + }, + dictWord{18, 0, 0}, + dictWord{18, 0, 2}, + dictWord{19, 0, 21}, + dictWord{10, 10, 68}, + dictWord{139, 10, 494}, + dictWord{137, 11, 662}, + dictWord{4, 11, 13}, + dictWord{5, 11, 567}, + dictWord{7, 11, 1498}, + dictWord{9, 11, 124}, + dictWord{11, 11, 521}, + dictWord{140, 11, 405}, + dictWord{4, 10, 81}, + dictWord{139, 10, 867}, + dictWord{135, 11, 1006}, + dictWord{7, 11, 800}, + dictWord{7, 11, 1783}, + dictWord{138, 11, 12}, + dictWord{9, 0, 295}, + dictWord{10, 0, 443}, + dictWord{ + 5, + 10, + 282, + }, + dictWord{8, 10, 650}, + dictWord{137, 10, 907}, + dictWord{132, 11, 735}, + dictWord{4, 11, 170}, + dictWord{4, 10, 775}, + dictWord{135, 11, 323}, + dictWord{ + 6, + 0, + 1844, + }, + dictWord{10, 0, 924}, + dictWord{11, 11, 844}, + dictWord{12, 11, 104}, + dictWord{140, 11, 625}, + dictWord{5, 11, 304}, + dictWord{7, 11, 1403}, + dictWord{140, 11, 498}, + dictWord{134, 0, 1232}, + dictWord{4, 0, 519}, + dictWord{10, 0, 70}, + dictWord{12, 0, 26}, + dictWord{14, 0, 17}, + dictWord{14, 0, 178}, + dictWord{ + 15, + 0, + 34, + }, + dictWord{149, 0, 12}, + dictWord{132, 0, 993}, + dictWord{4, 11, 148}, + dictWord{133, 11, 742}, + dictWord{6, 0, 
31}, + dictWord{7, 0, 491}, + dictWord{7, 0, 530}, + dictWord{8, 0, 592}, + dictWord{11, 0, 53}, + dictWord{11, 0, 779}, + dictWord{12, 0, 167}, + dictWord{12, 0, 411}, + dictWord{14, 0, 14}, + dictWord{14, 0, 136}, + dictWord{ + 15, + 0, + 72, + }, + dictWord{16, 0, 17}, + dictWord{144, 0, 72}, + dictWord{133, 0, 907}, + dictWord{134, 0, 733}, + dictWord{133, 11, 111}, + dictWord{4, 10, 71}, + dictWord{ + 5, + 10, + 376, + }, + dictWord{7, 10, 119}, + dictWord{138, 10, 665}, + dictWord{136, 0, 55}, + dictWord{8, 0, 430}, + dictWord{136, 11, 430}, + dictWord{4, 0, 208}, + dictWord{ + 5, + 0, + 106, + }, + dictWord{6, 0, 531}, + dictWord{8, 0, 408}, + dictWord{9, 0, 188}, + dictWord{138, 0, 572}, + dictWord{12, 0, 56}, + dictWord{11, 10, 827}, + dictWord{14, 10, 34}, + dictWord{143, 10, 148}, + dictWord{134, 0, 1693}, + dictWord{133, 11, 444}, + dictWord{132, 10, 479}, + dictWord{140, 0, 441}, + dictWord{9, 0, 449}, + dictWord{ + 10, + 0, + 192, + }, + dictWord{138, 0, 740}, + dictWord{134, 0, 928}, + dictWord{4, 0, 241}, + dictWord{7, 10, 607}, + dictWord{136, 10, 99}, + dictWord{8, 11, 123}, + dictWord{ + 15, + 11, + 6, + }, + dictWord{144, 11, 7}, + dictWord{6, 11, 285}, + dictWord{8, 11, 654}, + dictWord{11, 11, 749}, + dictWord{12, 11, 190}, + dictWord{12, 11, 327}, + dictWord{ + 13, + 11, + 120, + }, + dictWord{13, 11, 121}, + dictWord{13, 11, 327}, + dictWord{15, 11, 47}, + dictWord{146, 11, 40}, + dictWord{4, 10, 41}, + dictWord{5, 10, 74}, + dictWord{ + 7, + 10, + 1627, + }, + dictWord{11, 10, 871}, + dictWord{140, 10, 619}, + dictWord{7, 0, 1525}, + dictWord{11, 10, 329}, + dictWord{11, 10, 965}, + dictWord{12, 10, 241}, + dictWord{14, 10, 354}, + dictWord{15, 10, 22}, + dictWord{148, 10, 63}, + dictWord{132, 0, 259}, + dictWord{135, 11, 183}, + dictWord{9, 10, 209}, + dictWord{ + 137, + 10, + 300, + }, + dictWord{5, 11, 937}, + dictWord{135, 11, 100}, + dictWord{133, 10, 98}, + dictWord{4, 0, 173}, + dictWord{5, 0, 312}, + dictWord{5, 0, 512}, + dictWord{ + 135, + 0, + 1285, + }, + dictWord{141, 0, 185}, + dictWord{7, 0, 1603}, + dictWord{7, 0, 1691}, + dictWord{9, 0, 464}, + dictWord{11, 0, 195}, + dictWord{12, 0, 279}, + dictWord{ + 12, + 0, + 448, + }, + dictWord{14, 0, 11}, + dictWord{147, 0, 102}, + dictWord{135, 0, 1113}, + dictWord{133, 10, 984}, + dictWord{4, 0, 452}, + dictWord{5, 0, 583}, + dictWord{ + 135, + 0, + 720, + }, + dictWord{4, 0, 547}, + dictWord{5, 0, 817}, + dictWord{6, 0, 433}, + dictWord{7, 0, 593}, + dictWord{7, 0, 1378}, + dictWord{8, 0, 161}, + dictWord{9, 0, 284}, + dictWord{ + 10, + 0, + 313, + }, + dictWord{139, 0, 886}, + dictWord{8, 0, 722}, + dictWord{4, 10, 182}, + dictWord{6, 10, 205}, + dictWord{135, 10, 220}, + dictWord{150, 0, 13}, + dictWord{ + 4, + 10, + 42, + }, + dictWord{9, 10, 205}, + dictWord{9, 10, 786}, + dictWord{138, 10, 659}, + dictWord{6, 0, 289}, + dictWord{7, 0, 1670}, + dictWord{12, 0, 57}, + dictWord{151, 0, 4}, + dictWord{132, 10, 635}, + dictWord{14, 0, 43}, + dictWord{146, 0, 21}, + dictWord{139, 10, 533}, + dictWord{135, 0, 1694}, + dictWord{8, 0, 420}, + dictWord{ + 139, + 0, + 193, + }, + dictWord{135, 0, 409}, + dictWord{132, 10, 371}, + dictWord{4, 10, 272}, + dictWord{135, 10, 836}, + dictWord{5, 10, 825}, + dictWord{134, 10, 1640}, + dictWord{5, 11, 251}, + dictWord{5, 11, 956}, + dictWord{8, 11, 268}, + dictWord{9, 11, 214}, + dictWord{146, 11, 142}, + dictWord{138, 0, 308}, + dictWord{6, 0, 1863}, + dictWord{141, 11, 37}, + dictWord{137, 10, 879}, + dictWord{7, 10, 317}, + dictWord{135, 10, 569}, + dictWord{132, 
11, 294}, + dictWord{134, 0, 790}, + dictWord{ + 5, + 0, + 1002, + }, + dictWord{136, 0, 745}, + dictWord{5, 11, 346}, + dictWord{5, 11, 711}, + dictWord{136, 11, 390}, + dictWord{135, 0, 289}, + dictWord{5, 0, 504}, + dictWord{ + 11, + 0, + 68, + }, + dictWord{137, 10, 307}, + dictWord{4, 0, 239}, + dictWord{6, 0, 477}, + dictWord{7, 0, 1607}, + dictWord{139, 0, 617}, + dictWord{149, 0, 13}, + dictWord{ + 133, + 0, + 609, + }, + dictWord{133, 11, 624}, + dictWord{5, 11, 783}, + dictWord{7, 11, 1998}, + dictWord{135, 11, 2047}, + dictWord{133, 10, 525}, + dictWord{132, 0, 367}, + dictWord{132, 11, 594}, + dictWord{6, 0, 528}, + dictWord{133, 10, 493}, + dictWord{4, 10, 174}, + dictWord{135, 10, 911}, + dictWord{8, 10, 417}, + dictWord{ + 137, + 10, + 782, + }, + dictWord{132, 0, 694}, + dictWord{7, 0, 548}, + dictWord{137, 0, 58}, + dictWord{4, 10, 32}, + dictWord{5, 10, 215}, + dictWord{6, 10, 269}, + dictWord{7, 10, 1782}, + dictWord{7, 10, 1892}, + dictWord{10, 10, 16}, + dictWord{11, 10, 822}, + dictWord{11, 10, 954}, + dictWord{141, 10, 481}, + dictWord{140, 0, 687}, + dictWord{ + 7, + 0, + 1749, + }, + dictWord{136, 10, 477}, + dictWord{132, 11, 569}, + dictWord{133, 10, 308}, + dictWord{135, 10, 1088}, + dictWord{4, 0, 661}, + dictWord{138, 0, 1004}, + dictWord{5, 11, 37}, + dictWord{6, 11, 39}, + dictWord{6, 11, 451}, + dictWord{7, 11, 218}, + dictWord{7, 11, 667}, + dictWord{7, 11, 1166}, + dictWord{7, 11, 1687}, + dictWord{8, 11, 662}, + dictWord{144, 11, 2}, + dictWord{9, 0, 445}, + dictWord{12, 0, 53}, + dictWord{13, 0, 492}, + dictWord{5, 10, 126}, + dictWord{8, 10, 297}, + dictWord{ + 9, + 10, + 366, + }, + dictWord{140, 10, 374}, + dictWord{7, 10, 1551}, + dictWord{139, 10, 361}, + dictWord{148, 0, 74}, + dictWord{134, 11, 508}, + dictWord{135, 0, 213}, + dictWord{132, 10, 175}, + dictWord{132, 10, 685}, + dictWord{6, 0, 760}, + dictWord{6, 0, 834}, + dictWord{134, 0, 1248}, + dictWord{7, 11, 453}, + dictWord{7, 11, 635}, + dictWord{7, 11, 796}, + dictWord{8, 11, 331}, + dictWord{9, 11, 328}, + dictWord{9, 11, 330}, + dictWord{9, 11, 865}, + dictWord{10, 11, 119}, + dictWord{10, 11, 235}, + dictWord{11, 11, 111}, + dictWord{11, 11, 129}, + dictWord{11, 11, 240}, + dictWord{12, 11, 31}, + dictWord{12, 11, 66}, + dictWord{12, 11, 222}, + dictWord{12, 11, 269}, + dictWord{12, 11, 599}, + dictWord{12, 11, 689}, + dictWord{13, 11, 186}, + dictWord{13, 11, 364}, + dictWord{142, 11, 345}, + dictWord{7, 0, 1672}, + dictWord{ + 139, + 0, + 189, + }, + dictWord{133, 10, 797}, + dictWord{133, 10, 565}, + dictWord{6, 0, 1548}, + dictWord{6, 11, 98}, + dictWord{7, 11, 585}, + dictWord{135, 11, 702}, + dictWord{ + 9, + 0, + 968, + }, + dictWord{15, 0, 192}, + dictWord{149, 0, 56}, + dictWord{4, 10, 252}, + dictWord{6, 11, 37}, + dictWord{7, 11, 299}, + dictWord{7, 10, 1068}, + dictWord{ + 7, + 11, + 1666, + }, + dictWord{8, 11, 195}, + dictWord{8, 11, 316}, + dictWord{9, 11, 178}, + dictWord{9, 11, 276}, + dictWord{9, 11, 339}, + dictWord{9, 11, 536}, + dictWord{ + 10, + 11, + 102, + }, + dictWord{10, 11, 362}, + dictWord{10, 10, 434}, + dictWord{10, 11, 785}, + dictWord{11, 11, 55}, + dictWord{11, 11, 149}, + dictWord{11, 10, 228}, + dictWord{ + 11, + 10, + 426, + }, + dictWord{11, 11, 773}, + dictWord{13, 10, 231}, + dictWord{13, 11, 416}, + dictWord{13, 11, 419}, + dictWord{14, 11, 38}, + dictWord{14, 11, 41}, + dictWord{14, 11, 210}, + dictWord{18, 10, 106}, + dictWord{148, 10, 87}, + dictWord{4, 0, 751}, + dictWord{11, 0, 390}, + dictWord{140, 0, 32}, + dictWord{4, 0, 409}, + 
dictWord{133, 0, 78}, + dictWord{11, 11, 458}, + dictWord{12, 11, 15}, + dictWord{140, 11, 432}, + dictWord{7, 0, 1602}, + dictWord{10, 0, 257}, + dictWord{10, 0, 698}, + dictWord{11, 0, 544}, + dictWord{11, 0, 585}, + dictWord{12, 0, 212}, + dictWord{13, 0, 307}, + dictWord{5, 10, 231}, + dictWord{7, 10, 601}, + dictWord{9, 10, 277}, + dictWord{ + 9, + 10, + 674, + }, + dictWord{10, 10, 178}, + dictWord{10, 10, 418}, + dictWord{10, 10, 509}, + dictWord{11, 10, 531}, + dictWord{12, 10, 113}, + dictWord{12, 10, 475}, + dictWord{13, 10, 99}, + dictWord{142, 10, 428}, + dictWord{6, 0, 473}, + dictWord{145, 0, 105}, + dictWord{6, 0, 1949}, + dictWord{15, 0, 156}, + dictWord{133, 11, 645}, + dictWord{7, 10, 1591}, + dictWord{144, 10, 43}, + dictWord{135, 0, 1779}, + dictWord{135, 10, 1683}, + dictWord{4, 11, 290}, + dictWord{135, 11, 1356}, + dictWord{134, 0, 763}, + dictWord{6, 11, 70}, + dictWord{7, 11, 1292}, + dictWord{10, 11, 762}, + dictWord{139, 11, 288}, + dictWord{142, 0, 29}, + dictWord{140, 11, 428}, + dictWord{7, 0, 883}, + dictWord{7, 11, 131}, + dictWord{7, 11, 422}, + dictWord{8, 11, 210}, + dictWord{140, 11, 573}, + dictWord{134, 0, 488}, + dictWord{4, 10, 399}, + dictWord{5, 10, 119}, + dictWord{5, 10, 494}, + dictWord{7, 10, 751}, + dictWord{137, 10, 556}, + dictWord{133, 0, 617}, + dictWord{132, 11, 936}, + dictWord{ + 139, + 0, + 50, + }, + dictWord{7, 0, 1518}, + dictWord{139, 0, 694}, + dictWord{137, 0, 785}, + dictWord{4, 0, 546}, + dictWord{135, 0, 2042}, + dictWord{7, 11, 716}, + dictWord{ + 13, + 11, + 97, + }, + dictWord{141, 11, 251}, + dictWord{132, 11, 653}, + dictWord{145, 0, 22}, + dictWord{134, 0, 1016}, + dictWord{4, 0, 313}, + dictWord{133, 0, 577}, + dictWord{ + 136, + 11, + 657, + }, + dictWord{8, 0, 184}, + dictWord{141, 0, 433}, + dictWord{135, 0, 935}, + dictWord{6, 0, 720}, + dictWord{9, 0, 114}, + dictWord{146, 11, 80}, + dictWord{ + 12, + 0, + 186, + }, + dictWord{12, 0, 292}, + dictWord{14, 0, 100}, + dictWord{18, 0, 70}, + dictWord{7, 10, 594}, + dictWord{7, 10, 851}, + dictWord{7, 10, 1858}, + dictWord{ + 9, + 10, + 411, + }, + dictWord{9, 10, 574}, + dictWord{9, 10, 666}, + dictWord{9, 10, 737}, + dictWord{10, 10, 346}, + dictWord{10, 10, 712}, + dictWord{11, 10, 246}, + dictWord{ + 11, + 10, + 432, + }, + dictWord{11, 10, 517}, + dictWord{11, 10, 647}, + dictWord{11, 10, 679}, + dictWord{11, 10, 727}, + dictWord{12, 10, 304}, + dictWord{12, 10, 305}, + dictWord{12, 10, 323}, + dictWord{12, 10, 483}, + dictWord{12, 10, 572}, + dictWord{12, 10, 593}, + dictWord{12, 10, 602}, + dictWord{13, 10, 95}, + dictWord{13, 10, 101}, + dictWord{13, 10, 171}, + dictWord{13, 10, 315}, + dictWord{13, 10, 378}, + dictWord{13, 10, 425}, + dictWord{13, 10, 475}, + dictWord{14, 10, 63}, + dictWord{ + 14, + 10, + 380, + }, + dictWord{14, 10, 384}, + dictWord{15, 10, 133}, + dictWord{18, 10, 112}, + dictWord{148, 10, 72}, + dictWord{135, 10, 1093}, + dictWord{135, 11, 1836}, + dictWord{132, 10, 679}, + dictWord{137, 10, 203}, + dictWord{11, 0, 402}, + dictWord{12, 0, 109}, + dictWord{12, 0, 431}, + dictWord{13, 0, 179}, + dictWord{13, 0, 206}, + dictWord{14, 0, 217}, + dictWord{16, 0, 3}, + dictWord{148, 0, 53}, + dictWord{7, 11, 1368}, + dictWord{8, 11, 232}, + dictWord{8, 11, 361}, + dictWord{10, 11, 682}, + dictWord{138, 11, 742}, + dictWord{137, 10, 714}, + dictWord{5, 0, 886}, + dictWord{6, 0, 46}, + dictWord{6, 0, 1790}, + dictWord{7, 0, 14}, + dictWord{7, 0, 732}, + dictWord{ + 7, + 0, + 1654, + }, + dictWord{8, 0, 95}, + dictWord{8, 0, 327}, + dictWord{8, 0, 
616}, + dictWord{9, 0, 892}, + dictWord{10, 0, 598}, + dictWord{10, 0, 769}, + dictWord{11, 0, 134}, + dictWord{11, 0, 747}, + dictWord{12, 0, 378}, + dictWord{14, 0, 97}, + dictWord{137, 11, 534}, + dictWord{4, 0, 969}, + dictWord{136, 10, 825}, + dictWord{137, 11, 27}, + dictWord{6, 0, 727}, + dictWord{142, 11, 12}, + dictWord{133, 0, 1021}, + dictWord{134, 0, 1190}, + dictWord{134, 11, 1657}, + dictWord{5, 10, 143}, + dictWord{ + 5, + 10, + 769, + }, + dictWord{6, 10, 1760}, + dictWord{7, 10, 682}, + dictWord{7, 10, 1992}, + dictWord{136, 10, 736}, + dictWord{132, 0, 153}, + dictWord{135, 11, 127}, + dictWord{133, 0, 798}, + dictWord{132, 0, 587}, + dictWord{6, 0, 598}, + dictWord{7, 0, 42}, + dictWord{8, 0, 695}, + dictWord{10, 0, 212}, + dictWord{11, 0, 158}, + dictWord{ + 14, + 0, + 196, + }, + dictWord{145, 0, 85}, + dictWord{133, 10, 860}, + dictWord{6, 0, 1929}, + dictWord{134, 0, 1933}, + dictWord{5, 0, 957}, + dictWord{5, 0, 1008}, + dictWord{ + 9, + 0, + 577, + }, + dictWord{12, 0, 141}, + dictWord{6, 10, 422}, + dictWord{7, 10, 0}, + dictWord{7, 10, 1544}, + dictWord{8, 11, 364}, + dictWord{11, 10, 990}, + dictWord{ + 12, + 10, + 453, + }, + dictWord{13, 10, 47}, + dictWord{141, 10, 266}, + dictWord{134, 0, 1319}, + dictWord{4, 0, 129}, + dictWord{135, 0, 465}, + dictWord{7, 0, 470}, + dictWord{ + 7, + 0, + 1057, + }, + dictWord{7, 0, 1201}, + dictWord{9, 0, 755}, + dictWord{11, 0, 906}, + dictWord{140, 0, 527}, + dictWord{7, 0, 908}, + dictWord{146, 0, 7}, + dictWord{5, 0, 148}, + dictWord{136, 0, 450}, + dictWord{5, 10, 515}, + dictWord{137, 10, 131}, + dictWord{7, 10, 1605}, + dictWord{11, 10, 962}, + dictWord{146, 10, 139}, + dictWord{ + 132, + 10, + 646, + }, + dictWord{134, 0, 1166}, + dictWord{4, 10, 396}, + dictWord{7, 10, 728}, + dictWord{9, 10, 117}, + dictWord{13, 10, 202}, + dictWord{148, 10, 51}, + dictWord{ + 6, + 10, + 121, + }, + dictWord{6, 10, 124}, + dictWord{6, 10, 357}, + dictWord{7, 10, 1138}, + dictWord{7, 10, 1295}, + dictWord{8, 10, 162}, + dictWord{139, 10, 655}, + dictWord{14, 0, 374}, + dictWord{142, 11, 374}, + dictWord{138, 0, 253}, + dictWord{139, 0, 1003}, + dictWord{5, 11, 909}, + dictWord{9, 11, 849}, + dictWord{ + 138, + 11, + 805, + }, + dictWord{133, 10, 237}, + dictWord{7, 11, 525}, + dictWord{7, 11, 1579}, + dictWord{8, 11, 497}, + dictWord{136, 11, 573}, + dictWord{137, 0, 46}, + dictWord{ + 132, + 0, + 879, + }, + dictWord{134, 0, 806}, + dictWord{135, 0, 1868}, + dictWord{6, 0, 1837}, + dictWord{134, 0, 1846}, + dictWord{6, 0, 730}, + dictWord{134, 0, 881}, + dictWord{7, 0, 965}, + dictWord{7, 0, 1460}, + dictWord{7, 0, 1604}, + dictWord{7, 11, 193}, + dictWord{7, 11, 397}, + dictWord{7, 11, 1105}, + dictWord{8, 11, 124}, + dictWord{ + 8, + 11, + 619, + }, + dictWord{9, 11, 305}, + dictWord{10, 11, 264}, + dictWord{11, 11, 40}, + dictWord{12, 11, 349}, + dictWord{13, 11, 134}, + dictWord{13, 11, 295}, + dictWord{14, 11, 155}, + dictWord{15, 11, 120}, + dictWord{146, 11, 105}, + dictWord{136, 0, 506}, + dictWord{143, 0, 10}, + dictWord{4, 11, 262}, + dictWord{7, 11, 342}, + dictWord{7, 10, 571}, + dictWord{7, 10, 1877}, + dictWord{10, 10, 366}, + dictWord{141, 11, 23}, + dictWord{133, 11, 641}, + dictWord{10, 0, 22}, + dictWord{9, 10, 513}, + dictWord{10, 10, 39}, + dictWord{12, 10, 122}, + dictWord{140, 10, 187}, + dictWord{135, 11, 1431}, + dictWord{150, 11, 49}, + dictWord{4, 11, 99}, + dictWord{ + 6, + 11, + 250, + }, + dictWord{6, 11, 346}, + dictWord{8, 11, 127}, + dictWord{138, 11, 81}, + dictWord{6, 0, 2014}, + dictWord{8, 0, 
928}, + dictWord{10, 0, 960}, + dictWord{10, 0, 979}, + dictWord{140, 0, 996}, + dictWord{134, 0, 296}, + dictWord{132, 11, 915}, + dictWord{5, 11, 75}, + dictWord{9, 11, 517}, + dictWord{10, 11, 470}, + dictWord{ + 12, + 11, + 155, + }, + dictWord{141, 11, 224}, + dictWord{137, 10, 873}, + dictWord{4, 0, 854}, + dictWord{140, 11, 18}, + dictWord{134, 0, 587}, + dictWord{7, 10, 107}, + dictWord{ + 7, + 10, + 838, + }, + dictWord{8, 10, 550}, + dictWord{138, 10, 401}, + dictWord{11, 0, 636}, + dictWord{15, 0, 145}, + dictWord{17, 0, 34}, + dictWord{19, 0, 50}, + dictWord{ + 23, + 0, + 20, + }, + dictWord{11, 10, 588}, + dictWord{11, 10, 864}, + dictWord{11, 10, 968}, + dictWord{143, 10, 160}, + dictWord{135, 11, 216}, + dictWord{7, 0, 982}, + dictWord{ + 10, + 0, + 32, + }, + dictWord{143, 0, 56}, + dictWord{133, 10, 768}, + dictWord{133, 11, 954}, + dictWord{6, 11, 304}, + dictWord{7, 11, 1114}, + dictWord{8, 11, 418}, + dictWord{ + 10, + 11, + 345, + }, + dictWord{11, 11, 341}, + dictWord{11, 11, 675}, + dictWord{141, 11, 40}, + dictWord{9, 11, 410}, + dictWord{139, 11, 425}, + dictWord{136, 0, 941}, + dictWord{5, 0, 435}, + dictWord{132, 10, 894}, + dictWord{5, 0, 85}, + dictWord{6, 0, 419}, + dictWord{7, 0, 134}, + dictWord{7, 0, 305}, + dictWord{7, 0, 361}, + dictWord{ + 7, + 0, + 1337, + }, + dictWord{8, 0, 71}, + dictWord{140, 0, 519}, + dictWord{140, 0, 688}, + dictWord{135, 0, 740}, + dictWord{5, 0, 691}, + dictWord{7, 0, 345}, + dictWord{9, 0, 94}, + dictWord{140, 0, 169}, + dictWord{5, 0, 183}, + dictWord{6, 0, 582}, + dictWord{10, 0, 679}, + dictWord{140, 0, 435}, + dictWord{134, 11, 14}, + dictWord{6, 0, 945}, + dictWord{135, 0, 511}, + dictWord{134, 11, 1708}, + dictWord{5, 11, 113}, + dictWord{6, 11, 243}, + dictWord{7, 11, 1865}, + dictWord{11, 11, 161}, + dictWord{16, 11, 37}, + dictWord{145, 11, 99}, + dictWord{132, 11, 274}, + dictWord{137, 0, 539}, + dictWord{7, 0, 1993}, + dictWord{8, 0, 684}, + dictWord{134, 10, 272}, + dictWord{ + 6, + 0, + 659, + }, + dictWord{134, 0, 982}, + dictWord{4, 10, 9}, + dictWord{5, 10, 128}, + dictWord{7, 10, 368}, + dictWord{11, 10, 480}, + dictWord{148, 10, 3}, + dictWord{ + 134, + 0, + 583, + }, + dictWord{132, 0, 803}, + dictWord{133, 0, 704}, + dictWord{4, 0, 179}, + dictWord{5, 0, 198}, + dictWord{133, 0, 697}, + dictWord{7, 0, 347}, + dictWord{7, 0, 971}, + dictWord{8, 0, 181}, + dictWord{10, 0, 711}, + dictWord{135, 11, 166}, + dictWord{136, 10, 682}, + dictWord{4, 10, 2}, + dictWord{7, 10, 545}, + dictWord{7, 10, 894}, + dictWord{136, 11, 521}, + dictWord{135, 0, 481}, + dictWord{132, 0, 243}, + dictWord{5, 0, 203}, + dictWord{7, 0, 19}, + dictWord{7, 0, 71}, + dictWord{7, 0, 113}, + dictWord{ + 10, + 0, + 405, + }, + dictWord{11, 0, 357}, + dictWord{142, 0, 240}, + dictWord{5, 11, 725}, + dictWord{5, 11, 727}, + dictWord{135, 11, 1811}, + dictWord{6, 0, 826}, + dictWord{ + 137, + 11, + 304, + }, + dictWord{7, 0, 1450}, + dictWord{139, 0, 99}, + dictWord{133, 11, 654}, + dictWord{134, 0, 492}, + dictWord{5, 0, 134}, + dictWord{6, 0, 408}, + dictWord{ + 6, + 0, + 495, + }, + dictWord{7, 0, 1593}, + dictWord{6, 11, 273}, + dictWord{10, 11, 188}, + dictWord{13, 11, 377}, + dictWord{146, 11, 77}, + dictWord{9, 10, 769}, + dictWord{ + 140, + 10, + 185, + }, + dictWord{135, 11, 410}, + dictWord{142, 0, 4}, + dictWord{4, 0, 665}, + dictWord{134, 11, 1785}, + dictWord{4, 0, 248}, + dictWord{7, 0, 137}, + dictWord{ + 137, + 0, + 349, + }, + dictWord{5, 10, 530}, + dictWord{142, 10, 113}, + dictWord{7, 0, 1270}, + dictWord{139, 0, 612}, + 
dictWord{132, 11, 780}, + dictWord{5, 0, 371}, + dictWord{135, 0, 563}, + dictWord{135, 0, 826}, + dictWord{6, 0, 1535}, + dictWord{23, 0, 21}, + dictWord{151, 0, 23}, + dictWord{4, 0, 374}, + dictWord{7, 0, 547}, + dictWord{ + 7, + 0, + 1700, + }, + dictWord{7, 0, 1833}, + dictWord{139, 0, 858}, + dictWord{133, 10, 556}, + dictWord{7, 11, 612}, + dictWord{8, 11, 545}, + dictWord{8, 11, 568}, + dictWord{ + 8, + 11, + 642, + }, + dictWord{9, 11, 717}, + dictWord{10, 11, 541}, + dictWord{10, 11, 763}, + dictWord{11, 11, 449}, + dictWord{12, 11, 489}, + dictWord{13, 11, 153}, + dictWord{ + 13, + 11, + 296, + }, + dictWord{14, 11, 138}, + dictWord{14, 11, 392}, + dictWord{15, 11, 50}, + dictWord{16, 11, 6}, + dictWord{16, 11, 12}, + dictWord{148, 11, 9}, + dictWord{ + 9, + 0, + 311, + }, + dictWord{141, 0, 42}, + dictWord{8, 10, 16}, + dictWord{140, 10, 568}, + dictWord{6, 0, 1968}, + dictWord{6, 0, 2027}, + dictWord{138, 0, 991}, + dictWord{ + 6, + 0, + 1647, + }, + dictWord{7, 0, 1552}, + dictWord{7, 0, 2010}, + dictWord{9, 0, 494}, + dictWord{137, 0, 509}, + dictWord{133, 11, 948}, + dictWord{6, 10, 186}, + dictWord{ + 137, + 10, + 426, + }, + dictWord{134, 0, 769}, + dictWord{134, 0, 642}, + dictWord{132, 10, 585}, + dictWord{6, 0, 123}, + dictWord{7, 0, 214}, + dictWord{9, 0, 728}, + dictWord{ + 10, + 0, + 157, + }, + dictWord{11, 0, 346}, + dictWord{11, 0, 662}, + dictWord{143, 0, 106}, + dictWord{142, 11, 381}, + dictWord{135, 0, 1435}, + dictWord{4, 11, 532}, + dictWord{ + 5, + 11, + 706, + }, + dictWord{135, 11, 662}, + dictWord{5, 11, 837}, + dictWord{134, 11, 1651}, + dictWord{4, 10, 93}, + dictWord{5, 10, 252}, + dictWord{6, 10, 229}, + dictWord{ + 7, + 10, + 291, + }, + dictWord{9, 10, 550}, + dictWord{139, 10, 644}, + dictWord{148, 0, 79}, + dictWord{137, 10, 749}, + dictWord{134, 0, 1425}, + dictWord{ + 137, + 10, + 162, + }, + dictWord{4, 11, 362}, + dictWord{7, 11, 52}, + dictWord{7, 11, 303}, + dictWord{140, 11, 166}, + dictWord{132, 10, 381}, + dictWord{4, 11, 330}, + dictWord{ + 7, + 11, + 933, + }, + dictWord{7, 11, 2012}, + dictWord{136, 11, 292}, + dictWord{135, 11, 767}, + dictWord{4, 0, 707}, + dictWord{5, 0, 588}, + dictWord{6, 0, 393}, + dictWord{ + 13, + 0, + 106, + }, + dictWord{18, 0, 49}, + dictWord{147, 0, 41}, + dictWord{6, 0, 211}, + dictWord{7, 0, 1690}, + dictWord{11, 0, 486}, + dictWord{140, 0, 369}, + dictWord{ + 137, + 11, + 883, + }, + dictWord{4, 11, 703}, + dictWord{135, 11, 207}, + dictWord{4, 0, 187}, + dictWord{5, 0, 184}, + dictWord{5, 0, 690}, + dictWord{7, 0, 1869}, + dictWord{10, 0, 756}, + dictWord{139, 0, 783}, + dictWord{132, 11, 571}, + dictWord{134, 0, 1382}, + dictWord{5, 0, 175}, + dictWord{6, 10, 77}, + dictWord{6, 10, 157}, + dictWord{7, 10, 974}, + dictWord{7, 10, 1301}, + dictWord{7, 10, 1339}, + dictWord{7, 10, 1490}, + dictWord{7, 10, 1873}, + dictWord{137, 10, 628}, + dictWord{134, 0, 1493}, + dictWord{ + 5, + 11, + 873, + }, + dictWord{133, 11, 960}, + dictWord{134, 0, 1007}, + dictWord{12, 11, 93}, + dictWord{12, 11, 501}, + dictWord{13, 11, 362}, + dictWord{14, 11, 151}, + dictWord{15, 11, 40}, + dictWord{15, 11, 59}, + dictWord{16, 11, 46}, + dictWord{17, 11, 25}, + dictWord{18, 11, 14}, + dictWord{18, 11, 134}, + dictWord{19, 11, 25}, + dictWord{ + 19, + 11, + 69, + }, + dictWord{20, 11, 16}, + dictWord{20, 11, 19}, + dictWord{20, 11, 66}, + dictWord{21, 11, 23}, + dictWord{21, 11, 25}, + dictWord{150, 11, 42}, + dictWord{ + 11, + 10, + 919, + }, + dictWord{141, 10, 409}, + dictWord{134, 0, 219}, + dictWord{5, 0, 582}, + 
dictWord{6, 0, 1646}, + dictWord{7, 0, 99}, + dictWord{7, 0, 1962}, + dictWord{ + 7, + 0, + 1986, + }, + dictWord{8, 0, 515}, + dictWord{8, 0, 773}, + dictWord{9, 0, 23}, + dictWord{9, 0, 491}, + dictWord{12, 0, 620}, + dictWord{142, 0, 93}, + dictWord{133, 0, 851}, + dictWord{5, 11, 33}, + dictWord{134, 11, 470}, + dictWord{135, 11, 1291}, + dictWord{134, 0, 1278}, + dictWord{135, 11, 1882}, + dictWord{135, 10, 1489}, + dictWord{132, 0, 1000}, + dictWord{138, 0, 982}, + dictWord{8, 0, 762}, + dictWord{8, 0, 812}, + dictWord{137, 0, 910}, + dictWord{6, 11, 47}, + dictWord{7, 11, 90}, + dictWord{ + 7, + 11, + 664, + }, + dictWord{7, 11, 830}, + dictWord{7, 11, 1380}, + dictWord{7, 11, 2025}, + dictWord{8, 11, 448}, + dictWord{136, 11, 828}, + dictWord{4, 0, 98}, + dictWord{ + 4, + 0, + 940, + }, + dictWord{6, 0, 1819}, + dictWord{6, 0, 1834}, + dictWord{6, 0, 1841}, + dictWord{7, 0, 1365}, + dictWord{8, 0, 859}, + dictWord{8, 0, 897}, + dictWord{8, 0, 918}, + dictWord{9, 0, 422}, + dictWord{9, 0, 670}, + dictWord{10, 0, 775}, + dictWord{10, 0, 894}, + dictWord{10, 0, 909}, + dictWord{10, 0, 910}, + dictWord{10, 0, 935}, + dictWord{ + 11, + 0, + 210, + }, + dictWord{12, 0, 750}, + dictWord{12, 0, 755}, + dictWord{13, 0, 26}, + dictWord{13, 0, 457}, + dictWord{13, 0, 476}, + dictWord{16, 0, 100}, + dictWord{16, 0, 109}, + dictWord{18, 0, 173}, + dictWord{18, 0, 175}, + dictWord{8, 10, 398}, + dictWord{9, 10, 681}, + dictWord{139, 10, 632}, + dictWord{9, 11, 417}, + dictWord{ + 137, + 11, + 493, + }, + dictWord{136, 10, 645}, + dictWord{138, 0, 906}, + dictWord{134, 0, 1730}, + dictWord{134, 10, 20}, + dictWord{133, 11, 1019}, + dictWord{134, 0, 1185}, + dictWord{10, 0, 40}, + dictWord{136, 10, 769}, + dictWord{9, 0, 147}, + dictWord{134, 11, 208}, + dictWord{140, 0, 650}, + dictWord{5, 0, 209}, + dictWord{6, 0, 30}, + dictWord{11, 0, 56}, + dictWord{139, 0, 305}, + dictWord{132, 0, 553}, + dictWord{138, 11, 344}, + dictWord{6, 11, 68}, + dictWord{7, 11, 398}, + dictWord{7, 11, 448}, + dictWord{ + 7, + 11, + 1629, + }, + dictWord{7, 11, 1813}, + dictWord{8, 11, 387}, + dictWord{8, 11, 442}, + dictWord{9, 11, 710}, + dictWord{10, 11, 282}, + dictWord{138, 11, 722}, + dictWord{5, 0, 597}, + dictWord{14, 0, 20}, + dictWord{142, 11, 20}, + dictWord{135, 0, 1614}, + dictWord{135, 10, 1757}, + dictWord{4, 0, 150}, + dictWord{5, 0, 303}, + dictWord{6, 0, 327}, + dictWord{135, 10, 937}, + dictWord{16, 0, 49}, + dictWord{7, 10, 1652}, + dictWord{144, 11, 49}, + dictWord{8, 0, 192}, + dictWord{10, 0, 78}, + dictWord{ + 141, + 0, + 359, + }, + dictWord{135, 0, 786}, + dictWord{143, 0, 134}, + dictWord{6, 0, 1638}, + dictWord{7, 0, 79}, + dictWord{7, 0, 496}, + dictWord{9, 0, 138}, + dictWord{ + 10, + 0, + 336, + }, + dictWord{11, 0, 12}, + dictWord{12, 0, 412}, + dictWord{12, 0, 440}, + dictWord{142, 0, 305}, + dictWord{136, 11, 491}, + dictWord{4, 10, 579}, + dictWord{ + 5, + 10, + 226, + }, + dictWord{5, 10, 323}, + dictWord{135, 10, 960}, + dictWord{7, 0, 204}, + dictWord{7, 0, 415}, + dictWord{8, 0, 42}, + dictWord{10, 0, 85}, + dictWord{139, 0, 564}, + dictWord{132, 0, 614}, + dictWord{4, 11, 403}, + dictWord{5, 11, 441}, + dictWord{7, 11, 450}, + dictWord{11, 11, 101}, + dictWord{12, 11, 193}, + dictWord{141, 11, 430}, + dictWord{135, 11, 1927}, + dictWord{135, 11, 1330}, + dictWord{4, 0, 3}, + dictWord{5, 0, 247}, + dictWord{5, 0, 644}, + dictWord{7, 0, 744}, + dictWord{7, 0, 1207}, + dictWord{7, 0, 1225}, + dictWord{7, 0, 1909}, + dictWord{146, 0, 147}, + dictWord{136, 0, 942}, + dictWord{4, 0, 
1019}, + dictWord{134, 0, 2023}, + dictWord{5, 11, 679}, + dictWord{133, 10, 973}, + dictWord{5, 0, 285}, + dictWord{9, 0, 67}, + dictWord{13, 0, 473}, + dictWord{143, 0, 82}, + dictWord{7, 11, 328}, + dictWord{137, 11, 326}, + dictWord{151, 0, 8}, + dictWord{6, 10, 135}, + dictWord{135, 10, 1176}, + dictWord{135, 11, 1128}, + dictWord{134, 0, 1309}, + dictWord{135, 11, 1796}, + dictWord{ + 135, + 10, + 314, + }, + dictWord{4, 11, 574}, + dictWord{7, 11, 350}, + dictWord{7, 11, 1024}, + dictWord{8, 11, 338}, + dictWord{9, 11, 677}, + dictWord{10, 11, 808}, + dictWord{ + 139, + 11, + 508, + }, + dictWord{7, 11, 818}, + dictWord{17, 11, 14}, + dictWord{17, 11, 45}, + dictWord{18, 11, 75}, + dictWord{148, 11, 18}, + dictWord{146, 10, 4}, + dictWord{ + 135, + 11, + 1081, + }, + dictWord{4, 0, 29}, + dictWord{6, 0, 532}, + dictWord{7, 0, 1628}, + dictWord{7, 0, 1648}, + dictWord{9, 0, 350}, + dictWord{10, 0, 433}, + dictWord{11, 0, 97}, + dictWord{11, 0, 557}, + dictWord{11, 0, 745}, + dictWord{12, 0, 289}, + dictWord{12, 0, 335}, + dictWord{12, 0, 348}, + dictWord{12, 0, 606}, + dictWord{13, 0, 116}, + dictWord{13, 0, 233}, + dictWord{13, 0, 466}, + dictWord{14, 0, 181}, + dictWord{14, 0, 209}, + dictWord{14, 0, 232}, + dictWord{14, 0, 236}, + dictWord{14, 0, 300}, + dictWord{ + 16, + 0, + 41, + }, + dictWord{148, 0, 97}, + dictWord{7, 0, 318}, + dictWord{6, 10, 281}, + dictWord{8, 10, 282}, + dictWord{8, 10, 480}, + dictWord{8, 10, 499}, + dictWord{9, 10, 198}, + dictWord{10, 10, 143}, + dictWord{10, 10, 169}, + dictWord{10, 10, 211}, + dictWord{10, 10, 417}, + dictWord{10, 10, 574}, + dictWord{11, 10, 147}, + dictWord{ + 11, + 10, + 395, + }, + dictWord{12, 10, 75}, + dictWord{12, 10, 407}, + dictWord{12, 10, 608}, + dictWord{13, 10, 500}, + dictWord{142, 10, 251}, + dictWord{135, 11, 1676}, + dictWord{135, 11, 2037}, + dictWord{135, 0, 1692}, + dictWord{5, 0, 501}, + dictWord{7, 0, 1704}, + dictWord{9, 0, 553}, + dictWord{11, 0, 520}, + dictWord{12, 0, 557}, + dictWord{141, 0, 249}, + dictWord{6, 0, 1527}, + dictWord{14, 0, 324}, + dictWord{15, 0, 55}, + dictWord{15, 0, 80}, + dictWord{14, 11, 324}, + dictWord{15, 11, 55}, + dictWord{143, 11, 80}, + dictWord{135, 10, 1776}, + dictWord{8, 0, 988}, + dictWord{137, 11, 297}, + dictWord{132, 10, 419}, + dictWord{142, 0, 223}, + dictWord{ + 139, + 11, + 234, + }, + dictWord{7, 0, 1123}, + dictWord{12, 0, 508}, + dictWord{14, 0, 102}, + dictWord{14, 0, 226}, + dictWord{144, 0, 57}, + dictWord{4, 10, 138}, + dictWord{ + 7, + 10, + 1012, + }, + dictWord{7, 10, 1280}, + dictWord{137, 10, 76}, + dictWord{7, 0, 1764}, + dictWord{5, 10, 29}, + dictWord{140, 10, 638}, + dictWord{134, 0, 2015}, + dictWord{134, 0, 1599}, + dictWord{138, 11, 56}, + dictWord{6, 11, 306}, + dictWord{7, 11, 1140}, + dictWord{7, 11, 1340}, + dictWord{8, 11, 133}, + dictWord{ + 138, + 11, + 449, + }, + dictWord{139, 11, 1011}, + dictWord{6, 10, 1710}, + dictWord{135, 10, 2038}, + dictWord{7, 11, 1763}, + dictWord{140, 11, 310}, + dictWord{6, 0, 129}, + dictWord{4, 10, 17}, + dictWord{5, 10, 23}, + dictWord{7, 10, 995}, + dictWord{11, 10, 383}, + dictWord{11, 10, 437}, + dictWord{12, 10, 460}, + dictWord{140, 10, 532}, + dictWord{5, 11, 329}, + dictWord{136, 11, 260}, + dictWord{133, 10, 862}, + dictWord{132, 0, 534}, + dictWord{6, 0, 811}, + dictWord{135, 0, 626}, + dictWord{ + 132, + 11, + 657, + }, + dictWord{4, 0, 25}, + dictWord{5, 0, 60}, + dictWord{6, 0, 504}, + dictWord{7, 0, 614}, + dictWord{7, 0, 1155}, + dictWord{12, 0, 0}, + dictWord{152, 11, 7}, + dictWord{ + 7, + 
0, + 1248, + }, + dictWord{11, 0, 621}, + dictWord{139, 0, 702}, + dictWord{137, 0, 321}, + dictWord{8, 10, 70}, + dictWord{12, 10, 171}, + dictWord{141, 10, 272}, + dictWord{ + 10, + 10, + 233, + }, + dictWord{139, 10, 76}, + dictWord{4, 0, 379}, + dictWord{7, 0, 1397}, + dictWord{134, 10, 442}, + dictWord{5, 11, 66}, + dictWord{7, 11, 1896}, + dictWord{ + 136, + 11, + 288, + }, + dictWord{134, 11, 1643}, + dictWord{134, 10, 1709}, + dictWord{4, 11, 21}, + dictWord{5, 11, 91}, + dictWord{5, 11, 570}, + dictWord{5, 11, 648}, + dictWord{5, 11, 750}, + dictWord{5, 11, 781}, + dictWord{6, 11, 54}, + dictWord{6, 11, 112}, + dictWord{6, 11, 402}, + dictWord{6, 11, 1732}, + dictWord{7, 11, 315}, + dictWord{ + 7, + 11, + 749, + }, + dictWord{7, 11, 1347}, + dictWord{7, 11, 1900}, + dictWord{9, 11, 78}, + dictWord{9, 11, 508}, + dictWord{10, 11, 611}, + dictWord{11, 11, 510}, + dictWord{ + 11, + 11, + 728, + }, + dictWord{13, 11, 36}, + dictWord{14, 11, 39}, + dictWord{16, 11, 83}, + dictWord{17, 11, 124}, + dictWord{148, 11, 30}, + dictWord{4, 0, 118}, + dictWord{ + 6, + 0, + 274, + }, + dictWord{6, 0, 361}, + dictWord{7, 0, 75}, + dictWord{141, 0, 441}, + dictWord{10, 11, 322}, + dictWord{10, 11, 719}, + dictWord{139, 11, 407}, + dictWord{ + 147, + 10, + 119, + }, + dictWord{12, 11, 549}, + dictWord{14, 11, 67}, + dictWord{147, 11, 60}, + dictWord{11, 10, 69}, + dictWord{12, 10, 105}, + dictWord{12, 10, 117}, + dictWord{13, 10, 213}, + dictWord{14, 10, 13}, + dictWord{14, 10, 62}, + dictWord{14, 10, 177}, + dictWord{14, 10, 421}, + dictWord{15, 10, 19}, + dictWord{146, 10, 141}, + dictWord{9, 0, 841}, + dictWord{137, 10, 309}, + dictWord{7, 10, 608}, + dictWord{7, 10, 976}, + dictWord{8, 11, 125}, + dictWord{8, 11, 369}, + dictWord{8, 11, 524}, + dictWord{9, 10, 146}, + dictWord{10, 10, 206}, + dictWord{10, 11, 486}, + dictWord{10, 10, 596}, + dictWord{11, 11, 13}, + dictWord{11, 11, 381}, + dictWord{11, 11, 736}, + dictWord{11, 11, 766}, + dictWord{11, 11, 845}, + dictWord{13, 11, 114}, + dictWord{13, 10, 218}, + dictWord{13, 11, 292}, + dictWord{14, 11, 47}, + dictWord{ + 142, + 10, + 153, + }, + dictWord{12, 0, 693}, + dictWord{135, 11, 759}, + dictWord{5, 0, 314}, + dictWord{6, 0, 221}, + dictWord{7, 0, 419}, + dictWord{10, 0, 650}, + dictWord{11, 0, 396}, + dictWord{12, 0, 156}, + dictWord{13, 0, 369}, + dictWord{14, 0, 333}, + dictWord{145, 0, 47}, + dictWord{6, 11, 1684}, + dictWord{6, 11, 1731}, + dictWord{7, 11, 356}, + dictWord{7, 11, 1932}, + dictWord{8, 11, 54}, + dictWord{8, 11, 221}, + dictWord{9, 11, 225}, + dictWord{9, 11, 356}, + dictWord{10, 11, 77}, + dictWord{10, 11, 446}, + dictWord{10, 11, 731}, + dictWord{12, 11, 404}, + dictWord{141, 11, 491}, + dictWord{132, 11, 375}, + dictWord{4, 10, 518}, + dictWord{135, 10, 1136}, + dictWord{ + 4, + 0, + 913, + }, + dictWord{4, 11, 411}, + dictWord{11, 11, 643}, + dictWord{140, 11, 115}, + dictWord{4, 11, 80}, + dictWord{133, 11, 44}, + dictWord{8, 10, 689}, + dictWord{ + 137, + 10, + 863, + }, + dictWord{138, 0, 880}, + dictWord{4, 10, 18}, + dictWord{7, 10, 145}, + dictWord{7, 10, 444}, + dictWord{7, 10, 1278}, + dictWord{8, 10, 49}, + dictWord{ + 8, + 10, + 400, + }, + dictWord{9, 10, 71}, + dictWord{9, 10, 250}, + dictWord{10, 10, 459}, + dictWord{12, 10, 160}, + dictWord{144, 10, 24}, + dictWord{136, 0, 475}, + dictWord{ + 5, + 0, + 1016, + }, + dictWord{5, 11, 299}, + dictWord{135, 11, 1083}, + dictWord{7, 0, 602}, + dictWord{8, 0, 179}, + dictWord{10, 0, 781}, + dictWord{140, 0, 126}, + dictWord{ + 6, + 0, + 329, + }, + 
dictWord{138, 0, 111}, + dictWord{135, 0, 1864}, + dictWord{4, 11, 219}, + dictWord{7, 11, 1761}, + dictWord{137, 11, 86}, + dictWord{6, 0, 1888}, + dictWord{ + 6, + 0, + 1892, + }, + dictWord{6, 0, 1901}, + dictWord{6, 0, 1904}, + dictWord{9, 0, 953}, + dictWord{9, 0, 985}, + dictWord{9, 0, 991}, + dictWord{9, 0, 1001}, + dictWord{12, 0, 818}, + dictWord{12, 0, 846}, + dictWord{12, 0, 847}, + dictWord{12, 0, 861}, + dictWord{12, 0, 862}, + dictWord{12, 0, 873}, + dictWord{12, 0, 875}, + dictWord{12, 0, 877}, + dictWord{12, 0, 879}, + dictWord{12, 0, 881}, + dictWord{12, 0, 884}, + dictWord{12, 0, 903}, + dictWord{12, 0, 915}, + dictWord{12, 0, 926}, + dictWord{12, 0, 939}, + dictWord{ + 15, + 0, + 182, + }, + dictWord{15, 0, 219}, + dictWord{15, 0, 255}, + dictWord{18, 0, 191}, + dictWord{18, 0, 209}, + dictWord{18, 0, 211}, + dictWord{149, 0, 41}, + dictWord{ + 5, + 11, + 328, + }, + dictWord{135, 11, 918}, + dictWord{137, 0, 780}, + dictWord{12, 0, 82}, + dictWord{143, 0, 36}, + dictWord{133, 10, 1010}, + dictWord{5, 0, 821}, + dictWord{ + 134, + 0, + 1687, + }, + dictWord{133, 11, 514}, + dictWord{132, 0, 956}, + dictWord{134, 0, 1180}, + dictWord{10, 0, 112}, + dictWord{5, 10, 87}, + dictWord{7, 10, 313}, + dictWord{ + 7, + 10, + 1103, + }, + dictWord{10, 10, 582}, + dictWord{11, 10, 389}, + dictWord{11, 10, 813}, + dictWord{12, 10, 385}, + dictWord{13, 10, 286}, + dictWord{14, 10, 124}, + dictWord{146, 10, 108}, + dictWord{5, 0, 71}, + dictWord{7, 0, 1407}, + dictWord{9, 0, 704}, + dictWord{10, 0, 261}, + dictWord{10, 0, 619}, + dictWord{11, 0, 547}, + dictWord{11, 0, 619}, + dictWord{143, 0, 157}, + dictWord{4, 0, 531}, + dictWord{5, 0, 455}, + dictWord{5, 11, 301}, + dictWord{6, 11, 571}, + dictWord{14, 11, 49}, + dictWord{ + 146, + 11, + 102, + }, + dictWord{132, 10, 267}, + dictWord{6, 0, 385}, + dictWord{7, 0, 2008}, + dictWord{9, 0, 337}, + dictWord{138, 0, 517}, + dictWord{133, 11, 726}, + dictWord{133, 11, 364}, + dictWord{4, 11, 76}, + dictWord{7, 11, 1550}, + dictWord{9, 11, 306}, + dictWord{9, 11, 430}, + dictWord{9, 11, 663}, + dictWord{10, 11, 683}, + dictWord{11, 11, 427}, + dictWord{11, 11, 753}, + dictWord{12, 11, 334}, + dictWord{12, 11, 442}, + dictWord{14, 11, 258}, + dictWord{14, 11, 366}, + dictWord{ + 143, + 11, + 131, + }, + dictWord{6, 0, 1865}, + dictWord{6, 0, 1879}, + dictWord{6, 0, 1881}, + dictWord{6, 0, 1894}, + dictWord{6, 0, 1908}, + dictWord{9, 0, 915}, + dictWord{9, 0, 926}, + dictWord{9, 0, 940}, + dictWord{9, 0, 943}, + dictWord{9, 0, 966}, + dictWord{9, 0, 980}, + dictWord{9, 0, 989}, + dictWord{9, 0, 1005}, + dictWord{9, 0, 1010}, + dictWord{ + 12, + 0, + 813, + }, + dictWord{12, 0, 817}, + dictWord{12, 0, 840}, + dictWord{12, 0, 843}, + dictWord{12, 0, 855}, + dictWord{12, 0, 864}, + dictWord{12, 0, 871}, + dictWord{12, 0, 872}, + dictWord{12, 0, 899}, + dictWord{12, 0, 905}, + dictWord{12, 0, 924}, + dictWord{15, 0, 171}, + dictWord{15, 0, 181}, + dictWord{15, 0, 224}, + dictWord{15, 0, 235}, + dictWord{15, 0, 251}, + dictWord{146, 0, 184}, + dictWord{137, 11, 52}, + dictWord{5, 0, 16}, + dictWord{6, 0, 86}, + dictWord{6, 0, 603}, + dictWord{7, 0, 292}, + dictWord{7, 0, 561}, + dictWord{8, 0, 257}, + dictWord{8, 0, 382}, + dictWord{9, 0, 721}, + dictWord{9, 0, 778}, + dictWord{11, 0, 581}, + dictWord{140, 0, 466}, + dictWord{4, 0, 486}, + dictWord{ + 5, + 0, + 491, + }, + dictWord{135, 10, 1121}, + dictWord{4, 0, 72}, + dictWord{6, 0, 265}, + dictWord{135, 0, 1300}, + dictWord{135, 11, 1183}, + dictWord{10, 10, 249}, + dictWord{139, 10, 209}, + 
dictWord{132, 10, 561}, + dictWord{137, 11, 519}, + dictWord{4, 11, 656}, + dictWord{4, 10, 760}, + dictWord{135, 11, 779}, + dictWord{ + 9, + 10, + 154, + }, + dictWord{140, 10, 485}, + dictWord{135, 11, 1793}, + dictWord{135, 11, 144}, + dictWord{136, 10, 255}, + dictWord{133, 0, 621}, + dictWord{4, 10, 368}, + dictWord{135, 10, 641}, + dictWord{135, 11, 1373}, + dictWord{7, 11, 554}, + dictWord{7, 11, 605}, + dictWord{141, 11, 10}, + dictWord{137, 0, 234}, + dictWord{ + 5, + 0, + 815, + }, + dictWord{6, 0, 1688}, + dictWord{134, 0, 1755}, + dictWord{5, 11, 838}, + dictWord{5, 11, 841}, + dictWord{134, 11, 1649}, + dictWord{7, 0, 1987}, + dictWord{ + 7, + 0, + 2040, + }, + dictWord{136, 0, 743}, + dictWord{133, 11, 1012}, + dictWord{6, 0, 197}, + dictWord{136, 0, 205}, + dictWord{6, 0, 314}, + dictWord{134, 11, 314}, + dictWord{144, 11, 53}, + dictWord{6, 11, 251}, + dictWord{7, 11, 365}, + dictWord{7, 11, 1357}, + dictWord{7, 11, 1497}, + dictWord{8, 11, 154}, + dictWord{141, 11, 281}, + dictWord{133, 11, 340}, + dictWord{6, 0, 452}, + dictWord{7, 0, 312}, + dictWord{138, 0, 219}, + dictWord{138, 0, 589}, + dictWord{4, 0, 333}, + dictWord{9, 0, 176}, + dictWord{12, 0, 353}, + dictWord{141, 0, 187}, + dictWord{9, 10, 92}, + dictWord{147, 10, 91}, + dictWord{134, 0, 1110}, + dictWord{11, 0, 47}, + dictWord{139, 11, 495}, + dictWord{6, 10, 525}, + dictWord{8, 10, 806}, + dictWord{9, 10, 876}, + dictWord{140, 10, 284}, + dictWord{8, 11, 261}, + dictWord{9, 11, 144}, + dictWord{9, 11, 466}, + dictWord{10, 11, 370}, + dictWord{12, 11, 470}, + dictWord{13, 11, 144}, + dictWord{142, 11, 348}, + dictWord{137, 11, 897}, + dictWord{8, 0, 863}, + dictWord{8, 0, 864}, + dictWord{8, 0, 868}, + dictWord{8, 0, 884}, + dictWord{10, 0, 866}, + dictWord{10, 0, 868}, + dictWord{10, 0, 873}, + dictWord{10, 0, 911}, + dictWord{10, 0, 912}, + dictWord{ + 10, + 0, + 944, + }, + dictWord{12, 0, 727}, + dictWord{6, 11, 248}, + dictWord{9, 11, 546}, + dictWord{10, 11, 535}, + dictWord{11, 11, 681}, + dictWord{141, 11, 135}, + dictWord{ + 6, + 0, + 300, + }, + dictWord{135, 0, 1515}, + dictWord{134, 0, 1237}, + dictWord{139, 10, 958}, + dictWord{133, 10, 594}, + dictWord{140, 11, 250}, + dictWord{ + 134, + 0, + 1685, + }, + dictWord{134, 11, 567}, + dictWord{7, 0, 135}, + dictWord{8, 0, 7}, + dictWord{8, 0, 62}, + dictWord{9, 0, 243}, + dictWord{10, 0, 658}, + dictWord{10, 0, 697}, + dictWord{11, 0, 456}, + dictWord{139, 0, 756}, + dictWord{9, 0, 395}, + dictWord{138, 0, 79}, + dictWord{6, 10, 1641}, + dictWord{136, 10, 820}, + dictWord{4, 10, 302}, + dictWord{135, 10, 1766}, + dictWord{134, 11, 174}, + dictWord{135, 10, 1313}, + dictWord{135, 0, 631}, + dictWord{134, 10, 1674}, + dictWord{134, 11, 395}, + dictWord{138, 0, 835}, + dictWord{7, 0, 406}, + dictWord{7, 0, 459}, + dictWord{8, 0, 606}, + dictWord{139, 0, 726}, + dictWord{134, 11, 617}, + dictWord{134, 0, 979}, + dictWord{ + 6, + 10, + 389, + }, + dictWord{7, 10, 149}, + dictWord{9, 10, 142}, + dictWord{138, 10, 94}, + dictWord{5, 11, 878}, + dictWord{133, 11, 972}, + dictWord{6, 10, 8}, + dictWord{ + 7, + 10, + 1881, + }, + dictWord{8, 10, 91}, + dictWord{136, 11, 511}, + dictWord{133, 0, 612}, + dictWord{132, 11, 351}, + dictWord{4, 0, 372}, + dictWord{7, 0, 482}, + dictWord{ + 8, + 0, + 158, + }, + dictWord{9, 0, 602}, + dictWord{9, 0, 615}, + dictWord{10, 0, 245}, + dictWord{10, 0, 678}, + dictWord{10, 0, 744}, + dictWord{11, 0, 248}, + dictWord{ + 139, + 0, + 806, + }, + dictWord{5, 0, 854}, + dictWord{135, 0, 1991}, + dictWord{132, 11, 286}, + 
dictWord{135, 11, 344}, + dictWord{7, 11, 438}, + dictWord{7, 11, 627}, + dictWord{ + 7, + 11, + 1516, + }, + dictWord{8, 11, 40}, + dictWord{9, 11, 56}, + dictWord{9, 11, 294}, + dictWord{10, 11, 30}, + dictWord{10, 11, 259}, + dictWord{11, 11, 969}, + dictWord{ + 146, + 11, + 148, + }, + dictWord{135, 0, 1492}, + dictWord{5, 11, 259}, + dictWord{7, 11, 414}, + dictWord{7, 11, 854}, + dictWord{142, 11, 107}, + dictWord{135, 10, 1746}, + dictWord{6, 0, 833}, + dictWord{134, 0, 998}, + dictWord{135, 10, 24}, + dictWord{6, 0, 750}, + dictWord{135, 0, 1739}, + dictWord{4, 10, 503}, + dictWord{ + 135, + 10, + 1661, + }, + dictWord{5, 10, 130}, + dictWord{7, 10, 1314}, + dictWord{9, 10, 610}, + dictWord{10, 10, 718}, + dictWord{11, 10, 601}, + dictWord{11, 10, 819}, + dictWord{ + 11, + 10, + 946, + }, + dictWord{140, 10, 536}, + dictWord{10, 10, 149}, + dictWord{11, 10, 280}, + dictWord{142, 10, 336}, + dictWord{132, 11, 738}, + dictWord{ + 135, + 10, + 1946, + }, + dictWord{5, 0, 195}, + dictWord{135, 0, 1685}, + dictWord{7, 0, 1997}, + dictWord{8, 0, 730}, + dictWord{139, 0, 1006}, + dictWord{151, 11, 17}, + dictWord{ + 133, + 11, + 866, + }, + dictWord{14, 0, 463}, + dictWord{14, 0, 470}, + dictWord{150, 0, 61}, + dictWord{5, 0, 751}, + dictWord{8, 0, 266}, + dictWord{11, 0, 578}, + dictWord{ + 4, + 10, + 392, + }, + dictWord{135, 10, 1597}, + dictWord{5, 10, 433}, + dictWord{9, 10, 633}, + dictWord{139, 10, 629}, + dictWord{135, 0, 821}, + dictWord{6, 0, 715}, + dictWord{ + 134, + 0, + 1325, + }, + dictWord{133, 11, 116}, + dictWord{6, 0, 868}, + dictWord{132, 11, 457}, + dictWord{134, 0, 959}, + dictWord{6, 10, 234}, + dictWord{138, 11, 199}, + dictWord{7, 0, 1053}, + dictWord{7, 10, 1950}, + dictWord{8, 10, 680}, + dictWord{11, 10, 817}, + dictWord{147, 10, 88}, + dictWord{7, 10, 1222}, + dictWord{ + 138, + 10, + 386, + }, + dictWord{5, 0, 950}, + dictWord{5, 0, 994}, + dictWord{6, 0, 351}, + dictWord{134, 0, 1124}, + dictWord{134, 0, 1081}, + dictWord{7, 0, 1595}, + dictWord{6, 10, 5}, + dictWord{11, 10, 249}, + dictWord{12, 10, 313}, + dictWord{16, 10, 66}, + dictWord{145, 10, 26}, + dictWord{148, 0, 59}, + dictWord{5, 11, 527}, + dictWord{6, 11, 189}, + dictWord{135, 11, 859}, + dictWord{5, 10, 963}, + dictWord{6, 10, 1773}, + dictWord{11, 11, 104}, + dictWord{11, 11, 554}, + dictWord{15, 11, 60}, + dictWord{ + 143, + 11, + 125, + }, + dictWord{135, 0, 47}, + dictWord{137, 0, 684}, + dictWord{134, 11, 116}, + dictWord{134, 0, 1606}, + dictWord{134, 0, 777}, + dictWord{7, 0, 1020}, + dictWord{ + 8, + 10, + 509, + }, + dictWord{136, 10, 792}, + dictWord{135, 0, 1094}, + dictWord{132, 0, 350}, + dictWord{133, 11, 487}, + dictWord{4, 11, 86}, + dictWord{5, 11, 667}, + dictWord{5, 11, 753}, + dictWord{6, 11, 316}, + dictWord{6, 11, 455}, + dictWord{135, 11, 946}, + dictWord{7, 0, 1812}, + dictWord{13, 0, 259}, + dictWord{13, 0, 356}, + dictWord{14, 0, 242}, + dictWord{147, 0, 114}, + dictWord{132, 10, 931}, + dictWord{133, 0, 967}, + dictWord{4, 0, 473}, + dictWord{7, 0, 623}, + dictWord{8, 0, 808}, + dictWord{ + 9, + 0, + 871, + }, + dictWord{9, 0, 893}, + dictWord{11, 0, 38}, + dictWord{11, 0, 431}, + dictWord{12, 0, 112}, + dictWord{12, 0, 217}, + dictWord{12, 0, 243}, + dictWord{12, 0, 562}, + dictWord{12, 0, 663}, + dictWord{12, 0, 683}, + dictWord{13, 0, 141}, + dictWord{13, 0, 197}, + dictWord{13, 0, 227}, + dictWord{13, 0, 406}, + dictWord{13, 0, 487}, + dictWord{14, 0, 156}, + dictWord{14, 0, 203}, + dictWord{14, 0, 224}, + dictWord{14, 0, 256}, + dictWord{18, 0, 58}, + 
dictWord{150, 0, 0}, + dictWord{138, 0, 286}, + dictWord{ + 7, + 10, + 943, + }, + dictWord{139, 10, 614}, + dictWord{135, 10, 1837}, + dictWord{150, 11, 45}, + dictWord{132, 0, 798}, + dictWord{4, 0, 222}, + dictWord{7, 0, 286}, + dictWord{136, 0, 629}, + dictWord{4, 11, 79}, + dictWord{7, 11, 1773}, + dictWord{10, 11, 450}, + dictWord{11, 11, 589}, + dictWord{13, 11, 332}, + dictWord{13, 11, 493}, + dictWord{14, 11, 183}, + dictWord{14, 11, 334}, + dictWord{14, 11, 362}, + dictWord{14, 11, 368}, + dictWord{14, 11, 376}, + dictWord{14, 11, 379}, + dictWord{ + 19, + 11, + 90, + }, + dictWord{19, 11, 103}, + dictWord{19, 11, 127}, + dictWord{148, 11, 90}, + dictWord{5, 0, 337}, + dictWord{11, 0, 513}, + dictWord{11, 0, 889}, + dictWord{ + 11, + 0, + 961, + }, + dictWord{12, 0, 461}, + dictWord{13, 0, 79}, + dictWord{15, 0, 121}, + dictWord{4, 10, 90}, + dictWord{5, 10, 545}, + dictWord{7, 10, 754}, + dictWord{9, 10, 186}, + dictWord{10, 10, 72}, + dictWord{10, 10, 782}, + dictWord{11, 10, 577}, + dictWord{11, 10, 610}, + dictWord{12, 10, 354}, + dictWord{12, 10, 362}, + dictWord{ + 140, + 10, + 595, + }, + dictWord{141, 0, 306}, + dictWord{136, 0, 146}, + dictWord{7, 0, 1646}, + dictWord{9, 10, 329}, + dictWord{11, 10, 254}, + dictWord{141, 11, 124}, + dictWord{ + 4, + 0, + 465, + }, + dictWord{135, 0, 1663}, + dictWord{132, 0, 525}, + dictWord{133, 11, 663}, + dictWord{10, 0, 299}, + dictWord{18, 0, 74}, + dictWord{9, 10, 187}, + dictWord{ + 11, + 10, + 1016, + }, + dictWord{145, 10, 44}, + dictWord{7, 0, 165}, + dictWord{7, 0, 919}, + dictWord{4, 10, 506}, + dictWord{136, 10, 517}, + dictWord{5, 10, 295}, + dictWord{ + 135, + 10, + 1680, + }, + dictWord{133, 11, 846}, + dictWord{134, 0, 1064}, + dictWord{5, 11, 378}, + dictWord{7, 11, 1402}, + dictWord{7, 11, 1414}, + dictWord{8, 11, 465}, + dictWord{9, 11, 286}, + dictWord{10, 11, 185}, + dictWord{10, 11, 562}, + dictWord{10, 11, 635}, + dictWord{11, 11, 31}, + dictWord{11, 11, 393}, + dictWord{ + 12, + 11, + 456, + }, + dictWord{13, 11, 312}, + dictWord{18, 11, 65}, + dictWord{18, 11, 96}, + dictWord{147, 11, 89}, + dictWord{132, 0, 596}, + dictWord{7, 10, 987}, + dictWord{ + 9, + 10, + 688, + }, + dictWord{10, 10, 522}, + dictWord{11, 10, 788}, + dictWord{140, 10, 566}, + dictWord{6, 0, 82}, + dictWord{7, 0, 138}, + dictWord{7, 0, 517}, + dictWord{7, 0, 1741}, + dictWord{11, 0, 238}, + dictWord{4, 11, 648}, + dictWord{134, 10, 1775}, + dictWord{7, 0, 1233}, + dictWord{7, 10, 700}, + dictWord{7, 10, 940}, + dictWord{8, 10, 514}, + dictWord{9, 10, 116}, + dictWord{9, 10, 535}, + dictWord{10, 10, 118}, + dictWord{11, 10, 107}, + dictWord{11, 10, 148}, + dictWord{11, 10, 922}, + dictWord{ + 12, + 10, + 254, + }, + dictWord{12, 10, 421}, + dictWord{142, 10, 238}, + dictWord{4, 0, 962}, + dictWord{6, 0, 1824}, + dictWord{8, 0, 894}, + dictWord{12, 0, 708}, + dictWord{ + 12, + 0, + 725, + }, + dictWord{14, 0, 451}, + dictWord{20, 0, 94}, + dictWord{22, 0, 59}, + dictWord{150, 0, 62}, + dictWord{5, 11, 945}, + dictWord{6, 11, 1656}, + dictWord{6, 11, 1787}, + dictWord{7, 11, 167}, + dictWord{8, 11, 824}, + dictWord{9, 11, 391}, + dictWord{10, 11, 375}, + dictWord{139, 11, 185}, + dictWord{5, 0, 495}, + dictWord{7, 0, 834}, + dictWord{9, 0, 733}, + dictWord{139, 0, 378}, + dictWord{4, 10, 743}, + dictWord{135, 11, 1273}, + dictWord{6, 0, 1204}, + dictWord{7, 11, 1645}, + dictWord{8, 11, 352}, + dictWord{137, 11, 249}, + dictWord{139, 10, 292}, + dictWord{133, 0, 559}, + dictWord{132, 11, 152}, + dictWord{9, 0, 499}, + dictWord{10, 0, 341}, + 
dictWord{ + 15, + 0, + 144, + }, + dictWord{19, 0, 49}, + dictWord{7, 10, 1283}, + dictWord{9, 10, 227}, + dictWord{11, 10, 325}, + dictWord{11, 10, 408}, + dictWord{14, 10, 180}, + dictWord{ + 146, + 10, + 47, + }, + dictWord{6, 0, 21}, + dictWord{6, 0, 1737}, + dictWord{7, 0, 1444}, + dictWord{136, 0, 224}, + dictWord{133, 11, 1006}, + dictWord{7, 0, 1446}, + dictWord{ + 9, + 0, + 97, + }, + dictWord{17, 0, 15}, + dictWord{5, 10, 81}, + dictWord{7, 10, 146}, + dictWord{7, 10, 1342}, + dictWord{8, 10, 53}, + dictWord{8, 10, 561}, + dictWord{8, 10, 694}, + dictWord{8, 10, 754}, + dictWord{9, 10, 115}, + dictWord{9, 10, 894}, + dictWord{10, 10, 462}, + dictWord{10, 10, 813}, + dictWord{11, 10, 230}, + dictWord{11, 10, 657}, + dictWord{11, 10, 699}, + dictWord{11, 10, 748}, + dictWord{12, 10, 119}, + dictWord{12, 10, 200}, + dictWord{12, 10, 283}, + dictWord{142, 10, 273}, + dictWord{ + 5, + 10, + 408, + }, + dictWord{137, 10, 747}, + dictWord{135, 11, 431}, + dictWord{135, 11, 832}, + dictWord{6, 0, 729}, + dictWord{134, 0, 953}, + dictWord{4, 0, 727}, + dictWord{ + 8, + 0, + 565, + }, + dictWord{5, 11, 351}, + dictWord{7, 11, 264}, + dictWord{136, 11, 565}, + dictWord{134, 0, 1948}, + dictWord{5, 0, 519}, + dictWord{5, 11, 40}, + dictWord{ + 7, + 11, + 598, + }, + dictWord{7, 11, 1638}, + dictWord{8, 11, 78}, + dictWord{9, 11, 166}, + dictWord{9, 11, 640}, + dictWord{9, 11, 685}, + dictWord{9, 11, 773}, + dictWord{ + 11, + 11, + 215, + }, + dictWord{13, 11, 65}, + dictWord{14, 11, 172}, + dictWord{14, 11, 317}, + dictWord{145, 11, 6}, + dictWord{8, 11, 60}, + dictWord{9, 11, 343}, + dictWord{ + 139, + 11, + 769, + }, + dictWord{137, 11, 455}, + dictWord{134, 0, 1193}, + dictWord{140, 0, 790}, + dictWord{7, 11, 1951}, + dictWord{8, 11, 765}, + dictWord{8, 11, 772}, + dictWord{140, 11, 671}, + dictWord{7, 11, 108}, + dictWord{8, 11, 219}, + dictWord{8, 11, 388}, + dictWord{9, 11, 639}, + dictWord{9, 11, 775}, + dictWord{11, 11, 275}, + dictWord{140, 11, 464}, + dictWord{132, 11, 468}, + dictWord{7, 10, 30}, + dictWord{8, 10, 86}, + dictWord{8, 10, 315}, + dictWord{8, 10, 700}, + dictWord{9, 10, 576}, + dictWord{ + 9, + 10, + 858, + }, + dictWord{11, 10, 310}, + dictWord{11, 10, 888}, + dictWord{11, 10, 904}, + dictWord{12, 10, 361}, + dictWord{141, 10, 248}, + dictWord{5, 11, 15}, + dictWord{6, 11, 56}, + dictWord{7, 11, 1758}, + dictWord{8, 11, 500}, + dictWord{9, 11, 730}, + dictWord{11, 11, 331}, + dictWord{13, 11, 150}, + dictWord{142, 11, 282}, + dictWord{4, 0, 402}, + dictWord{7, 0, 2}, + dictWord{8, 0, 323}, + dictWord{136, 0, 479}, + dictWord{138, 10, 839}, + dictWord{11, 0, 580}, + dictWord{142, 0, 201}, + dictWord{ + 5, + 0, + 59, + }, + dictWord{135, 0, 672}, + dictWord{137, 10, 617}, + dictWord{146, 0, 34}, + dictWord{134, 11, 1886}, + dictWord{4, 0, 961}, + dictWord{136, 0, 896}, + dictWord{ + 6, + 0, + 1285, + }, + dictWord{5, 11, 205}, + dictWord{6, 11, 438}, + dictWord{137, 11, 711}, + dictWord{134, 10, 428}, + dictWord{7, 10, 524}, + dictWord{8, 10, 169}, + dictWord{8, 10, 234}, + dictWord{9, 10, 480}, + dictWord{138, 10, 646}, + dictWord{148, 0, 46}, + dictWord{141, 0, 479}, + dictWord{133, 11, 534}, + dictWord{6, 0, 2019}, + dictWord{134, 10, 1648}, + dictWord{4, 0, 85}, + dictWord{7, 0, 549}, + dictWord{7, 10, 1205}, + dictWord{138, 10, 637}, + dictWord{4, 0, 663}, + dictWord{5, 0, 94}, + dictWord{ + 7, + 11, + 235, + }, + dictWord{7, 11, 1475}, + dictWord{15, 11, 68}, + dictWord{146, 11, 120}, + dictWord{6, 11, 443}, + dictWord{9, 11, 237}, + dictWord{9, 11, 571}, + 
dictWord{ + 9, + 11, + 695, + }, + dictWord{10, 11, 139}, + dictWord{11, 11, 715}, + dictWord{12, 11, 417}, + dictWord{141, 11, 421}, + dictWord{132, 0, 783}, + dictWord{4, 0, 682}, + dictWord{8, 0, 65}, + dictWord{9, 10, 39}, + dictWord{10, 10, 166}, + dictWord{11, 10, 918}, + dictWord{12, 10, 635}, + dictWord{20, 10, 10}, + dictWord{22, 10, 27}, + dictWord{ + 22, + 10, + 43, + }, + dictWord{150, 10, 52}, + dictWord{6, 0, 11}, + dictWord{135, 0, 187}, + dictWord{132, 0, 522}, + dictWord{4, 0, 52}, + dictWord{135, 0, 661}, + dictWord{ + 4, + 0, + 383, + }, + dictWord{133, 0, 520}, + dictWord{135, 11, 546}, + dictWord{11, 0, 343}, + dictWord{142, 0, 127}, + dictWord{4, 11, 578}, + dictWord{7, 10, 157}, + dictWord{ + 7, + 11, + 624, + }, + dictWord{7, 11, 916}, + dictWord{8, 10, 279}, + dictWord{10, 11, 256}, + dictWord{11, 11, 87}, + dictWord{139, 11, 703}, + dictWord{134, 10, 604}, + dictWord{ + 4, + 0, + 281, + }, + dictWord{5, 0, 38}, + dictWord{7, 0, 194}, + dictWord{7, 0, 668}, + dictWord{7, 0, 1893}, + dictWord{137, 0, 397}, + dictWord{7, 10, 945}, + dictWord{11, 10, 713}, + dictWord{139, 10, 744}, + dictWord{139, 10, 1022}, + dictWord{9, 0, 635}, + dictWord{139, 0, 559}, + dictWord{5, 11, 923}, + dictWord{7, 11, 490}, + dictWord{ + 12, + 11, + 553, + }, + dictWord{13, 11, 100}, + dictWord{14, 11, 118}, + dictWord{143, 11, 75}, + dictWord{132, 0, 975}, + dictWord{132, 10, 567}, + dictWord{137, 10, 859}, + dictWord{7, 10, 1846}, + dictWord{7, 11, 1846}, + dictWord{8, 10, 628}, + dictWord{136, 11, 628}, + dictWord{148, 0, 116}, + dictWord{138, 11, 750}, + dictWord{14, 0, 51}, + dictWord{14, 11, 51}, + dictWord{15, 11, 7}, + dictWord{148, 11, 20}, + dictWord{132, 0, 858}, + dictWord{134, 0, 1075}, + dictWord{4, 11, 924}, + dictWord{ + 133, + 10, + 762, + }, + dictWord{136, 0, 535}, + dictWord{133, 0, 448}, + dictWord{10, 10, 784}, + dictWord{141, 10, 191}, + dictWord{133, 10, 298}, + dictWord{7, 0, 610}, + dictWord{135, 0, 1501}, + dictWord{7, 10, 633}, + dictWord{7, 10, 905}, + dictWord{7, 10, 909}, + dictWord{7, 10, 1538}, + dictWord{9, 10, 767}, + dictWord{140, 10, 636}, + dictWord{4, 11, 265}, + dictWord{7, 11, 807}, + dictWord{135, 11, 950}, + dictWord{5, 11, 93}, + dictWord{12, 11, 267}, + dictWord{144, 11, 26}, + dictWord{136, 0, 191}, + dictWord{139, 10, 301}, + dictWord{135, 10, 1970}, + dictWord{135, 0, 267}, + dictWord{4, 0, 319}, + dictWord{5, 0, 699}, + dictWord{138, 0, 673}, + dictWord{ + 6, + 0, + 336, + }, + dictWord{7, 0, 92}, + dictWord{7, 0, 182}, + dictWord{8, 0, 453}, + dictWord{8, 0, 552}, + dictWord{9, 0, 204}, + dictWord{9, 0, 285}, + dictWord{10, 0, 99}, + dictWord{ + 11, + 0, + 568, + }, + dictWord{11, 0, 950}, + dictWord{12, 0, 94}, + dictWord{16, 0, 20}, + dictWord{16, 0, 70}, + dictWord{19, 0, 55}, + dictWord{12, 10, 644}, + dictWord{144, 10, 90}, + dictWord{6, 0, 551}, + dictWord{7, 0, 1308}, + dictWord{7, 10, 845}, + dictWord{7, 11, 994}, + dictWord{8, 10, 160}, + dictWord{137, 10, 318}, + dictWord{19, 11, 1}, + dictWord{ + 19, + 11, + 26, + }, + dictWord{150, 11, 9}, + dictWord{7, 0, 1406}, + dictWord{9, 0, 218}, + dictWord{141, 0, 222}, + dictWord{5, 0, 256}, + dictWord{138, 0, 69}, + dictWord{ + 5, + 11, + 233, + }, + dictWord{5, 11, 320}, + dictWord{6, 11, 140}, + dictWord{7, 11, 330}, + dictWord{136, 11, 295}, + dictWord{6, 0, 1980}, + dictWord{136, 0, 952}, + dictWord{ + 4, + 0, + 833, + }, + dictWord{137, 11, 678}, + dictWord{133, 11, 978}, + dictWord{4, 11, 905}, + dictWord{6, 11, 1701}, + dictWord{137, 11, 843}, + dictWord{138, 10, 735}, + 
dictWord{136, 10, 76}, + dictWord{17, 0, 39}, + dictWord{148, 0, 36}, + dictWord{18, 0, 81}, + dictWord{146, 11, 81}, + dictWord{14, 0, 352}, + dictWord{17, 0, 53}, + dictWord{ + 18, + 0, + 146, + }, + dictWord{18, 0, 152}, + dictWord{19, 0, 11}, + dictWord{150, 0, 54}, + dictWord{135, 0, 634}, + dictWord{138, 10, 841}, + dictWord{132, 0, 618}, + dictWord{ + 4, + 0, + 339, + }, + dictWord{7, 0, 259}, + dictWord{17, 0, 73}, + dictWord{4, 11, 275}, + dictWord{140, 11, 376}, + dictWord{132, 11, 509}, + dictWord{7, 11, 273}, + dictWord{ + 139, + 11, + 377, + }, + dictWord{4, 0, 759}, + dictWord{13, 0, 169}, + dictWord{137, 10, 804}, + dictWord{6, 10, 96}, + dictWord{135, 10, 1426}, + dictWord{4, 10, 651}, + dictWord{133, 10, 289}, + dictWord{7, 0, 1075}, + dictWord{8, 10, 35}, + dictWord{9, 10, 511}, + dictWord{10, 10, 767}, + dictWord{147, 10, 118}, + dictWord{6, 0, 649}, + dictWord{6, 0, 670}, + dictWord{136, 0, 482}, + dictWord{5, 0, 336}, + dictWord{6, 0, 341}, + dictWord{6, 0, 478}, + dictWord{6, 0, 1763}, + dictWord{136, 0, 386}, + dictWord{ + 5, + 11, + 802, + }, + dictWord{7, 11, 2021}, + dictWord{8, 11, 805}, + dictWord{14, 11, 94}, + dictWord{15, 11, 65}, + dictWord{16, 11, 4}, + dictWord{16, 11, 77}, + dictWord{16, 11, 80}, + dictWord{145, 11, 5}, + dictWord{6, 0, 1035}, + dictWord{5, 11, 167}, + dictWord{5, 11, 899}, + dictWord{6, 11, 410}, + dictWord{137, 11, 777}, + dictWord{ + 134, + 11, + 1705, + }, + dictWord{5, 0, 924}, + dictWord{133, 0, 969}, + dictWord{132, 10, 704}, + dictWord{135, 0, 73}, + dictWord{135, 11, 10}, + dictWord{135, 10, 1078}, + dictWord{ + 5, + 11, + 11, + }, + dictWord{6, 11, 117}, + dictWord{6, 11, 485}, + dictWord{7, 11, 1133}, + dictWord{9, 11, 582}, + dictWord{9, 11, 594}, + dictWord{11, 11, 21}, + dictWord{ + 11, + 11, + 818, + }, + dictWord{12, 11, 535}, + dictWord{141, 11, 86}, + dictWord{135, 0, 1971}, + dictWord{4, 11, 264}, + dictWord{7, 11, 1067}, + dictWord{8, 11, 204}, + dictWord{8, 11, 385}, + dictWord{139, 11, 953}, + dictWord{6, 0, 1458}, + dictWord{135, 0, 1344}, + dictWord{5, 0, 396}, + dictWord{134, 0, 501}, + dictWord{4, 10, 720}, + dictWord{133, 10, 306}, + dictWord{4, 0, 929}, + dictWord{5, 0, 799}, + dictWord{8, 0, 46}, + dictWord{8, 0, 740}, + dictWord{133, 10, 431}, + dictWord{7, 11, 646}, + dictWord{ + 7, + 11, + 1730, + }, + dictWord{11, 11, 446}, + dictWord{141, 11, 178}, + dictWord{7, 0, 276}, + dictWord{5, 10, 464}, + dictWord{6, 10, 236}, + dictWord{7, 10, 696}, + dictWord{ + 7, + 10, + 914, + }, + dictWord{7, 10, 1108}, + dictWord{7, 10, 1448}, + dictWord{9, 10, 15}, + dictWord{9, 10, 564}, + dictWord{10, 10, 14}, + dictWord{12, 10, 565}, + dictWord{ + 13, + 10, + 449, + }, + dictWord{14, 10, 53}, + dictWord{15, 10, 13}, + dictWord{16, 10, 64}, + dictWord{145, 10, 41}, + dictWord{4, 0, 892}, + dictWord{133, 0, 770}, + dictWord{ + 6, + 10, + 1767, + }, + dictWord{12, 10, 194}, + dictWord{145, 10, 107}, + dictWord{135, 0, 158}, + dictWord{5, 10, 840}, + dictWord{138, 11, 608}, + dictWord{134, 0, 1432}, + dictWord{138, 11, 250}, + dictWord{8, 11, 794}, + dictWord{9, 11, 400}, + dictWord{10, 11, 298}, + dictWord{142, 11, 228}, + dictWord{151, 0, 25}, + dictWord{ + 7, + 11, + 1131, + }, + dictWord{135, 11, 1468}, + dictWord{135, 0, 2001}, + dictWord{9, 10, 642}, + dictWord{11, 10, 236}, + dictWord{142, 10, 193}, + dictWord{4, 10, 68}, + dictWord{5, 10, 634}, + dictWord{6, 10, 386}, + dictWord{7, 10, 794}, + dictWord{8, 10, 273}, + dictWord{9, 10, 563}, + dictWord{10, 10, 105}, + dictWord{10, 10, 171}, + dictWord{11, 10, 94}, + 
dictWord{139, 10, 354}, + dictWord{136, 11, 724}, + dictWord{132, 0, 478}, + dictWord{11, 11, 512}, + dictWord{13, 11, 205}, + dictWord{ + 19, + 11, + 30, + }, + dictWord{22, 11, 36}, + dictWord{151, 11, 19}, + dictWord{7, 0, 1461}, + dictWord{140, 0, 91}, + dictWord{6, 11, 190}, + dictWord{7, 11, 768}, + dictWord{ + 135, + 11, + 1170, + }, + dictWord{4, 0, 602}, + dictWord{8, 0, 211}, + dictWord{4, 10, 95}, + dictWord{7, 10, 416}, + dictWord{139, 10, 830}, + dictWord{7, 10, 731}, + dictWord{13, 10, 20}, + dictWord{143, 10, 11}, + dictWord{6, 0, 1068}, + dictWord{135, 0, 1872}, + dictWord{4, 0, 13}, + dictWord{5, 0, 567}, + dictWord{7, 0, 1498}, + dictWord{9, 0, 124}, + dictWord{11, 0, 521}, + dictWord{12, 0, 405}, + dictWord{135, 11, 1023}, + dictWord{135, 0, 1006}, + dictWord{132, 0, 735}, + dictWord{138, 0, 812}, + dictWord{4, 0, 170}, + dictWord{135, 0, 323}, + dictWord{6, 11, 137}, + dictWord{9, 11, 75}, + dictWord{9, 11, 253}, + dictWord{10, 11, 194}, + dictWord{138, 11, 444}, + dictWord{5, 0, 304}, + dictWord{7, 0, 1403}, + dictWord{5, 10, 864}, + dictWord{10, 10, 648}, + dictWord{11, 10, 671}, + dictWord{143, 10, 46}, + dictWord{135, 11, 1180}, + dictWord{ + 133, + 10, + 928, + }, + dictWord{4, 0, 148}, + dictWord{133, 0, 742}, + dictWord{11, 10, 986}, + dictWord{140, 10, 682}, + dictWord{133, 0, 523}, + dictWord{135, 11, 1743}, + dictWord{7, 0, 730}, + dictWord{18, 0, 144}, + dictWord{19, 0, 61}, + dictWord{8, 10, 44}, + dictWord{9, 10, 884}, + dictWord{10, 10, 580}, + dictWord{11, 10, 399}, + dictWord{ + 11, + 10, + 894, + }, + dictWord{143, 10, 122}, + dictWord{5, 11, 760}, + dictWord{7, 11, 542}, + dictWord{8, 11, 135}, + dictWord{136, 11, 496}, + dictWord{136, 0, 981}, + dictWord{133, 0, 111}, + dictWord{10, 0, 132}, + dictWord{11, 0, 191}, + dictWord{11, 0, 358}, + dictWord{139, 0, 460}, + dictWord{7, 11, 319}, + dictWord{7, 11, 355}, + dictWord{ + 7, + 11, + 763, + }, + dictWord{10, 11, 389}, + dictWord{145, 11, 43}, + dictWord{134, 0, 890}, + dictWord{134, 0, 1420}, + dictWord{136, 11, 557}, + dictWord{ + 133, + 10, + 518, + }, + dictWord{133, 0, 444}, + dictWord{135, 0, 1787}, + dictWord{135, 10, 1852}, + dictWord{8, 0, 123}, + dictWord{15, 0, 6}, + dictWord{144, 0, 7}, + dictWord{ + 6, + 0, + 2041, + }, + dictWord{10, 11, 38}, + dictWord{139, 11, 784}, + dictWord{136, 0, 932}, + dictWord{5, 0, 937}, + dictWord{135, 0, 100}, + dictWord{6, 0, 995}, + dictWord{ + 4, + 11, + 58, + }, + dictWord{5, 11, 286}, + dictWord{6, 11, 319}, + dictWord{7, 11, 402}, + dictWord{7, 11, 1254}, + dictWord{7, 11, 1903}, + dictWord{8, 11, 356}, + dictWord{ + 140, + 11, + 408, + }, + dictWord{4, 11, 389}, + dictWord{9, 11, 181}, + dictWord{9, 11, 255}, + dictWord{10, 11, 8}, + dictWord{10, 11, 29}, + dictWord{10, 11, 816}, + dictWord{ + 11, + 11, + 311, + }, + dictWord{11, 11, 561}, + dictWord{12, 11, 67}, + dictWord{141, 11, 181}, + dictWord{138, 0, 255}, + dictWord{5, 0, 138}, + dictWord{4, 10, 934}, + dictWord{ + 136, + 10, + 610, + }, + dictWord{4, 0, 965}, + dictWord{10, 0, 863}, + dictWord{138, 0, 898}, + dictWord{10, 10, 804}, + dictWord{138, 10, 832}, + dictWord{12, 0, 631}, + dictWord{ + 8, + 10, + 96, + }, + dictWord{9, 10, 36}, + dictWord{10, 10, 607}, + dictWord{11, 10, 423}, + dictWord{11, 10, 442}, + dictWord{12, 10, 309}, + dictWord{14, 10, 199}, + dictWord{ + 15, + 10, + 90, + }, + dictWord{145, 10, 110}, + dictWord{134, 0, 1394}, + dictWord{4, 0, 652}, + dictWord{8, 0, 320}, + dictWord{22, 0, 6}, + dictWord{22, 0, 16}, + dictWord{ + 9, + 10, + 13, + }, + dictWord{9, 10, 398}, + 
dictWord{9, 10, 727}, + dictWord{10, 10, 75}, + dictWord{10, 10, 184}, + dictWord{10, 10, 230}, + dictWord{10, 10, 564}, + dictWord{ + 10, + 10, + 569, + }, + dictWord{11, 10, 973}, + dictWord{12, 10, 70}, + dictWord{12, 10, 189}, + dictWord{13, 10, 57}, + dictWord{141, 10, 257}, + dictWord{6, 0, 897}, + dictWord{ + 134, + 0, + 1333, + }, + dictWord{4, 0, 692}, + dictWord{133, 0, 321}, + dictWord{133, 11, 373}, + dictWord{135, 0, 922}, + dictWord{5, 0, 619}, + dictWord{133, 0, 698}, + dictWord{ + 137, + 10, + 631, + }, + dictWord{5, 10, 345}, + dictWord{135, 10, 1016}, + dictWord{9, 0, 957}, + dictWord{9, 0, 1018}, + dictWord{12, 0, 828}, + dictWord{12, 0, 844}, + dictWord{ + 12, + 0, + 897, + }, + dictWord{12, 0, 901}, + dictWord{12, 0, 943}, + dictWord{15, 0, 180}, + dictWord{18, 0, 197}, + dictWord{18, 0, 200}, + dictWord{18, 0, 213}, + dictWord{ + 18, + 0, + 214, + }, + dictWord{146, 0, 226}, + dictWord{5, 0, 917}, + dictWord{134, 0, 1659}, + dictWord{135, 0, 1100}, + dictWord{134, 0, 1173}, + dictWord{134, 0, 1930}, + dictWord{5, 0, 251}, + dictWord{5, 0, 956}, + dictWord{8, 0, 268}, + dictWord{9, 0, 214}, + dictWord{146, 0, 142}, + dictWord{133, 10, 673}, + dictWord{137, 10, 850}, + dictWord{ + 4, + 10, + 287, + }, + dictWord{133, 10, 1018}, + dictWord{132, 11, 672}, + dictWord{5, 0, 346}, + dictWord{5, 0, 711}, + dictWord{8, 0, 390}, + dictWord{11, 11, 752}, + dictWord{139, 11, 885}, + dictWord{5, 10, 34}, + dictWord{10, 10, 724}, + dictWord{12, 10, 444}, + dictWord{13, 10, 354}, + dictWord{18, 10, 32}, + dictWord{23, 10, 24}, + dictWord{23, 10, 31}, + dictWord{152, 10, 5}, + dictWord{4, 11, 710}, + dictWord{134, 11, 606}, + dictWord{134, 0, 744}, + dictWord{134, 10, 382}, + dictWord{ + 133, + 11, + 145, + }, + dictWord{4, 10, 329}, + dictWord{7, 11, 884}, + dictWord{140, 11, 124}, + dictWord{4, 11, 467}, + dictWord{5, 11, 405}, + dictWord{134, 11, 544}, + dictWord{ + 9, + 10, + 846, + }, + dictWord{138, 10, 827}, + dictWord{133, 0, 624}, + dictWord{9, 11, 372}, + dictWord{15, 11, 2}, + dictWord{19, 11, 10}, + dictWord{147, 11, 18}, + dictWord{ + 4, + 11, + 387, + }, + dictWord{135, 11, 1288}, + dictWord{5, 0, 783}, + dictWord{7, 0, 1998}, + dictWord{135, 0, 2047}, + dictWord{132, 10, 906}, + dictWord{136, 10, 366}, + dictWord{135, 11, 550}, + dictWord{4, 10, 123}, + dictWord{4, 10, 649}, + dictWord{5, 10, 605}, + dictWord{7, 10, 1509}, + dictWord{136, 10, 36}, + dictWord{ + 134, + 0, + 1125, + }, + dictWord{132, 0, 594}, + dictWord{133, 10, 767}, + dictWord{135, 11, 1227}, + dictWord{136, 11, 467}, + dictWord{4, 11, 576}, + dictWord{ + 135, + 11, + 1263, + }, + dictWord{4, 0, 268}, + dictWord{7, 0, 1534}, + dictWord{135, 11, 1534}, + dictWord{4, 10, 273}, + dictWord{5, 10, 658}, + dictWord{5, 11, 919}, + dictWord{ + 5, + 10, + 995, + }, + dictWord{134, 11, 1673}, + dictWord{133, 0, 563}, + dictWord{134, 10, 72}, + dictWord{135, 10, 1345}, + dictWord{4, 11, 82}, + dictWord{5, 11, 333}, + dictWord{ + 5, + 11, + 904, + }, + dictWord{6, 11, 207}, + dictWord{7, 11, 325}, + dictWord{7, 11, 1726}, + dictWord{8, 11, 101}, + dictWord{10, 11, 778}, + dictWord{139, 11, 220}, + dictWord{5, 0, 37}, + dictWord{6, 0, 39}, + dictWord{6, 0, 451}, + dictWord{7, 0, 218}, + dictWord{7, 0, 667}, + dictWord{7, 0, 1166}, + dictWord{7, 0, 1687}, + dictWord{8, 0, 662}, + dictWord{16, 0, 2}, + dictWord{133, 10, 589}, + dictWord{134, 0, 1332}, + dictWord{133, 11, 903}, + dictWord{134, 0, 508}, + dictWord{5, 10, 117}, + dictWord{6, 10, 514}, + dictWord{6, 10, 541}, + dictWord{7, 10, 1164}, + dictWord{7, 10, 
1436}, + dictWord{8, 10, 220}, + dictWord{8, 10, 648}, + dictWord{10, 10, 688}, + dictWord{11, 10, 560}, + dictWord{140, 11, 147}, + dictWord{6, 11, 555}, + dictWord{135, 11, 485}, + dictWord{133, 10, 686}, + dictWord{7, 0, 453}, + dictWord{7, 0, 635}, + dictWord{7, 0, 796}, + dictWord{8, 0, 331}, + dictWord{9, 0, 330}, + dictWord{9, 0, 865}, + dictWord{10, 0, 119}, + dictWord{10, 0, 235}, + dictWord{11, 0, 111}, + dictWord{11, 0, 129}, + dictWord{ + 11, + 0, + 240, + }, + dictWord{12, 0, 31}, + dictWord{12, 0, 66}, + dictWord{12, 0, 222}, + dictWord{12, 0, 269}, + dictWord{12, 0, 599}, + dictWord{12, 0, 684}, + dictWord{12, 0, 689}, + dictWord{12, 0, 691}, + dictWord{142, 0, 345}, + dictWord{135, 0, 1834}, + dictWord{4, 11, 705}, + dictWord{7, 11, 615}, + dictWord{138, 11, 251}, + dictWord{ + 136, + 11, + 345, + }, + dictWord{137, 0, 527}, + dictWord{6, 0, 98}, + dictWord{7, 0, 702}, + dictWord{135, 0, 991}, + dictWord{11, 0, 576}, + dictWord{14, 0, 74}, + dictWord{7, 10, 196}, + dictWord{10, 10, 765}, + dictWord{11, 10, 347}, + dictWord{11, 10, 552}, + dictWord{11, 10, 790}, + dictWord{12, 10, 263}, + dictWord{13, 10, 246}, + dictWord{ + 13, + 10, + 270, + }, + dictWord{13, 10, 395}, + dictWord{14, 10, 176}, + dictWord{14, 10, 190}, + dictWord{14, 10, 398}, + dictWord{14, 10, 412}, + dictWord{15, 10, 32}, + dictWord{ + 15, + 10, + 63, + }, + dictWord{16, 10, 88}, + dictWord{147, 10, 105}, + dictWord{134, 11, 90}, + dictWord{13, 0, 84}, + dictWord{141, 0, 122}, + dictWord{6, 0, 37}, + dictWord{ + 7, + 0, + 299, + }, + dictWord{7, 0, 1666}, + dictWord{8, 0, 195}, + dictWord{8, 0, 316}, + dictWord{9, 0, 178}, + dictWord{9, 0, 276}, + dictWord{9, 0, 339}, + dictWord{9, 0, 536}, + dictWord{ + 10, + 0, + 102, + }, + dictWord{10, 0, 362}, + dictWord{10, 0, 785}, + dictWord{11, 0, 55}, + dictWord{11, 0, 149}, + dictWord{11, 0, 773}, + dictWord{13, 0, 416}, + dictWord{ + 13, + 0, + 419, + }, + dictWord{14, 0, 38}, + dictWord{14, 0, 41}, + dictWord{142, 0, 210}, + dictWord{5, 10, 381}, + dictWord{135, 10, 1792}, + dictWord{7, 11, 813}, + dictWord{ + 12, + 11, + 497, + }, + dictWord{141, 11, 56}, + dictWord{7, 10, 616}, + dictWord{138, 10, 413}, + dictWord{133, 0, 645}, + dictWord{6, 11, 125}, + dictWord{135, 11, 1277}, + dictWord{132, 0, 290}, + dictWord{6, 0, 70}, + dictWord{7, 0, 1292}, + dictWord{10, 0, 762}, + dictWord{139, 0, 288}, + dictWord{6, 10, 120}, + dictWord{7, 10, 1188}, + dictWord{ + 7, + 10, + 1710, + }, + dictWord{8, 10, 286}, + dictWord{9, 10, 667}, + dictWord{11, 10, 592}, + dictWord{139, 10, 730}, + dictWord{135, 11, 1784}, + dictWord{7, 0, 1315}, + dictWord{135, 11, 1315}, + dictWord{134, 0, 1955}, + dictWord{135, 10, 1146}, + dictWord{7, 0, 131}, + dictWord{7, 0, 422}, + dictWord{8, 0, 210}, + dictWord{ + 140, + 0, + 573, + }, + dictWord{4, 10, 352}, + dictWord{135, 10, 687}, + dictWord{139, 0, 797}, + dictWord{143, 0, 38}, + dictWord{14, 0, 179}, + dictWord{15, 0, 151}, + dictWord{ + 150, + 0, + 11, + }, + dictWord{7, 0, 488}, + dictWord{4, 10, 192}, + dictWord{5, 10, 49}, + dictWord{6, 10, 200}, + dictWord{6, 10, 293}, + dictWord{134, 10, 1696}, + dictWord{ + 132, + 0, + 936, + }, + dictWord{135, 11, 703}, + dictWord{6, 11, 160}, + dictWord{7, 11, 1106}, + dictWord{9, 11, 770}, + dictWord{10, 11, 618}, + dictWord{11, 11, 112}, + dictWord{ + 140, + 11, + 413, + }, + dictWord{5, 0, 453}, + dictWord{134, 0, 441}, + dictWord{135, 0, 595}, + dictWord{132, 10, 650}, + dictWord{132, 10, 147}, + dictWord{6, 0, 991}, + dictWord{6, 0, 1182}, + dictWord{12, 11, 271}, + dictWord{145, 
11, 109}, + dictWord{133, 10, 934}, + dictWord{140, 11, 221}, + dictWord{132, 0, 653}, + dictWord{ + 7, + 0, + 505, + }, + dictWord{135, 0, 523}, + dictWord{134, 0, 903}, + dictWord{135, 11, 479}, + dictWord{7, 11, 304}, + dictWord{9, 11, 646}, + dictWord{9, 11, 862}, + dictWord{ + 10, + 11, + 262, + }, + dictWord{11, 11, 696}, + dictWord{12, 11, 208}, + dictWord{15, 11, 79}, + dictWord{147, 11, 108}, + dictWord{146, 0, 80}, + dictWord{135, 11, 981}, + dictWord{142, 0, 432}, + dictWord{132, 0, 314}, + dictWord{137, 11, 152}, + dictWord{7, 0, 1368}, + dictWord{8, 0, 232}, + dictWord{8, 0, 361}, + dictWord{10, 0, 682}, + dictWord{138, 0, 742}, + dictWord{135, 11, 1586}, + dictWord{9, 0, 534}, + dictWord{4, 11, 434}, + dictWord{11, 11, 663}, + dictWord{12, 11, 210}, + dictWord{13, 11, 166}, + dictWord{13, 11, 310}, + dictWord{14, 11, 373}, + dictWord{147, 11, 43}, + dictWord{7, 11, 1091}, + dictWord{135, 11, 1765}, + dictWord{6, 11, 550}, + dictWord{ + 135, + 11, + 652, + }, + dictWord{137, 0, 27}, + dictWord{142, 0, 12}, + dictWord{4, 10, 637}, + dictWord{5, 11, 553}, + dictWord{7, 11, 766}, + dictWord{138, 11, 824}, + dictWord{ + 7, + 11, + 737, + }, + dictWord{8, 11, 298}, + dictWord{136, 11, 452}, + dictWord{7, 0, 736}, + dictWord{139, 0, 264}, + dictWord{134, 0, 1657}, + dictWord{133, 11, 292}, + dictWord{138, 11, 135}, + dictWord{6, 0, 844}, + dictWord{134, 0, 1117}, + dictWord{135, 0, 127}, + dictWord{9, 10, 867}, + dictWord{138, 10, 837}, + dictWord{ + 6, + 0, + 1184, + }, + dictWord{134, 0, 1208}, + dictWord{134, 0, 1294}, + dictWord{136, 0, 364}, + dictWord{6, 0, 1415}, + dictWord{7, 0, 1334}, + dictWord{11, 0, 125}, + dictWord{ + 6, + 10, + 170, + }, + dictWord{7, 11, 393}, + dictWord{8, 10, 395}, + dictWord{8, 10, 487}, + dictWord{10, 11, 603}, + dictWord{11, 11, 206}, + dictWord{141, 10, 147}, + dictWord{137, 11, 748}, + dictWord{4, 11, 912}, + dictWord{137, 11, 232}, + dictWord{4, 10, 535}, + dictWord{136, 10, 618}, + dictWord{137, 0, 792}, + dictWord{ + 7, + 11, + 1973, + }, + dictWord{136, 11, 716}, + dictWord{135, 11, 98}, + dictWord{5, 0, 909}, + dictWord{9, 0, 849}, + dictWord{138, 0, 805}, + dictWord{4, 0, 630}, + dictWord{ + 132, + 0, + 699, + }, + dictWord{5, 11, 733}, + dictWord{14, 11, 103}, + dictWord{150, 10, 23}, + dictWord{12, 11, 158}, + dictWord{18, 11, 8}, + dictWord{19, 11, 62}, + dictWord{ + 20, + 11, + 6, + }, + dictWord{22, 11, 4}, + dictWord{23, 11, 2}, + dictWord{151, 11, 9}, + dictWord{132, 0, 968}, + dictWord{132, 10, 778}, + dictWord{132, 10, 46}, + dictWord{5, 10, 811}, + dictWord{6, 10, 1679}, + dictWord{6, 10, 1714}, + dictWord{135, 10, 2032}, + dictWord{6, 0, 1446}, + dictWord{7, 10, 1458}, + dictWord{9, 10, 407}, + dictWord{ + 139, + 10, + 15, + }, + dictWord{7, 0, 206}, + dictWord{7, 0, 397}, + dictWord{7, 0, 621}, + dictWord{7, 0, 640}, + dictWord{8, 0, 124}, + dictWord{8, 0, 619}, + dictWord{9, 0, 305}, + dictWord{ + 9, + 0, + 643, + }, + dictWord{10, 0, 264}, + dictWord{10, 0, 628}, + dictWord{11, 0, 40}, + dictWord{12, 0, 349}, + dictWord{13, 0, 134}, + dictWord{13, 0, 295}, + dictWord{ + 14, + 0, + 155, + }, + dictWord{15, 0, 120}, + dictWord{18, 0, 105}, + dictWord{6, 10, 34}, + dictWord{7, 10, 1089}, + dictWord{8, 10, 708}, + dictWord{8, 10, 721}, + dictWord{9, 10, 363}, + dictWord{148, 10, 98}, + dictWord{4, 0, 262}, + dictWord{5, 0, 641}, + dictWord{135, 0, 342}, + dictWord{137, 11, 72}, + dictWord{4, 0, 99}, + dictWord{6, 0, 250}, + dictWord{ + 6, + 0, + 346, + }, + dictWord{8, 0, 127}, + dictWord{138, 0, 81}, + dictWord{132, 0, 915}, + 
dictWord{5, 0, 75}, + dictWord{9, 0, 517}, + dictWord{10, 0, 470}, + dictWord{12, 0, 155}, + dictWord{141, 0, 224}, + dictWord{132, 10, 462}, + dictWord{11, 11, 600}, + dictWord{11, 11, 670}, + dictWord{141, 11, 245}, + dictWord{142, 0, 83}, + dictWord{ + 5, + 10, + 73, + }, + dictWord{6, 10, 23}, + dictWord{134, 10, 338}, + dictWord{6, 0, 1031}, + dictWord{139, 11, 923}, + dictWord{7, 11, 164}, + dictWord{7, 11, 1571}, + dictWord{ + 9, + 11, + 107, + }, + dictWord{140, 11, 225}, + dictWord{134, 0, 1470}, + dictWord{133, 0, 954}, + dictWord{6, 0, 304}, + dictWord{8, 0, 418}, + dictWord{10, 0, 345}, + dictWord{ + 11, + 0, + 341, + }, + dictWord{139, 0, 675}, + dictWord{9, 0, 410}, + dictWord{139, 0, 425}, + dictWord{4, 11, 27}, + dictWord{5, 11, 484}, + dictWord{5, 11, 510}, + dictWord{6, 11, 434}, + dictWord{7, 11, 1000}, + dictWord{7, 11, 1098}, + dictWord{8, 11, 2}, + dictWord{136, 11, 200}, + dictWord{134, 0, 734}, + dictWord{140, 11, 257}, + dictWord{ + 7, + 10, + 725, + }, + dictWord{8, 10, 498}, + dictWord{139, 10, 268}, + dictWord{134, 0, 1822}, + dictWord{135, 0, 1798}, + dictWord{135, 10, 773}, + dictWord{132, 11, 460}, + dictWord{4, 11, 932}, + dictWord{133, 11, 891}, + dictWord{134, 0, 14}, + dictWord{132, 10, 583}, + dictWord{7, 10, 1462}, + dictWord{8, 11, 625}, + dictWord{ + 139, + 10, + 659, + }, + dictWord{5, 0, 113}, + dictWord{6, 0, 243}, + dictWord{6, 0, 1708}, + dictWord{7, 0, 1865}, + dictWord{11, 0, 161}, + dictWord{16, 0, 37}, + dictWord{17, 0, 99}, + dictWord{133, 10, 220}, + dictWord{134, 11, 76}, + dictWord{5, 11, 461}, + dictWord{135, 11, 1925}, + dictWord{140, 0, 69}, + dictWord{8, 11, 92}, + dictWord{ + 137, + 11, + 221, + }, + dictWord{139, 10, 803}, + dictWord{132, 10, 544}, + dictWord{4, 0, 274}, + dictWord{134, 0, 922}, + dictWord{132, 0, 541}, + dictWord{5, 0, 627}, + dictWord{ + 6, + 10, + 437, + }, + dictWord{6, 10, 564}, + dictWord{11, 10, 181}, + dictWord{141, 10, 183}, + dictWord{135, 10, 1192}, + dictWord{7, 0, 166}, + dictWord{132, 11, 763}, + dictWord{133, 11, 253}, + dictWord{134, 0, 849}, + dictWord{9, 11, 73}, + dictWord{10, 11, 110}, + dictWord{14, 11, 185}, + dictWord{145, 11, 119}, + dictWord{5, 11, 212}, + dictWord{12, 11, 35}, + dictWord{141, 11, 382}, + dictWord{133, 0, 717}, + dictWord{137, 0, 304}, + dictWord{136, 0, 600}, + dictWord{133, 0, 654}, + dictWord{ + 6, + 0, + 273, + }, + dictWord{10, 0, 188}, + dictWord{13, 0, 377}, + dictWord{146, 0, 77}, + dictWord{4, 10, 790}, + dictWord{5, 10, 273}, + dictWord{134, 10, 394}, + dictWord{ + 132, + 0, + 543, + }, + dictWord{135, 0, 410}, + dictWord{11, 0, 98}, + dictWord{11, 0, 524}, + dictWord{141, 0, 87}, + dictWord{132, 0, 941}, + dictWord{135, 11, 1175}, + dictWord{ + 4, + 0, + 250, + }, + dictWord{7, 0, 1612}, + dictWord{11, 0, 186}, + dictWord{12, 0, 133}, + dictWord{6, 10, 127}, + dictWord{7, 10, 1511}, + dictWord{8, 10, 613}, + dictWord{ + 12, + 10, + 495, + }, + dictWord{12, 10, 586}, + dictWord{12, 10, 660}, + dictWord{12, 10, 668}, + dictWord{14, 10, 385}, + dictWord{15, 10, 118}, + dictWord{17, 10, 20}, + dictWord{ + 146, + 10, + 98, + }, + dictWord{6, 0, 1785}, + dictWord{133, 11, 816}, + dictWord{134, 0, 1339}, + dictWord{7, 0, 961}, + dictWord{7, 0, 1085}, + dictWord{7, 0, 1727}, + dictWord{ + 8, + 0, + 462, + }, + dictWord{6, 10, 230}, + dictWord{135, 11, 1727}, + dictWord{9, 0, 636}, + dictWord{135, 10, 1954}, + dictWord{132, 0, 780}, + dictWord{5, 11, 869}, + dictWord{5, 11, 968}, + dictWord{6, 11, 1626}, + dictWord{8, 11, 734}, + dictWord{136, 11, 784}, + dictWord{4, 11, 
542}, + dictWord{6, 11, 1716}, + dictWord{6, 11, 1727}, + dictWord{7, 11, 1082}, + dictWord{7, 11, 1545}, + dictWord{8, 11, 56}, + dictWord{8, 11, 118}, + dictWord{8, 11, 412}, + dictWord{8, 11, 564}, + dictWord{9, 11, 888}, + dictWord{9, 11, 908}, + dictWord{10, 11, 50}, + dictWord{10, 11, 423}, + dictWord{11, 11, 685}, + dictWord{11, 11, 697}, + dictWord{11, 11, 933}, + dictWord{12, 11, 299}, + dictWord{13, 11, 126}, + dictWord{13, 11, 136}, + dictWord{13, 11, 170}, + dictWord{141, 11, 190}, + dictWord{134, 11, 226}, + dictWord{4, 11, 232}, + dictWord{ + 9, + 11, + 202, + }, + dictWord{10, 11, 474}, + dictWord{140, 11, 433}, + dictWord{137, 11, 500}, + dictWord{5, 0, 529}, + dictWord{136, 10, 68}, + dictWord{132, 10, 654}, + dictWord{ + 4, + 10, + 156, + }, + dictWord{7, 10, 998}, + dictWord{7, 10, 1045}, + dictWord{7, 10, 1860}, + dictWord{9, 10, 48}, + dictWord{9, 10, 692}, + dictWord{11, 10, 419}, + dictWord{139, 10, 602}, + dictWord{7, 0, 1276}, + dictWord{8, 0, 474}, + dictWord{9, 0, 652}, + dictWord{6, 11, 108}, + dictWord{7, 11, 1003}, + dictWord{7, 11, 1181}, + dictWord{136, 11, 343}, + dictWord{7, 11, 1264}, + dictWord{7, 11, 1678}, + dictWord{11, 11, 945}, + dictWord{12, 11, 341}, + dictWord{12, 11, 471}, + dictWord{ + 140, + 11, + 569, + }, + dictWord{134, 11, 1712}, + dictWord{5, 0, 948}, + dictWord{12, 0, 468}, + dictWord{19, 0, 96}, + dictWord{148, 0, 24}, + dictWord{4, 11, 133}, + dictWord{ + 7, + 11, + 711, + }, + dictWord{7, 11, 1298}, + dictWord{7, 11, 1585}, + dictWord{135, 11, 1929}, + dictWord{6, 0, 753}, + dictWord{140, 0, 657}, + dictWord{139, 0, 941}, + dictWord{ + 6, + 11, + 99, + }, + dictWord{7, 11, 1808}, + dictWord{145, 11, 57}, + dictWord{6, 11, 574}, + dictWord{7, 11, 428}, + dictWord{7, 11, 1250}, + dictWord{10, 11, 669}, + dictWord{ + 11, + 11, + 485, + }, + dictWord{11, 11, 840}, + dictWord{12, 11, 300}, + dictWord{142, 11, 250}, + dictWord{4, 0, 532}, + dictWord{5, 0, 706}, + dictWord{135, 0, 662}, + dictWord{ + 5, + 0, + 837, + }, + dictWord{6, 0, 1651}, + dictWord{139, 0, 985}, + dictWord{7, 0, 1861}, + dictWord{9, 10, 197}, + dictWord{10, 10, 300}, + dictWord{12, 10, 473}, + dictWord{ + 13, + 10, + 90, + }, + dictWord{141, 10, 405}, + dictWord{137, 11, 252}, + dictWord{6, 11, 323}, + dictWord{135, 11, 1564}, + dictWord{4, 0, 330}, + dictWord{4, 0, 863}, + dictWord{7, 0, 933}, + dictWord{7, 0, 2012}, + dictWord{8, 0, 292}, + dictWord{7, 11, 461}, + dictWord{8, 11, 775}, + dictWord{138, 11, 435}, + dictWord{132, 10, 606}, + dictWord{ + 4, + 11, + 655, + }, + dictWord{7, 11, 850}, + dictWord{17, 11, 75}, + dictWord{146, 11, 137}, + dictWord{135, 0, 767}, + dictWord{7, 10, 1978}, + dictWord{136, 10, 676}, + dictWord{132, 0, 641}, + dictWord{135, 11, 1559}, + dictWord{134, 0, 1233}, + dictWord{137, 0, 242}, + dictWord{17, 0, 114}, + dictWord{4, 10, 361}, + dictWord{ + 133, + 10, + 315, + }, + dictWord{137, 0, 883}, + dictWord{132, 10, 461}, + dictWord{138, 0, 274}, + dictWord{134, 0, 2008}, + dictWord{134, 0, 1794}, + dictWord{4, 0, 703}, + dictWord{135, 0, 207}, + dictWord{12, 0, 285}, + dictWord{132, 10, 472}, + dictWord{132, 0, 571}, + dictWord{5, 0, 873}, + dictWord{5, 0, 960}, + dictWord{8, 0, 823}, + dictWord{9, 0, 881}, + dictWord{136, 11, 577}, + dictWord{7, 0, 617}, + dictWord{10, 0, 498}, + dictWord{11, 0, 501}, + dictWord{12, 0, 16}, + dictWord{140, 0, 150}, + dictWord{ + 138, + 10, + 747, + }, + dictWord{132, 0, 431}, + dictWord{133, 10, 155}, + dictWord{11, 0, 283}, + dictWord{11, 0, 567}, + dictWord{7, 10, 163}, + dictWord{8, 10, 319}, + 
dictWord{ + 9, + 10, + 402, + }, + dictWord{10, 10, 24}, + dictWord{10, 10, 681}, + dictWord{11, 10, 200}, + dictWord{12, 10, 253}, + dictWord{12, 10, 410}, + dictWord{142, 10, 219}, + dictWord{4, 11, 413}, + dictWord{5, 11, 677}, + dictWord{8, 11, 432}, + dictWord{140, 11, 280}, + dictWord{9, 0, 401}, + dictWord{5, 10, 475}, + dictWord{7, 10, 1780}, + dictWord{11, 10, 297}, + dictWord{11, 10, 558}, + dictWord{14, 10, 322}, + dictWord{147, 10, 76}, + dictWord{6, 0, 781}, + dictWord{9, 0, 134}, + dictWord{10, 0, 2}, + dictWord{ + 10, + 0, + 27, + }, + dictWord{10, 0, 333}, + dictWord{11, 0, 722}, + dictWord{143, 0, 1}, + dictWord{5, 0, 33}, + dictWord{6, 0, 470}, + dictWord{139, 0, 424}, + dictWord{ + 135, + 0, + 2006, + }, + dictWord{12, 0, 783}, + dictWord{135, 10, 1956}, + dictWord{136, 0, 274}, + dictWord{135, 0, 1882}, + dictWord{132, 0, 794}, + dictWord{135, 0, 1848}, + dictWord{5, 10, 944}, + dictWord{134, 10, 1769}, + dictWord{6, 0, 47}, + dictWord{7, 0, 90}, + dictWord{7, 0, 664}, + dictWord{7, 0, 830}, + dictWord{7, 0, 1380}, + dictWord{ + 7, + 0, + 2025, + }, + dictWord{8, 0, 448}, + dictWord{136, 0, 828}, + dictWord{132, 10, 144}, + dictWord{134, 0, 1199}, + dictWord{4, 11, 395}, + dictWord{139, 11, 762}, + dictWord{135, 11, 1504}, + dictWord{9, 0, 417}, + dictWord{137, 0, 493}, + dictWord{9, 11, 174}, + dictWord{10, 11, 164}, + dictWord{11, 11, 440}, + dictWord{11, 11, 841}, + dictWord{143, 11, 98}, + dictWord{134, 11, 426}, + dictWord{139, 11, 1002}, + dictWord{134, 0, 295}, + dictWord{134, 0, 816}, + dictWord{6, 10, 247}, + dictWord{ + 137, + 10, + 555, + }, + dictWord{133, 0, 1019}, + dictWord{4, 0, 620}, + dictWord{5, 11, 476}, + dictWord{10, 10, 280}, + dictWord{138, 10, 797}, + dictWord{139, 0, 464}, + dictWord{5, 11, 76}, + dictWord{6, 11, 458}, + dictWord{6, 11, 497}, + dictWord{7, 11, 764}, + dictWord{7, 11, 868}, + dictWord{9, 11, 658}, + dictWord{10, 11, 594}, + dictWord{ + 11, + 11, + 173, + }, + dictWord{11, 11, 566}, + dictWord{12, 11, 20}, + dictWord{12, 11, 338}, + dictWord{141, 11, 200}, + dictWord{134, 0, 208}, + dictWord{4, 11, 526}, + dictWord{7, 11, 1029}, + dictWord{135, 11, 1054}, + dictWord{132, 11, 636}, + dictWord{6, 11, 233}, + dictWord{7, 11, 660}, + dictWord{7, 11, 1124}, + dictWord{ + 17, + 11, + 31, + }, + dictWord{19, 11, 22}, + dictWord{151, 11, 14}, + dictWord{10, 0, 442}, + dictWord{133, 10, 428}, + dictWord{10, 0, 930}, + dictWord{140, 0, 778}, + dictWord{ + 6, + 0, + 68, + }, + dictWord{7, 0, 448}, + dictWord{7, 0, 1629}, + dictWord{7, 0, 1769}, + dictWord{7, 0, 1813}, + dictWord{8, 0, 442}, + dictWord{8, 0, 516}, + dictWord{9, 0, 710}, + dictWord{ + 10, + 0, + 282, + }, + dictWord{10, 0, 722}, + dictWord{7, 10, 1717}, + dictWord{138, 10, 546}, + dictWord{134, 0, 1128}, + dictWord{11, 0, 844}, + dictWord{12, 0, 104}, + dictWord{140, 0, 625}, + dictWord{4, 11, 432}, + dictWord{135, 11, 824}, + dictWord{138, 10, 189}, + dictWord{133, 0, 787}, + dictWord{133, 10, 99}, + dictWord{ + 4, + 11, + 279, + }, + dictWord{7, 11, 301}, + dictWord{137, 11, 362}, + dictWord{8, 0, 491}, + dictWord{4, 10, 397}, + dictWord{136, 10, 555}, + dictWord{4, 11, 178}, + dictWord{ + 133, + 11, + 399, + }, + dictWord{134, 0, 711}, + dictWord{144, 0, 9}, + dictWord{4, 0, 403}, + dictWord{5, 0, 441}, + dictWord{7, 0, 450}, + dictWord{10, 0, 840}, + dictWord{11, 0, 101}, + dictWord{12, 0, 193}, + dictWord{141, 0, 430}, + dictWord{135, 11, 1246}, + dictWord{12, 10, 398}, + dictWord{20, 10, 39}, + dictWord{21, 10, 11}, + dictWord{ + 150, + 10, + 41, + }, + dictWord{4, 10, 
485}, + dictWord{7, 10, 353}, + dictWord{135, 10, 1523}, + dictWord{6, 10, 366}, + dictWord{7, 10, 1384}, + dictWord{7, 10, 1601}, + dictWord{ + 135, + 11, + 1912, + }, + dictWord{7, 0, 396}, + dictWord{10, 0, 160}, + dictWord{135, 11, 396}, + dictWord{137, 10, 282}, + dictWord{134, 11, 1692}, + dictWord{4, 10, 157}, + dictWord{5, 10, 471}, + dictWord{6, 11, 202}, + dictWord{10, 11, 448}, + dictWord{11, 11, 208}, + dictWord{12, 11, 360}, + dictWord{17, 11, 117}, + dictWord{ + 17, + 11, + 118, + }, + dictWord{18, 11, 27}, + dictWord{148, 11, 67}, + dictWord{133, 0, 679}, + dictWord{137, 0, 326}, + dictWord{136, 10, 116}, + dictWord{7, 11, 872}, + dictWord{ + 10, + 11, + 516, + }, + dictWord{139, 11, 167}, + dictWord{132, 11, 224}, + dictWord{5, 11, 546}, + dictWord{7, 11, 35}, + dictWord{8, 11, 11}, + dictWord{8, 11, 12}, + dictWord{ + 9, + 11, + 315, + }, + dictWord{9, 11, 533}, + dictWord{10, 11, 802}, + dictWord{11, 11, 166}, + dictWord{12, 11, 525}, + dictWord{142, 11, 243}, + dictWord{7, 0, 1128}, + dictWord{135, 11, 1920}, + dictWord{5, 11, 241}, + dictWord{8, 11, 242}, + dictWord{9, 11, 451}, + dictWord{10, 11, 667}, + dictWord{11, 11, 598}, + dictWord{ + 140, + 11, + 429, + }, + dictWord{6, 0, 737}, + dictWord{5, 10, 160}, + dictWord{7, 10, 363}, + dictWord{7, 10, 589}, + dictWord{10, 10, 170}, + dictWord{141, 10, 55}, + dictWord{ + 135, + 0, + 1796, + }, + dictWord{142, 11, 254}, + dictWord{4, 0, 574}, + dictWord{7, 0, 350}, + dictWord{7, 0, 1024}, + dictWord{8, 0, 338}, + dictWord{9, 0, 677}, + dictWord{138, 0, 808}, + dictWord{134, 0, 1096}, + dictWord{137, 11, 516}, + dictWord{7, 0, 405}, + dictWord{10, 0, 491}, + dictWord{4, 10, 108}, + dictWord{4, 11, 366}, + dictWord{ + 139, + 10, + 498, + }, + dictWord{11, 11, 337}, + dictWord{142, 11, 303}, + dictWord{134, 11, 1736}, + dictWord{7, 0, 1081}, + dictWord{140, 11, 364}, + dictWord{7, 10, 1005}, + dictWord{140, 10, 609}, + dictWord{7, 0, 1676}, + dictWord{4, 10, 895}, + dictWord{133, 10, 772}, + dictWord{135, 0, 2037}, + dictWord{6, 0, 1207}, + dictWord{ + 11, + 11, + 916, + }, + dictWord{142, 11, 419}, + dictWord{14, 11, 140}, + dictWord{148, 11, 41}, + dictWord{6, 11, 331}, + dictWord{136, 11, 623}, + dictWord{9, 0, 944}, + dictWord{ + 9, + 0, + 969, + }, + dictWord{9, 0, 1022}, + dictWord{12, 0, 913}, + dictWord{12, 0, 936}, + dictWord{15, 0, 177}, + dictWord{15, 0, 193}, + dictWord{4, 10, 926}, + dictWord{ + 133, + 10, + 983, + }, + dictWord{5, 0, 354}, + dictWord{135, 11, 506}, + dictWord{8, 0, 598}, + dictWord{9, 0, 664}, + dictWord{138, 0, 441}, + dictWord{4, 11, 640}, + dictWord{ + 133, + 11, + 513, + }, + dictWord{137, 0, 297}, + dictWord{132, 10, 538}, + dictWord{6, 10, 294}, + dictWord{7, 10, 1267}, + dictWord{136, 10, 624}, + dictWord{7, 0, 1772}, + dictWord{ + 7, + 11, + 1888, + }, + dictWord{8, 11, 289}, + dictWord{11, 11, 45}, + dictWord{12, 11, 278}, + dictWord{140, 11, 537}, + dictWord{135, 10, 1325}, + dictWord{138, 0, 751}, + dictWord{141, 0, 37}, + dictWord{134, 0, 1828}, + dictWord{132, 10, 757}, + dictWord{132, 11, 394}, + dictWord{6, 0, 257}, + dictWord{135, 0, 1522}, + dictWord{ + 4, + 0, + 582, + }, + dictWord{9, 0, 191}, + dictWord{135, 11, 1931}, + dictWord{7, 11, 574}, + dictWord{7, 11, 1719}, + dictWord{137, 11, 145}, + dictWord{132, 11, 658}, + dictWord{10, 0, 790}, + dictWord{132, 11, 369}, + dictWord{9, 11, 781}, + dictWord{10, 11, 144}, + dictWord{11, 11, 385}, + dictWord{13, 11, 161}, + dictWord{13, 11, 228}, + dictWord{13, 11, 268}, + dictWord{148, 11, 107}, + dictWord{8, 0, 469}, + 
dictWord{10, 0, 47}, + dictWord{136, 11, 374}, + dictWord{6, 0, 306}, + dictWord{7, 0, 1140}, + dictWord{7, 0, 1340}, + dictWord{8, 0, 133}, + dictWord{138, 0, 449}, + dictWord{139, 0, 1011}, + dictWord{7, 10, 1875}, + dictWord{139, 10, 124}, + dictWord{ + 4, + 11, + 344, + }, + dictWord{6, 11, 498}, + dictWord{139, 11, 323}, + dictWord{137, 0, 299}, + dictWord{132, 0, 837}, + dictWord{133, 11, 906}, + dictWord{5, 0, 329}, + dictWord{ + 8, + 0, + 260, + }, + dictWord{138, 0, 10}, + dictWord{134, 0, 1320}, + dictWord{4, 0, 657}, + dictWord{146, 0, 158}, + dictWord{135, 0, 1191}, + dictWord{152, 0, 7}, + dictWord{ + 6, + 0, + 1939, + }, + dictWord{8, 0, 974}, + dictWord{138, 0, 996}, + dictWord{135, 0, 1665}, + dictWord{11, 11, 126}, + dictWord{139, 11, 287}, + dictWord{143, 0, 8}, + dictWord{ + 14, + 11, + 149, + }, + dictWord{14, 11, 399}, + dictWord{143, 11, 57}, + dictWord{5, 0, 66}, + dictWord{7, 0, 1896}, + dictWord{136, 0, 288}, + dictWord{7, 0, 175}, + dictWord{ + 10, + 0, + 494, + }, + dictWord{5, 10, 150}, + dictWord{8, 10, 603}, + dictWord{9, 10, 593}, + dictWord{9, 10, 634}, + dictWord{10, 10, 173}, + dictWord{11, 10, 462}, + dictWord{ + 11, + 10, + 515, + }, + dictWord{13, 10, 216}, + dictWord{13, 10, 288}, + dictWord{142, 10, 400}, + dictWord{134, 0, 1643}, + dictWord{136, 11, 21}, + dictWord{4, 0, 21}, + dictWord{ + 5, + 0, + 91, + }, + dictWord{5, 0, 648}, + dictWord{5, 0, 750}, + dictWord{5, 0, 781}, + dictWord{6, 0, 54}, + dictWord{6, 0, 112}, + dictWord{6, 0, 402}, + dictWord{6, 0, 1732}, + dictWord{ + 7, + 0, + 315, + }, + dictWord{7, 0, 749}, + dictWord{7, 0, 1427}, + dictWord{7, 0, 1900}, + dictWord{9, 0, 78}, + dictWord{9, 0, 508}, + dictWord{10, 0, 611}, + dictWord{10, 0, 811}, + dictWord{11, 0, 510}, + dictWord{11, 0, 728}, + dictWord{13, 0, 36}, + dictWord{14, 0, 39}, + dictWord{16, 0, 83}, + dictWord{17, 0, 124}, + dictWord{148, 0, 30}, + dictWord{ + 4, + 0, + 668, + }, + dictWord{136, 0, 570}, + dictWord{10, 0, 322}, + dictWord{10, 0, 719}, + dictWord{139, 0, 407}, + dictWord{135, 11, 1381}, + dictWord{136, 11, 193}, + dictWord{12, 10, 108}, + dictWord{141, 10, 291}, + dictWord{132, 11, 616}, + dictWord{136, 11, 692}, + dictWord{8, 0, 125}, + dictWord{8, 0, 369}, + dictWord{8, 0, 524}, + dictWord{10, 0, 486}, + dictWord{11, 0, 13}, + dictWord{11, 0, 381}, + dictWord{11, 0, 736}, + dictWord{11, 0, 766}, + dictWord{11, 0, 845}, + dictWord{13, 0, 114}, + dictWord{ + 13, + 0, + 292, + }, + dictWord{142, 0, 47}, + dictWord{134, 0, 1247}, + dictWord{6, 0, 1684}, + dictWord{6, 0, 1731}, + dictWord{7, 0, 356}, + dictWord{8, 0, 54}, + dictWord{8, 0, 221}, + dictWord{9, 0, 225}, + dictWord{9, 0, 356}, + dictWord{10, 0, 77}, + dictWord{10, 0, 446}, + dictWord{10, 0, 731}, + dictWord{12, 0, 404}, + dictWord{141, 0, 491}, + dictWord{135, 10, 1777}, + dictWord{4, 11, 305}, + dictWord{4, 10, 493}, + dictWord{144, 10, 55}, + dictWord{4, 0, 951}, + dictWord{6, 0, 1809}, + dictWord{6, 0, 1849}, + dictWord{8, 0, 846}, + dictWord{8, 0, 866}, + dictWord{8, 0, 899}, + dictWord{10, 0, 896}, + dictWord{12, 0, 694}, + dictWord{142, 0, 468}, + dictWord{5, 11, 214}, + dictWord{ + 7, + 11, + 603, + }, + dictWord{8, 11, 611}, + dictWord{9, 11, 686}, + dictWord{10, 11, 88}, + dictWord{11, 11, 459}, + dictWord{11, 11, 496}, + dictWord{12, 11, 463}, + dictWord{ + 12, + 11, + 590, + }, + dictWord{13, 11, 0}, + dictWord{142, 11, 214}, + dictWord{132, 0, 411}, + dictWord{4, 0, 80}, + dictWord{133, 0, 44}, + dictWord{140, 11, 74}, + dictWord{ + 143, + 0, + 31, + }, + dictWord{7, 0, 669}, + 
dictWord{6, 10, 568}, + dictWord{7, 10, 1804}, + dictWord{8, 10, 362}, + dictWord{8, 10, 410}, + dictWord{8, 10, 830}, + dictWord{9, 10, 514}, + dictWord{11, 10, 649}, + dictWord{142, 10, 157}, + dictWord{7, 0, 673}, + dictWord{134, 11, 1703}, + dictWord{132, 10, 625}, + dictWord{134, 0, 1303}, + dictWord{ + 5, + 0, + 299, + }, + dictWord{135, 0, 1083}, + dictWord{138, 0, 704}, + dictWord{6, 0, 275}, + dictWord{7, 0, 408}, + dictWord{6, 10, 158}, + dictWord{7, 10, 129}, + dictWord{ + 7, + 10, + 181, + }, + dictWord{8, 10, 276}, + dictWord{8, 10, 377}, + dictWord{10, 10, 523}, + dictWord{11, 10, 816}, + dictWord{12, 10, 455}, + dictWord{13, 10, 303}, + dictWord{ + 142, + 10, + 135, + }, + dictWord{4, 0, 219}, + dictWord{7, 0, 367}, + dictWord{7, 0, 1713}, + dictWord{7, 0, 1761}, + dictWord{9, 0, 86}, + dictWord{9, 0, 537}, + dictWord{10, 0, 165}, + dictWord{12, 0, 219}, + dictWord{140, 0, 561}, + dictWord{8, 0, 216}, + dictWord{4, 10, 1}, + dictWord{4, 11, 737}, + dictWord{6, 11, 317}, + dictWord{7, 10, 1143}, + dictWord{ + 7, + 10, + 1463, + }, + dictWord{9, 10, 207}, + dictWord{9, 10, 390}, + dictWord{9, 10, 467}, + dictWord{10, 11, 98}, + dictWord{11, 11, 294}, + dictWord{11, 10, 836}, + dictWord{ + 12, + 11, + 60, + }, + dictWord{12, 11, 437}, + dictWord{13, 11, 64}, + dictWord{13, 11, 380}, + dictWord{142, 11, 430}, + dictWord{6, 11, 1758}, + dictWord{8, 11, 520}, + dictWord{9, 11, 345}, + dictWord{9, 11, 403}, + dictWord{142, 11, 350}, + dictWord{5, 11, 47}, + dictWord{10, 11, 242}, + dictWord{138, 11, 579}, + dictWord{5, 11, 139}, + dictWord{7, 11, 1168}, + dictWord{138, 11, 539}, + dictWord{135, 0, 1319}, + dictWord{4, 10, 295}, + dictWord{4, 10, 723}, + dictWord{5, 10, 895}, + dictWord{ + 7, + 10, + 1031, + }, + dictWord{8, 10, 199}, + dictWord{8, 10, 340}, + dictWord{9, 10, 153}, + dictWord{9, 10, 215}, + dictWord{10, 10, 21}, + dictWord{10, 10, 59}, + dictWord{ + 10, + 10, + 80, + }, + dictWord{10, 10, 224}, + dictWord{10, 10, 838}, + dictWord{11, 10, 229}, + dictWord{11, 10, 652}, + dictWord{12, 10, 192}, + dictWord{13, 10, 146}, + dictWord{ + 142, + 10, + 91, + }, + dictWord{140, 0, 428}, + dictWord{137, 10, 51}, + dictWord{133, 0, 514}, + dictWord{5, 10, 309}, + dictWord{140, 10, 211}, + dictWord{6, 0, 1010}, + dictWord{5, 10, 125}, + dictWord{8, 10, 77}, + dictWord{138, 10, 15}, + dictWord{4, 0, 55}, + dictWord{5, 0, 301}, + dictWord{6, 0, 571}, + dictWord{142, 0, 49}, + dictWord{ + 146, + 0, + 102, + }, + dictWord{136, 11, 370}, + dictWord{4, 11, 107}, + dictWord{7, 11, 613}, + dictWord{8, 11, 358}, + dictWord{8, 11, 439}, + dictWord{8, 11, 504}, + dictWord{ + 9, + 11, + 501, + }, + dictWord{10, 11, 383}, + dictWord{139, 11, 477}, + dictWord{132, 11, 229}, + dictWord{133, 0, 364}, + dictWord{133, 10, 439}, + dictWord{4, 11, 903}, + dictWord{135, 11, 1816}, + dictWord{11, 0, 379}, + dictWord{140, 10, 76}, + dictWord{4, 0, 76}, + dictWord{4, 0, 971}, + dictWord{7, 0, 1550}, + dictWord{9, 0, 306}, + dictWord{ + 9, + 0, + 430, + }, + dictWord{9, 0, 663}, + dictWord{10, 0, 683}, + dictWord{10, 0, 921}, + dictWord{11, 0, 427}, + dictWord{11, 0, 753}, + dictWord{12, 0, 334}, + dictWord{12, 0, 442}, + dictWord{14, 0, 258}, + dictWord{14, 0, 366}, + dictWord{143, 0, 131}, + dictWord{137, 0, 52}, + dictWord{4, 11, 47}, + dictWord{6, 11, 373}, + dictWord{7, 11, 452}, + dictWord{7, 11, 543}, + dictWord{7, 11, 1714}, + dictWord{7, 11, 1856}, + dictWord{9, 11, 6}, + dictWord{11, 11, 257}, + dictWord{139, 11, 391}, + dictWord{4, 10, 8}, + dictWord{ + 7, + 10, + 1152, + }, + dictWord{7, 10, 
1153}, + dictWord{7, 10, 1715}, + dictWord{9, 10, 374}, + dictWord{10, 10, 478}, + dictWord{139, 10, 648}, + dictWord{4, 11, 785}, + dictWord{133, 11, 368}, + dictWord{135, 10, 1099}, + dictWord{135, 11, 860}, + dictWord{5, 11, 980}, + dictWord{134, 11, 1754}, + dictWord{134, 0, 1258}, + dictWord{ + 6, + 0, + 1058, + }, + dictWord{6, 0, 1359}, + dictWord{7, 11, 536}, + dictWord{7, 11, 1331}, + dictWord{136, 11, 143}, + dictWord{4, 0, 656}, + dictWord{135, 0, 779}, + dictWord{136, 10, 87}, + dictWord{5, 11, 19}, + dictWord{6, 11, 533}, + dictWord{146, 11, 126}, + dictWord{7, 0, 144}, + dictWord{138, 10, 438}, + dictWord{5, 11, 395}, + dictWord{5, 11, 951}, + dictWord{134, 11, 1776}, + dictWord{135, 0, 1373}, + dictWord{7, 0, 554}, + dictWord{7, 0, 605}, + dictWord{141, 0, 10}, + dictWord{4, 10, 69}, + dictWord{ + 5, + 10, + 122, + }, + dictWord{9, 10, 656}, + dictWord{138, 10, 464}, + dictWord{5, 10, 849}, + dictWord{134, 10, 1633}, + dictWord{5, 0, 838}, + dictWord{5, 0, 841}, + dictWord{134, 0, 1649}, + dictWord{133, 0, 1012}, + dictWord{139, 10, 499}, + dictWord{7, 10, 476}, + dictWord{7, 10, 1592}, + dictWord{138, 10, 87}, + dictWord{ + 6, + 0, + 251, + }, + dictWord{7, 0, 365}, + dictWord{7, 0, 1357}, + dictWord{7, 0, 1497}, + dictWord{8, 0, 154}, + dictWord{141, 0, 281}, + dictWord{132, 11, 441}, + dictWord{ + 132, + 11, + 695, + }, + dictWord{7, 11, 497}, + dictWord{9, 11, 387}, + dictWord{147, 11, 81}, + dictWord{133, 0, 340}, + dictWord{14, 10, 283}, + dictWord{142, 11, 283}, + dictWord{ + 134, + 0, + 810, + }, + dictWord{135, 11, 1894}, + dictWord{139, 0, 495}, + dictWord{5, 11, 284}, + dictWord{6, 11, 49}, + dictWord{6, 11, 350}, + dictWord{7, 11, 1}, + dictWord{ + 7, + 11, + 377, + }, + dictWord{7, 11, 1693}, + dictWord{8, 11, 18}, + dictWord{8, 11, 678}, + dictWord{9, 11, 161}, + dictWord{9, 11, 585}, + dictWord{9, 11, 671}, + dictWord{ + 9, + 11, + 839, + }, + dictWord{11, 11, 912}, + dictWord{141, 11, 427}, + dictWord{5, 10, 859}, + dictWord{7, 10, 1160}, + dictWord{8, 10, 107}, + dictWord{9, 10, 291}, + dictWord{ + 9, + 10, + 439, + }, + dictWord{10, 10, 663}, + dictWord{11, 10, 609}, + dictWord{140, 10, 197}, + dictWord{8, 0, 261}, + dictWord{9, 0, 144}, + dictWord{9, 0, 466}, + dictWord{ + 10, + 0, + 370, + }, + dictWord{12, 0, 470}, + dictWord{13, 0, 144}, + dictWord{142, 0, 348}, + dictWord{137, 0, 897}, + dictWord{6, 0, 248}, + dictWord{9, 0, 546}, + dictWord{10, 0, 535}, + dictWord{11, 0, 681}, + dictWord{141, 0, 135}, + dictWord{4, 0, 358}, + dictWord{135, 0, 1496}, + dictWord{134, 0, 567}, + dictWord{136, 0, 445}, + dictWord{ + 4, + 10, + 117, + }, + dictWord{6, 10, 372}, + dictWord{7, 10, 1905}, + dictWord{142, 10, 323}, + dictWord{4, 10, 722}, + dictWord{139, 10, 471}, + dictWord{6, 0, 697}, + dictWord{ + 134, + 0, + 996, + }, + dictWord{7, 11, 2007}, + dictWord{9, 11, 101}, + dictWord{9, 11, 450}, + dictWord{10, 11, 66}, + dictWord{10, 11, 842}, + dictWord{11, 11, 536}, + dictWord{ + 140, + 11, + 587, + }, + dictWord{132, 0, 577}, + dictWord{134, 0, 1336}, + dictWord{9, 10, 5}, + dictWord{12, 10, 216}, + dictWord{12, 10, 294}, + dictWord{12, 10, 298}, + dictWord{12, 10, 400}, + dictWord{12, 10, 518}, + dictWord{13, 10, 229}, + dictWord{143, 10, 139}, + dictWord{6, 0, 174}, + dictWord{138, 0, 917}, + dictWord{ + 134, + 10, + 1774, + }, + dictWord{5, 10, 12}, + dictWord{7, 10, 375}, + dictWord{9, 10, 88}, + dictWord{9, 10, 438}, + dictWord{11, 11, 62}, + dictWord{139, 10, 270}, + dictWord{ + 134, + 11, + 1766, + }, + dictWord{6, 11, 0}, + dictWord{7, 11, 84}, + 
dictWord{7, 10, 816}, + dictWord{7, 10, 1241}, + dictWord{9, 10, 283}, + dictWord{9, 10, 520}, + dictWord{10, 10, 213}, + dictWord{10, 10, 307}, + dictWord{10, 10, 463}, + dictWord{10, 10, 671}, + dictWord{10, 10, 746}, + dictWord{11, 10, 401}, + dictWord{11, 10, 794}, + dictWord{ + 11, + 11, + 895, + }, + dictWord{12, 10, 517}, + dictWord{17, 11, 11}, + dictWord{18, 10, 107}, + dictWord{147, 10, 115}, + dictWord{5, 0, 878}, + dictWord{133, 0, 972}, + dictWord{ + 6, + 11, + 1665, + }, + dictWord{7, 11, 256}, + dictWord{7, 11, 1388}, + dictWord{138, 11, 499}, + dictWord{4, 10, 258}, + dictWord{136, 10, 639}, + dictWord{4, 11, 22}, + dictWord{5, 11, 10}, + dictWord{6, 10, 22}, + dictWord{7, 11, 848}, + dictWord{7, 10, 903}, + dictWord{7, 10, 1963}, + dictWord{8, 11, 97}, + dictWord{138, 10, 577}, + dictWord{ + 5, + 10, + 681, + }, + dictWord{136, 10, 782}, + dictWord{133, 11, 481}, + dictWord{132, 0, 351}, + dictWord{4, 10, 664}, + dictWord{5, 10, 804}, + dictWord{139, 10, 1013}, + dictWord{6, 11, 134}, + dictWord{7, 11, 437}, + dictWord{7, 11, 959}, + dictWord{9, 11, 37}, + dictWord{14, 11, 285}, + dictWord{14, 11, 371}, + dictWord{144, 11, 60}, + dictWord{7, 11, 486}, + dictWord{8, 11, 155}, + dictWord{11, 11, 93}, + dictWord{140, 11, 164}, + dictWord{132, 0, 286}, + dictWord{7, 0, 438}, + dictWord{7, 0, 627}, + dictWord{7, 0, 1516}, + dictWord{8, 0, 40}, + dictWord{9, 0, 56}, + dictWord{9, 0, 294}, + dictWord{10, 0, 30}, + dictWord{11, 0, 969}, + dictWord{11, 0, 995}, + dictWord{146, 0, 148}, + dictWord{5, 11, 591}, + dictWord{135, 11, 337}, + dictWord{134, 0, 1950}, + dictWord{133, 10, 32}, + dictWord{138, 11, 500}, + dictWord{5, 11, 380}, + dictWord{ + 5, + 11, + 650, + }, + dictWord{136, 11, 310}, + dictWord{4, 11, 364}, + dictWord{7, 11, 1156}, + dictWord{7, 11, 1187}, + dictWord{137, 11, 409}, + dictWord{4, 0, 738}, + dictWord{134, 11, 482}, + dictWord{4, 11, 781}, + dictWord{6, 11, 487}, + dictWord{7, 11, 926}, + dictWord{8, 11, 263}, + dictWord{139, 11, 500}, + dictWord{135, 11, 418}, + dictWord{6, 0, 2047}, + dictWord{10, 0, 969}, + dictWord{4, 10, 289}, + dictWord{7, 10, 629}, + dictWord{7, 10, 1698}, + dictWord{7, 10, 1711}, + dictWord{ + 140, + 10, + 215, + }, + dictWord{6, 10, 450}, + dictWord{136, 10, 109}, + dictWord{134, 0, 818}, + dictWord{136, 10, 705}, + dictWord{133, 0, 866}, + dictWord{4, 11, 94}, + dictWord{ + 135, + 11, + 1265, + }, + dictWord{132, 11, 417}, + dictWord{134, 0, 1467}, + dictWord{135, 10, 1238}, + dictWord{4, 0, 972}, + dictWord{6, 0, 1851}, + dictWord{ + 134, + 0, + 1857, + }, + dictWord{134, 0, 355}, + dictWord{133, 0, 116}, + dictWord{132, 0, 457}, + dictWord{135, 11, 1411}, + dictWord{4, 11, 408}, + dictWord{4, 11, 741}, + dictWord{135, 11, 500}, + dictWord{134, 10, 26}, + dictWord{142, 11, 137}, + dictWord{5, 0, 527}, + dictWord{6, 0, 189}, + dictWord{7, 0, 859}, + dictWord{136, 0, 267}, + dictWord{11, 0, 104}, + dictWord{11, 0, 554}, + dictWord{15, 0, 60}, + dictWord{143, 0, 125}, + dictWord{134, 0, 1613}, + dictWord{4, 10, 414}, + dictWord{5, 10, 467}, + dictWord{ + 9, + 10, + 654, + }, + dictWord{10, 10, 451}, + dictWord{12, 10, 59}, + dictWord{141, 10, 375}, + dictWord{135, 10, 17}, + dictWord{134, 0, 116}, + dictWord{135, 11, 541}, + dictWord{135, 10, 955}, + dictWord{6, 11, 73}, + dictWord{135, 11, 177}, + dictWord{133, 11, 576}, + dictWord{134, 0, 886}, + dictWord{133, 0, 487}, + dictWord{ + 4, + 0, + 86, + }, + dictWord{5, 0, 667}, + dictWord{5, 0, 753}, + dictWord{6, 0, 316}, + dictWord{6, 0, 455}, + dictWord{135, 0, 946}, + dictWord{142, 
11, 231}, + dictWord{150, 0, 45}, + dictWord{134, 0, 863}, + dictWord{134, 0, 1953}, + dictWord{6, 10, 280}, + dictWord{10, 10, 502}, + dictWord{11, 10, 344}, + dictWord{140, 10, 38}, + dictWord{4, 0, 79}, + dictWord{7, 0, 1773}, + dictWord{10, 0, 450}, + dictWord{11, 0, 589}, + dictWord{13, 0, 332}, + dictWord{13, 0, 493}, + dictWord{14, 0, 183}, + dictWord{14, 0, 334}, + dictWord{14, 0, 362}, + dictWord{14, 0, 368}, + dictWord{14, 0, 376}, + dictWord{14, 0, 379}, + dictWord{19, 0, 90}, + dictWord{19, 0, 103}, + dictWord{19, 0, 127}, + dictWord{ + 148, + 0, + 90, + }, + dictWord{5, 10, 45}, + dictWord{7, 10, 1161}, + dictWord{11, 10, 448}, + dictWord{11, 10, 880}, + dictWord{13, 10, 139}, + dictWord{13, 10, 407}, + dictWord{ + 15, + 10, + 16, + }, + dictWord{17, 10, 95}, + dictWord{18, 10, 66}, + dictWord{18, 10, 88}, + dictWord{18, 10, 123}, + dictWord{149, 10, 7}, + dictWord{136, 10, 777}, + dictWord{ + 4, + 10, + 410, + }, + dictWord{135, 10, 521}, + dictWord{135, 10, 1778}, + dictWord{135, 11, 538}, + dictWord{142, 0, 381}, + dictWord{133, 11, 413}, + dictWord{ + 134, + 0, + 1142, + }, + dictWord{6, 0, 1189}, + dictWord{136, 11, 495}, + dictWord{5, 0, 663}, + dictWord{6, 0, 1962}, + dictWord{134, 0, 2003}, + dictWord{7, 11, 54}, + dictWord{ + 8, + 11, + 312, + }, + dictWord{10, 11, 191}, + dictWord{10, 11, 614}, + dictWord{140, 11, 567}, + dictWord{132, 10, 436}, + dictWord{133, 0, 846}, + dictWord{10, 0, 528}, + dictWord{11, 0, 504}, + dictWord{7, 10, 1587}, + dictWord{135, 10, 1707}, + dictWord{5, 0, 378}, + dictWord{8, 0, 465}, + dictWord{9, 0, 286}, + dictWord{10, 0, 185}, + dictWord{ + 10, + 0, + 562, + }, + dictWord{10, 0, 635}, + dictWord{11, 0, 31}, + dictWord{11, 0, 393}, + dictWord{13, 0, 312}, + dictWord{18, 0, 65}, + dictWord{18, 0, 96}, + dictWord{147, 0, 89}, + dictWord{7, 0, 899}, + dictWord{14, 0, 325}, + dictWord{6, 11, 468}, + dictWord{7, 11, 567}, + dictWord{7, 11, 1478}, + dictWord{8, 11, 530}, + dictWord{142, 11, 290}, + dictWord{7, 0, 1880}, + dictWord{9, 0, 680}, + dictWord{139, 0, 798}, + dictWord{134, 0, 1770}, + dictWord{132, 0, 648}, + dictWord{150, 11, 35}, + dictWord{5, 0, 945}, + dictWord{6, 0, 1656}, + dictWord{6, 0, 1787}, + dictWord{7, 0, 167}, + dictWord{8, 0, 824}, + dictWord{9, 0, 391}, + dictWord{10, 0, 375}, + dictWord{139, 0, 185}, + dictWord{ + 6, + 11, + 484, + }, + dictWord{135, 11, 822}, + dictWord{134, 0, 2046}, + dictWord{7, 0, 1645}, + dictWord{8, 0, 352}, + dictWord{137, 0, 249}, + dictWord{132, 0, 152}, + dictWord{6, 0, 611}, + dictWord{135, 0, 1733}, + dictWord{6, 11, 1724}, + dictWord{135, 11, 2022}, + dictWord{133, 0, 1006}, + dictWord{141, 11, 96}, + dictWord{ + 5, + 0, + 420, + }, + dictWord{135, 0, 1449}, + dictWord{146, 11, 149}, + dictWord{135, 0, 832}, + dictWord{135, 10, 663}, + dictWord{133, 0, 351}, + dictWord{5, 0, 40}, + dictWord{ + 7, + 0, + 598, + }, + dictWord{7, 0, 1638}, + dictWord{8, 0, 78}, + dictWord{9, 0, 166}, + dictWord{9, 0, 640}, + dictWord{9, 0, 685}, + dictWord{9, 0, 773}, + dictWord{11, 0, 215}, + dictWord{13, 0, 65}, + dictWord{14, 0, 172}, + dictWord{14, 0, 317}, + dictWord{145, 0, 6}, + dictWord{8, 0, 60}, + dictWord{9, 0, 343}, + dictWord{139, 0, 769}, + dictWord{ + 134, + 0, + 1354, + }, + dictWord{132, 0, 724}, + dictWord{137, 0, 745}, + dictWord{132, 11, 474}, + dictWord{7, 0, 1951}, + dictWord{8, 0, 765}, + dictWord{8, 0, 772}, + dictWord{ + 140, + 0, + 671, + }, + dictWord{7, 0, 108}, + dictWord{8, 0, 219}, + dictWord{8, 0, 388}, + dictWord{9, 0, 775}, + dictWord{11, 0, 275}, + dictWord{140, 0, 
464}, + dictWord{137, 0, 639}, + dictWord{135, 10, 503}, + dictWord{133, 11, 366}, + dictWord{5, 0, 15}, + dictWord{6, 0, 56}, + dictWord{7, 0, 1758}, + dictWord{8, 0, 500}, + dictWord{9, 0, 730}, + dictWord{ + 11, + 0, + 331, + }, + dictWord{13, 0, 150}, + dictWord{14, 0, 282}, + dictWord{5, 11, 305}, + dictWord{9, 11, 560}, + dictWord{141, 11, 208}, + dictWord{4, 10, 113}, + dictWord{ + 5, + 10, + 163, + }, + dictWord{5, 10, 735}, + dictWord{7, 10, 1009}, + dictWord{9, 10, 9}, + dictWord{9, 10, 771}, + dictWord{12, 10, 90}, + dictWord{13, 10, 138}, + dictWord{ + 13, + 10, + 410, + }, + dictWord{143, 10, 128}, + dictWord{4, 10, 324}, + dictWord{138, 10, 104}, + dictWord{135, 11, 466}, + dictWord{142, 11, 27}, + dictWord{134, 0, 1886}, + dictWord{5, 0, 205}, + dictWord{6, 0, 438}, + dictWord{9, 0, 711}, + dictWord{4, 11, 480}, + dictWord{6, 11, 167}, + dictWord{6, 11, 302}, + dictWord{6, 11, 1642}, + dictWord{ + 7, + 11, + 130, + }, + dictWord{7, 11, 656}, + dictWord{7, 11, 837}, + dictWord{7, 11, 1547}, + dictWord{7, 11, 1657}, + dictWord{8, 11, 429}, + dictWord{9, 11, 228}, + dictWord{ + 10, + 11, + 643, + }, + dictWord{13, 11, 289}, + dictWord{13, 11, 343}, + dictWord{147, 11, 101}, + dictWord{134, 0, 865}, + dictWord{6, 0, 2025}, + dictWord{136, 0, 965}, + dictWord{ + 7, + 11, + 278, + }, + dictWord{10, 11, 739}, + dictWord{11, 11, 708}, + dictWord{141, 11, 348}, + dictWord{133, 0, 534}, + dictWord{135, 11, 1922}, + dictWord{ + 137, + 0, + 691, + }, + dictWord{4, 10, 935}, + dictWord{133, 10, 823}, + dictWord{6, 0, 443}, + dictWord{9, 0, 237}, + dictWord{9, 0, 571}, + dictWord{9, 0, 695}, + dictWord{10, 0, 139}, + dictWord{11, 0, 715}, + dictWord{12, 0, 417}, + dictWord{141, 0, 421}, + dictWord{5, 10, 269}, + dictWord{7, 10, 434}, + dictWord{7, 10, 891}, + dictWord{8, 10, 339}, + dictWord{ + 9, + 10, + 702, + }, + dictWord{11, 10, 594}, + dictWord{11, 10, 718}, + dictWord{145, 10, 100}, + dictWord{6, 0, 1555}, + dictWord{7, 0, 878}, + dictWord{9, 10, 485}, + dictWord{141, 10, 264}, + dictWord{134, 10, 1713}, + dictWord{7, 10, 1810}, + dictWord{11, 10, 866}, + dictWord{12, 10, 103}, + dictWord{141, 10, 495}, + dictWord{ + 135, + 10, + 900, + }, + dictWord{6, 0, 1410}, + dictWord{9, 11, 316}, + dictWord{139, 11, 256}, + dictWord{4, 0, 995}, + dictWord{135, 0, 1033}, + dictWord{132, 0, 578}, + dictWord{10, 0, 881}, + dictWord{12, 0, 740}, + dictWord{12, 0, 743}, + dictWord{140, 0, 759}, + dictWord{132, 0, 822}, + dictWord{133, 0, 923}, + dictWord{142, 10, 143}, + dictWord{135, 11, 1696}, + dictWord{6, 11, 363}, + dictWord{7, 11, 1955}, + dictWord{136, 11, 725}, + dictWord{132, 0, 924}, + dictWord{133, 0, 665}, + dictWord{ + 135, + 10, + 2029, + }, + dictWord{135, 0, 1901}, + dictWord{4, 0, 265}, + dictWord{6, 0, 1092}, + dictWord{6, 0, 1417}, + dictWord{7, 0, 807}, + dictWord{135, 0, 950}, + dictWord{ + 5, + 0, + 93, + }, + dictWord{12, 0, 267}, + dictWord{141, 0, 498}, + dictWord{135, 0, 1451}, + dictWord{5, 11, 813}, + dictWord{135, 11, 2046}, + dictWord{5, 10, 625}, + dictWord{135, 10, 1617}, + dictWord{135, 0, 747}, + dictWord{6, 0, 788}, + dictWord{137, 0, 828}, + dictWord{7, 0, 184}, + dictWord{11, 0, 307}, + dictWord{11, 0, 400}, + dictWord{15, 0, 130}, + dictWord{5, 11, 712}, + dictWord{7, 11, 1855}, + dictWord{8, 10, 425}, + dictWord{8, 10, 693}, + dictWord{9, 10, 720}, + dictWord{10, 10, 380}, + dictWord{10, 10, 638}, + dictWord{11, 11, 17}, + dictWord{11, 10, 473}, + dictWord{12, 10, 61}, + dictWord{13, 11, 321}, + dictWord{144, 11, 67}, + dictWord{135, 0, 198}, + dictWord{6, 
11, 320}, + dictWord{7, 11, 781}, + dictWord{7, 11, 1921}, + dictWord{9, 11, 55}, + dictWord{10, 11, 186}, + dictWord{10, 11, 273}, + dictWord{10, 11, 664}, + dictWord{10, 11, 801}, + dictWord{11, 11, 996}, + dictWord{11, 11, 997}, + dictWord{13, 11, 157}, + dictWord{142, 11, 170}, + dictWord{136, 11, 271}, + dictWord{ + 135, + 0, + 994, + }, + dictWord{7, 11, 103}, + dictWord{7, 11, 863}, + dictWord{11, 11, 184}, + dictWord{14, 11, 299}, + dictWord{145, 11, 62}, + dictWord{11, 10, 551}, + dictWord{142, 10, 159}, + dictWord{5, 0, 233}, + dictWord{5, 0, 320}, + dictWord{6, 0, 140}, + dictWord{8, 0, 295}, + dictWord{8, 0, 615}, + dictWord{136, 11, 615}, + dictWord{ + 133, + 0, + 978, + }, + dictWord{4, 0, 905}, + dictWord{6, 0, 1701}, + dictWord{137, 0, 843}, + dictWord{132, 10, 168}, + dictWord{4, 0, 974}, + dictWord{8, 0, 850}, + dictWord{ + 12, + 0, + 709, + }, + dictWord{12, 0, 768}, + dictWord{140, 0, 786}, + dictWord{135, 10, 91}, + dictWord{152, 0, 6}, + dictWord{138, 10, 532}, + dictWord{135, 10, 1884}, + dictWord{132, 0, 509}, + dictWord{6, 0, 1307}, + dictWord{135, 0, 273}, + dictWord{5, 11, 77}, + dictWord{7, 11, 1455}, + dictWord{10, 11, 843}, + dictWord{19, 11, 73}, + dictWord{150, 11, 5}, + dictWord{132, 11, 458}, + dictWord{135, 11, 1420}, + dictWord{6, 11, 109}, + dictWord{138, 11, 382}, + dictWord{6, 0, 201}, + dictWord{6, 11, 330}, + dictWord{7, 10, 70}, + dictWord{7, 11, 1084}, + dictWord{10, 10, 240}, + dictWord{11, 11, 142}, + dictWord{147, 10, 93}, + dictWord{7, 0, 1041}, + dictWord{ + 140, + 11, + 328, + }, + dictWord{133, 11, 354}, + dictWord{134, 0, 1040}, + dictWord{133, 0, 693}, + dictWord{134, 0, 774}, + dictWord{139, 0, 234}, + dictWord{132, 0, 336}, + dictWord{7, 0, 1399}, + dictWord{139, 10, 392}, + dictWord{20, 0, 22}, + dictWord{148, 11, 22}, + dictWord{5, 0, 802}, + dictWord{7, 0, 2021}, + dictWord{136, 0, 805}, + dictWord{ + 5, + 0, + 167, + }, + dictWord{5, 0, 899}, + dictWord{6, 0, 410}, + dictWord{137, 0, 777}, + dictWord{137, 0, 789}, + dictWord{134, 0, 1705}, + dictWord{7, 10, 655}, + dictWord{ + 135, + 10, + 1844, + }, + dictWord{4, 10, 145}, + dictWord{6, 10, 176}, + dictWord{7, 10, 395}, + dictWord{137, 10, 562}, + dictWord{132, 10, 501}, + dictWord{135, 0, 10}, + dictWord{5, 0, 11}, + dictWord{6, 0, 117}, + dictWord{6, 0, 485}, + dictWord{7, 0, 1133}, + dictWord{9, 0, 582}, + dictWord{9, 0, 594}, + dictWord{10, 0, 82}, + dictWord{11, 0, 21}, + dictWord{11, 0, 818}, + dictWord{12, 0, 535}, + dictWord{13, 0, 86}, + dictWord{20, 0, 91}, + dictWord{23, 0, 13}, + dictWord{134, 10, 509}, + dictWord{4, 0, 264}, + dictWord{ + 7, + 0, + 1067, + }, + dictWord{8, 0, 204}, + dictWord{8, 0, 385}, + dictWord{139, 0, 953}, + dictWord{139, 11, 737}, + dictWord{138, 0, 56}, + dictWord{134, 0, 1917}, + dictWord{ + 133, + 0, + 470, + }, + dictWord{10, 11, 657}, + dictWord{14, 11, 297}, + dictWord{142, 11, 361}, + dictWord{135, 11, 412}, + dictWord{7, 0, 1198}, + dictWord{7, 11, 1198}, + dictWord{8, 11, 556}, + dictWord{14, 11, 123}, + dictWord{14, 11, 192}, + dictWord{143, 11, 27}, + dictWord{7, 11, 1985}, + dictWord{14, 11, 146}, + dictWord{15, 11, 42}, + dictWord{16, 11, 23}, + dictWord{17, 11, 86}, + dictWord{146, 11, 17}, + dictWord{11, 0, 1015}, + dictWord{136, 11, 122}, + dictWord{4, 10, 114}, + dictWord{ + 9, + 10, + 492, + }, + dictWord{13, 10, 462}, + dictWord{142, 10, 215}, + dictWord{4, 10, 77}, + dictWord{5, 10, 361}, + dictWord{6, 10, 139}, + dictWord{6, 10, 401}, + dictWord{ + 6, + 10, + 404, + }, + dictWord{7, 10, 413}, + dictWord{7, 10, 715}, + 
dictWord{7, 10, 1716}, + dictWord{11, 10, 279}, + dictWord{12, 10, 179}, + dictWord{12, 10, 258}, + dictWord{ + 13, + 10, + 244, + }, + dictWord{142, 10, 358}, + dictWord{134, 10, 1717}, + dictWord{7, 10, 1061}, + dictWord{8, 10, 82}, + dictWord{11, 10, 250}, + dictWord{12, 10, 420}, + dictWord{141, 10, 184}, + dictWord{133, 0, 715}, + dictWord{135, 10, 724}, + dictWord{9, 0, 919}, + dictWord{9, 0, 922}, + dictWord{9, 0, 927}, + dictWord{9, 0, 933}, + dictWord{9, 0, 962}, + dictWord{9, 0, 1000}, + dictWord{9, 0, 1002}, + dictWord{9, 0, 1021}, + dictWord{12, 0, 890}, + dictWord{12, 0, 907}, + dictWord{12, 0, 930}, + dictWord{ + 15, + 0, + 207, + }, + dictWord{15, 0, 228}, + dictWord{15, 0, 238}, + dictWord{149, 0, 61}, + dictWord{8, 0, 794}, + dictWord{9, 0, 400}, + dictWord{10, 0, 298}, + dictWord{142, 0, 228}, + dictWord{5, 11, 430}, + dictWord{5, 11, 932}, + dictWord{6, 11, 131}, + dictWord{7, 11, 417}, + dictWord{9, 11, 522}, + dictWord{11, 11, 314}, + dictWord{141, 11, 390}, + dictWord{132, 0, 867}, + dictWord{8, 0, 724}, + dictWord{132, 11, 507}, + dictWord{137, 11, 261}, + dictWord{4, 11, 343}, + dictWord{133, 11, 511}, + dictWord{ + 6, + 0, + 190, + }, + dictWord{7, 0, 768}, + dictWord{135, 0, 1170}, + dictWord{6, 10, 513}, + dictWord{135, 10, 1052}, + dictWord{7, 11, 455}, + dictWord{138, 11, 591}, + dictWord{134, 0, 1066}, + dictWord{137, 10, 899}, + dictWord{14, 0, 67}, + dictWord{147, 0, 60}, + dictWord{4, 0, 948}, + dictWord{18, 0, 174}, + dictWord{146, 0, 176}, + dictWord{135, 0, 1023}, + dictWord{7, 10, 1417}, + dictWord{12, 10, 382}, + dictWord{17, 10, 48}, + dictWord{152, 10, 12}, + dictWord{134, 11, 575}, + dictWord{ + 132, + 0, + 764, + }, + dictWord{6, 10, 545}, + dictWord{7, 10, 565}, + dictWord{7, 10, 1669}, + dictWord{10, 10, 114}, + dictWord{11, 10, 642}, + dictWord{140, 10, 618}, + dictWord{ + 6, + 0, + 137, + }, + dictWord{9, 0, 75}, + dictWord{9, 0, 253}, + dictWord{10, 0, 194}, + dictWord{138, 0, 444}, + dictWord{4, 0, 756}, + dictWord{133, 10, 5}, + dictWord{8, 0, 1008}, + dictWord{135, 10, 192}, + dictWord{132, 0, 842}, + dictWord{11, 0, 643}, + dictWord{12, 0, 115}, + dictWord{136, 10, 763}, + dictWord{139, 0, 67}, + dictWord{ + 133, + 10, + 759, + }, + dictWord{4, 0, 821}, + dictWord{5, 0, 760}, + dictWord{7, 0, 542}, + dictWord{8, 0, 135}, + dictWord{8, 0, 496}, + dictWord{135, 11, 580}, + dictWord{7, 10, 370}, + dictWord{7, 10, 1007}, + dictWord{7, 10, 1177}, + dictWord{135, 10, 1565}, + dictWord{135, 10, 1237}, + dictWord{140, 0, 736}, + dictWord{7, 0, 319}, + dictWord{ + 7, + 0, + 355, + }, + dictWord{7, 0, 763}, + dictWord{10, 0, 389}, + dictWord{145, 0, 43}, + dictWord{8, 11, 333}, + dictWord{138, 11, 182}, + dictWord{4, 10, 87}, + dictWord{5, 10, 250}, + dictWord{141, 10, 298}, + dictWord{138, 0, 786}, + dictWord{134, 0, 2044}, + dictWord{8, 11, 330}, + dictWord{140, 11, 477}, + dictWord{135, 11, 1338}, + dictWord{132, 11, 125}, + dictWord{134, 0, 1030}, + dictWord{134, 0, 1083}, + dictWord{132, 11, 721}, + dictWord{135, 10, 814}, + dictWord{7, 11, 776}, + dictWord{ + 8, + 11, + 145, + }, + dictWord{147, 11, 56}, + dictWord{134, 0, 1226}, + dictWord{4, 10, 57}, + dictWord{7, 10, 1195}, + dictWord{7, 10, 1438}, + dictWord{7, 10, 1548}, + dictWord{ + 7, + 10, + 1835, + }, + dictWord{7, 10, 1904}, + dictWord{9, 10, 757}, + dictWord{10, 10, 604}, + dictWord{139, 10, 519}, + dictWord{7, 11, 792}, + dictWord{8, 11, 147}, + dictWord{10, 11, 821}, + dictWord{139, 11, 1021}, + dictWord{137, 11, 797}, + dictWord{4, 0, 58}, + dictWord{5, 0, 286}, + dictWord{6, 0, 
319}, + dictWord{7, 0, 402}, + dictWord{ + 7, + 0, + 1254, + }, + dictWord{7, 0, 1903}, + dictWord{8, 0, 356}, + dictWord{140, 0, 408}, + dictWord{4, 0, 389}, + dictWord{4, 0, 815}, + dictWord{9, 0, 181}, + dictWord{9, 0, 255}, + dictWord{10, 0, 8}, + dictWord{10, 0, 29}, + dictWord{10, 0, 816}, + dictWord{11, 0, 311}, + dictWord{11, 0, 561}, + dictWord{12, 0, 67}, + dictWord{141, 0, 181}, + dictWord{ + 7, + 11, + 1472, + }, + dictWord{135, 11, 1554}, + dictWord{7, 11, 1071}, + dictWord{7, 11, 1541}, + dictWord{7, 11, 1767}, + dictWord{7, 11, 1806}, + dictWord{7, 11, 1999}, + dictWord{9, 11, 248}, + dictWord{10, 11, 400}, + dictWord{11, 11, 162}, + dictWord{11, 11, 178}, + dictWord{11, 11, 242}, + dictWord{12, 11, 605}, + dictWord{ + 15, + 11, + 26, + }, + dictWord{144, 11, 44}, + dictWord{5, 11, 168}, + dictWord{5, 11, 930}, + dictWord{8, 11, 74}, + dictWord{9, 11, 623}, + dictWord{12, 11, 500}, + dictWord{ + 12, + 11, + 579, + }, + dictWord{13, 11, 41}, + dictWord{143, 11, 93}, + dictWord{6, 11, 220}, + dictWord{7, 11, 1101}, + dictWord{141, 11, 105}, + dictWord{5, 0, 474}, + dictWord{ + 7, + 0, + 507, + }, + dictWord{4, 10, 209}, + dictWord{7, 11, 507}, + dictWord{135, 10, 902}, + dictWord{132, 0, 427}, + dictWord{6, 0, 413}, + dictWord{7, 10, 335}, + dictWord{ + 7, + 10, + 1437, + }, + dictWord{7, 10, 1668}, + dictWord{8, 10, 553}, + dictWord{8, 10, 652}, + dictWord{8, 10, 656}, + dictWord{9, 10, 558}, + dictWord{11, 10, 743}, + dictWord{ + 149, + 10, + 18, + }, + dictWord{132, 0, 730}, + dictWord{6, 11, 19}, + dictWord{7, 11, 1413}, + dictWord{139, 11, 428}, + dictWord{133, 0, 373}, + dictWord{132, 10, 559}, + dictWord{7, 11, 96}, + dictWord{8, 11, 401}, + dictWord{137, 11, 896}, + dictWord{7, 0, 799}, + dictWord{7, 0, 1972}, + dictWord{5, 10, 1017}, + dictWord{138, 10, 511}, + dictWord{135, 0, 1793}, + dictWord{7, 11, 1961}, + dictWord{7, 11, 1965}, + dictWord{8, 11, 702}, + dictWord{136, 11, 750}, + dictWord{8, 11, 150}, + dictWord{8, 11, 737}, + dictWord{140, 11, 366}, + dictWord{132, 0, 322}, + dictWord{133, 10, 709}, + dictWord{8, 11, 800}, + dictWord{9, 11, 148}, + dictWord{9, 11, 872}, + dictWord{ + 9, + 11, + 890, + }, + dictWord{11, 11, 309}, + dictWord{11, 11, 1001}, + dictWord{13, 11, 267}, + dictWord{141, 11, 323}, + dictWord{134, 10, 1745}, + dictWord{7, 0, 290}, + dictWord{136, 10, 206}, + dictWord{7, 0, 1651}, + dictWord{145, 0, 89}, + dictWord{139, 0, 2}, + dictWord{132, 0, 672}, + dictWord{6, 0, 1860}, + dictWord{8, 0, 905}, + dictWord{ + 10, + 0, + 844, + }, + dictWord{10, 0, 846}, + dictWord{10, 0, 858}, + dictWord{12, 0, 699}, + dictWord{12, 0, 746}, + dictWord{140, 0, 772}, + dictWord{135, 11, 424}, + dictWord{133, 11, 547}, + dictWord{133, 0, 737}, + dictWord{5, 11, 490}, + dictWord{6, 11, 615}, + dictWord{6, 11, 620}, + dictWord{135, 11, 683}, + dictWord{6, 0, 746}, + dictWord{134, 0, 1612}, + dictWord{132, 10, 776}, + dictWord{9, 11, 385}, + dictWord{149, 11, 17}, + dictWord{133, 0, 145}, + dictWord{135, 10, 1272}, + dictWord{ + 7, + 0, + 884, + }, + dictWord{140, 0, 124}, + dictWord{4, 0, 387}, + dictWord{135, 0, 1288}, + dictWord{5, 11, 133}, + dictWord{136, 10, 406}, + dictWord{136, 11, 187}, + dictWord{ + 6, + 0, + 679, + }, + dictWord{8, 11, 8}, + dictWord{138, 11, 0}, + dictWord{135, 0, 550}, + dictWord{135, 11, 798}, + dictWord{136, 11, 685}, + dictWord{7, 11, 1086}, + dictWord{145, 11, 46}, + dictWord{8, 10, 175}, + dictWord{10, 10, 168}, + dictWord{138, 10, 573}, + dictWord{135, 0, 1305}, + dictWord{4, 0, 576}, + dictWord{ + 135, + 0, + 1263, + }, + 
dictWord{6, 0, 686}, + dictWord{134, 0, 1563}, + dictWord{134, 0, 607}, + dictWord{5, 0, 919}, + dictWord{134, 0, 1673}, + dictWord{148, 0, 37}, + dictWord{ + 8, + 11, + 774, + }, + dictWord{10, 11, 670}, + dictWord{140, 11, 51}, + dictWord{133, 10, 784}, + dictWord{139, 10, 882}, + dictWord{4, 0, 82}, + dictWord{5, 0, 333}, + dictWord{ + 5, + 0, + 904, + }, + dictWord{6, 0, 207}, + dictWord{7, 0, 325}, + dictWord{7, 0, 1726}, + dictWord{8, 0, 101}, + dictWord{10, 0, 778}, + dictWord{139, 0, 220}, + dictWord{135, 11, 371}, + dictWord{132, 0, 958}, + dictWord{133, 0, 903}, + dictWord{4, 11, 127}, + dictWord{5, 11, 350}, + dictWord{6, 11, 356}, + dictWord{8, 11, 426}, + dictWord{9, 11, 572}, + dictWord{10, 11, 247}, + dictWord{139, 11, 312}, + dictWord{140, 0, 147}, + dictWord{6, 11, 59}, + dictWord{7, 11, 885}, + dictWord{9, 11, 603}, + dictWord{ + 141, + 11, + 397, + }, + dictWord{10, 0, 367}, + dictWord{9, 10, 14}, + dictWord{9, 10, 441}, + dictWord{139, 10, 9}, + dictWord{11, 10, 966}, + dictWord{12, 10, 287}, + dictWord{ + 13, + 10, + 342, + }, + dictWord{13, 10, 402}, + dictWord{15, 10, 110}, + dictWord{143, 10, 163}, + dictWord{134, 0, 690}, + dictWord{132, 0, 705}, + dictWord{9, 0, 651}, + dictWord{ + 11, + 0, + 971, + }, + dictWord{13, 0, 273}, + dictWord{7, 10, 1428}, + dictWord{7, 10, 1640}, + dictWord{7, 10, 1867}, + dictWord{9, 10, 169}, + dictWord{9, 10, 182}, + dictWord{ + 9, + 10, + 367, + }, + dictWord{9, 10, 478}, + dictWord{9, 10, 506}, + dictWord{9, 10, 551}, + dictWord{9, 10, 557}, + dictWord{9, 10, 648}, + dictWord{9, 10, 697}, + dictWord{ + 9, + 10, + 705, + }, + dictWord{9, 10, 725}, + dictWord{9, 10, 787}, + dictWord{9, 10, 794}, + dictWord{10, 10, 198}, + dictWord{10, 10, 214}, + dictWord{10, 10, 267}, + dictWord{ + 10, + 10, + 275, + }, + dictWord{10, 10, 456}, + dictWord{10, 10, 551}, + dictWord{10, 10, 561}, + dictWord{10, 10, 613}, + dictWord{10, 10, 627}, + dictWord{10, 10, 668}, + dictWord{10, 10, 675}, + dictWord{10, 10, 691}, + dictWord{10, 10, 695}, + dictWord{10, 10, 707}, + dictWord{10, 10, 715}, + dictWord{11, 10, 183}, + dictWord{ + 11, + 10, + 201, + }, + dictWord{11, 10, 262}, + dictWord{11, 10, 352}, + dictWord{11, 10, 439}, + dictWord{11, 10, 493}, + dictWord{11, 10, 572}, + dictWord{11, 10, 591}, + dictWord{ + 11, + 10, + 608, + }, + dictWord{11, 10, 611}, + dictWord{11, 10, 646}, + dictWord{11, 10, 674}, + dictWord{11, 10, 711}, + dictWord{11, 10, 751}, + dictWord{11, 10, 761}, + dictWord{11, 10, 776}, + dictWord{11, 10, 785}, + dictWord{11, 10, 850}, + dictWord{11, 10, 853}, + dictWord{11, 10, 862}, + dictWord{11, 10, 865}, + dictWord{ + 11, + 10, + 868, + }, + dictWord{11, 10, 875}, + dictWord{11, 10, 898}, + dictWord{11, 10, 902}, + dictWord{11, 10, 903}, + dictWord{11, 10, 910}, + dictWord{11, 10, 932}, + dictWord{ + 11, + 10, + 942, + }, + dictWord{11, 10, 957}, + dictWord{11, 10, 967}, + dictWord{11, 10, 972}, + dictWord{12, 10, 148}, + dictWord{12, 10, 195}, + dictWord{12, 10, 220}, + dictWord{12, 10, 237}, + dictWord{12, 10, 318}, + dictWord{12, 10, 339}, + dictWord{12, 10, 393}, + dictWord{12, 10, 445}, + dictWord{12, 10, 450}, + dictWord{ + 12, + 10, + 474, + }, + dictWord{12, 10, 505}, + dictWord{12, 10, 509}, + dictWord{12, 10, 533}, + dictWord{12, 10, 591}, + dictWord{12, 10, 594}, + dictWord{12, 10, 597}, + dictWord{ + 12, + 10, + 621, + }, + dictWord{12, 10, 633}, + dictWord{12, 10, 642}, + dictWord{13, 10, 59}, + dictWord{13, 10, 60}, + dictWord{13, 10, 145}, + dictWord{13, 10, 239}, + dictWord{13, 10, 250}, + dictWord{13, 10, 
329}, + dictWord{13, 10, 344}, + dictWord{13, 10, 365}, + dictWord{13, 10, 372}, + dictWord{13, 10, 387}, + dictWord{ + 13, + 10, + 403, + }, + dictWord{13, 10, 414}, + dictWord{13, 10, 456}, + dictWord{13, 10, 470}, + dictWord{13, 10, 478}, + dictWord{13, 10, 483}, + dictWord{13, 10, 489}, + dictWord{ + 14, + 10, + 55, + }, + dictWord{14, 10, 57}, + dictWord{14, 10, 81}, + dictWord{14, 10, 90}, + dictWord{14, 10, 148}, + dictWord{14, 10, 239}, + dictWord{14, 10, 266}, + dictWord{ + 14, + 10, + 321, + }, + dictWord{14, 10, 326}, + dictWord{14, 10, 327}, + dictWord{14, 10, 330}, + dictWord{14, 10, 347}, + dictWord{14, 10, 355}, + dictWord{14, 10, 401}, + dictWord{14, 10, 404}, + dictWord{14, 10, 411}, + dictWord{14, 10, 414}, + dictWord{14, 10, 416}, + dictWord{14, 10, 420}, + dictWord{15, 10, 61}, + dictWord{ + 15, + 10, + 74, + }, + dictWord{15, 10, 87}, + dictWord{15, 10, 88}, + dictWord{15, 10, 94}, + dictWord{15, 10, 96}, + dictWord{15, 10, 116}, + dictWord{15, 10, 149}, + dictWord{ + 15, + 10, + 154, + }, + dictWord{16, 10, 50}, + dictWord{16, 10, 63}, + dictWord{16, 10, 73}, + dictWord{17, 10, 2}, + dictWord{17, 10, 66}, + dictWord{17, 10, 92}, + dictWord{17, 10, 103}, + dictWord{17, 10, 112}, + dictWord{17, 10, 120}, + dictWord{18, 10, 50}, + dictWord{18, 10, 54}, + dictWord{18, 10, 82}, + dictWord{18, 10, 86}, + dictWord{18, 10, 90}, + dictWord{18, 10, 111}, + dictWord{18, 10, 115}, + dictWord{18, 10, 156}, + dictWord{19, 10, 40}, + dictWord{19, 10, 79}, + dictWord{20, 10, 78}, + dictWord{149, 10, 22}, + dictWord{7, 0, 887}, + dictWord{5, 10, 161}, + dictWord{135, 10, 839}, + dictWord{142, 11, 98}, + dictWord{134, 0, 90}, + dictWord{138, 11, 356}, + dictWord{ + 135, + 11, + 441, + }, + dictWord{6, 11, 111}, + dictWord{7, 11, 4}, + dictWord{8, 11, 163}, + dictWord{8, 11, 776}, + dictWord{138, 11, 566}, + dictWord{134, 0, 908}, + dictWord{ + 134, + 0, + 1261, + }, + dictWord{7, 0, 813}, + dictWord{12, 0, 497}, + dictWord{141, 0, 56}, + dictWord{134, 0, 1235}, + dictWord{135, 0, 429}, + dictWord{135, 11, 1994}, + dictWord{138, 0, 904}, + dictWord{6, 0, 125}, + dictWord{7, 0, 1277}, + dictWord{137, 0, 772}, + dictWord{151, 0, 12}, + dictWord{4, 0, 841}, + dictWord{5, 0, 386}, + dictWord{ + 133, + 11, + 386, + }, + dictWord{5, 11, 297}, + dictWord{135, 11, 1038}, + dictWord{6, 0, 860}, + dictWord{6, 0, 1069}, + dictWord{135, 11, 309}, + dictWord{136, 0, 946}, + dictWord{135, 10, 1814}, + dictWord{141, 11, 418}, + dictWord{136, 11, 363}, + dictWord{10, 0, 768}, + dictWord{139, 0, 787}, + dictWord{22, 11, 30}, + dictWord{ + 150, + 11, + 33, + }, + dictWord{6, 0, 160}, + dictWord{7, 0, 1106}, + dictWord{9, 0, 770}, + dictWord{11, 0, 112}, + dictWord{140, 0, 413}, + dictWord{11, 11, 216}, + dictWord{ + 139, + 11, + 340, + }, + dictWord{136, 10, 139}, + dictWord{135, 11, 1390}, + dictWord{135, 11, 808}, + dictWord{132, 11, 280}, + dictWord{12, 0, 271}, + dictWord{17, 0, 109}, + dictWord{7, 10, 643}, + dictWord{136, 10, 236}, + dictWord{140, 11, 54}, + dictWord{4, 11, 421}, + dictWord{133, 11, 548}, + dictWord{11, 0, 719}, + dictWord{12, 0, 36}, + dictWord{141, 0, 337}, + dictWord{7, 0, 581}, + dictWord{9, 0, 644}, + dictWord{137, 0, 699}, + dictWord{11, 11, 511}, + dictWord{13, 11, 394}, + dictWord{14, 11, 298}, + dictWord{14, 11, 318}, + dictWord{146, 11, 103}, + dictWord{7, 0, 304}, + dictWord{9, 0, 646}, + dictWord{9, 0, 862}, + dictWord{11, 0, 696}, + dictWord{12, 0, 208}, + dictWord{15, 0, 79}, + dictWord{147, 0, 108}, + dictWord{4, 0, 631}, + dictWord{7, 0, 1126}, + dictWord{135, 0, 
1536}, + dictWord{135, 11, 1527}, + dictWord{8, 0, 880}, + dictWord{10, 0, 869}, + dictWord{138, 0, 913}, + dictWord{7, 0, 1513}, + dictWord{5, 10, 54}, + dictWord{6, 11, 254}, + dictWord{9, 11, 109}, + dictWord{138, 11, 103}, + dictWord{135, 0, 981}, + dictWord{133, 11, 729}, + dictWord{132, 10, 744}, + dictWord{132, 0, 434}, + dictWord{134, 0, 550}, + dictWord{7, 0, 930}, + dictWord{10, 0, 476}, + dictWord{13, 0, 452}, + dictWord{19, 0, 104}, + dictWord{6, 11, 1630}, + dictWord{10, 10, 402}, + dictWord{146, 10, 55}, + dictWord{5, 0, 553}, + dictWord{138, 0, 824}, + dictWord{136, 0, 452}, + dictWord{8, 0, 151}, + dictWord{137, 10, 624}, + dictWord{132, 10, 572}, + dictWord{132, 0, 772}, + dictWord{133, 11, 671}, + dictWord{ + 133, + 0, + 292, + }, + dictWord{138, 0, 135}, + dictWord{132, 11, 889}, + dictWord{140, 11, 207}, + dictWord{9, 0, 504}, + dictWord{6, 10, 43}, + dictWord{7, 10, 38}, + dictWord{ + 8, + 10, + 248, + }, + dictWord{138, 10, 513}, + dictWord{6, 0, 1089}, + dictWord{135, 11, 1910}, + dictWord{4, 11, 627}, + dictWord{133, 11, 775}, + dictWord{135, 0, 783}, + dictWord{133, 10, 766}, + dictWord{133, 10, 363}, + dictWord{7, 0, 387}, + dictWord{135, 11, 387}, + dictWord{7, 0, 393}, + dictWord{10, 0, 603}, + dictWord{11, 0, 206}, + dictWord{7, 11, 202}, + dictWord{11, 11, 362}, + dictWord{11, 11, 948}, + dictWord{140, 11, 388}, + dictWord{6, 11, 507}, + dictWord{7, 11, 451}, + dictWord{8, 11, 389}, + dictWord{12, 11, 490}, + dictWord{13, 11, 16}, + dictWord{13, 11, 215}, + dictWord{13, 11, 351}, + dictWord{18, 11, 132}, + dictWord{147, 11, 125}, + dictWord{ + 4, + 0, + 912, + }, + dictWord{9, 0, 232}, + dictWord{135, 11, 841}, + dictWord{6, 10, 258}, + dictWord{140, 10, 409}, + dictWord{5, 10, 249}, + dictWord{148, 10, 82}, + dictWord{ + 136, + 11, + 566, + }, + dictWord{6, 0, 977}, + dictWord{135, 11, 1214}, + dictWord{7, 0, 1973}, + dictWord{136, 0, 716}, + dictWord{135, 0, 98}, + dictWord{133, 0, 733}, + dictWord{ + 5, + 11, + 912, + }, + dictWord{134, 11, 1695}, + dictWord{5, 10, 393}, + dictWord{6, 10, 378}, + dictWord{7, 10, 1981}, + dictWord{9, 10, 32}, + dictWord{9, 10, 591}, + dictWord{10, 10, 685}, + dictWord{10, 10, 741}, + dictWord{142, 10, 382}, + dictWord{133, 10, 788}, + dictWord{10, 0, 19}, + dictWord{11, 0, 911}, + dictWord{7, 10, 1968}, + dictWord{141, 10, 509}, + dictWord{5, 0, 668}, + dictWord{5, 11, 236}, + dictWord{6, 11, 572}, + dictWord{8, 11, 492}, + dictWord{11, 11, 618}, + dictWord{144, 11, 56}, + dictWord{135, 11, 1789}, + dictWord{4, 0, 360}, + dictWord{5, 0, 635}, + dictWord{5, 0, 700}, + dictWord{5, 10, 58}, + dictWord{5, 10, 171}, + dictWord{5, 10, 683}, + dictWord{ + 6, + 10, + 291, + }, + dictWord{6, 10, 566}, + dictWord{7, 10, 1650}, + dictWord{11, 10, 523}, + dictWord{12, 10, 273}, + dictWord{12, 10, 303}, + dictWord{15, 10, 39}, + dictWord{143, 10, 111}, + dictWord{133, 0, 901}, + dictWord{134, 10, 589}, + dictWord{5, 11, 190}, + dictWord{136, 11, 318}, + dictWord{140, 0, 656}, + dictWord{ + 7, + 0, + 726, + }, + dictWord{152, 0, 9}, + dictWord{4, 10, 917}, + dictWord{133, 10, 1005}, + dictWord{135, 10, 1598}, + dictWord{134, 11, 491}, + dictWord{4, 10, 919}, + dictWord{133, 11, 434}, + dictWord{137, 0, 72}, + dictWord{6, 0, 1269}, + dictWord{6, 0, 1566}, + dictWord{134, 0, 1621}, + dictWord{9, 0, 463}, + dictWord{10, 0, 595}, + dictWord{4, 10, 255}, + dictWord{5, 10, 302}, + dictWord{6, 10, 132}, + dictWord{7, 10, 128}, + dictWord{7, 10, 283}, + dictWord{7, 10, 1299}, + dictWord{10, 10, 52}, + dictWord{ + 10, + 10, + 514, + }, + 
dictWord{11, 10, 925}, + dictWord{13, 10, 92}, + dictWord{142, 10, 309}, + dictWord{135, 0, 1454}, + dictWord{134, 0, 1287}, + dictWord{11, 0, 600}, + dictWord{13, 0, 245}, + dictWord{137, 10, 173}, + dictWord{136, 0, 989}, + dictWord{7, 0, 164}, + dictWord{7, 0, 1571}, + dictWord{9, 0, 107}, + dictWord{140, 0, 225}, + dictWord{6, 0, 1061}, + dictWord{141, 10, 442}, + dictWord{4, 0, 27}, + dictWord{5, 0, 484}, + dictWord{5, 0, 510}, + dictWord{6, 0, 434}, + dictWord{7, 0, 1000}, + dictWord{ + 7, + 0, + 1098, + }, + dictWord{136, 0, 2}, + dictWord{7, 11, 85}, + dictWord{7, 11, 247}, + dictWord{8, 11, 585}, + dictWord{10, 11, 163}, + dictWord{138, 11, 316}, + dictWord{ + 11, + 11, + 103, + }, + dictWord{142, 11, 0}, + dictWord{134, 0, 1127}, + dictWord{4, 0, 460}, + dictWord{134, 0, 852}, + dictWord{134, 10, 210}, + dictWord{4, 0, 932}, + dictWord{ + 133, + 0, + 891, + }, + dictWord{6, 0, 588}, + dictWord{147, 11, 83}, + dictWord{8, 0, 625}, + dictWord{4, 10, 284}, + dictWord{134, 10, 223}, + dictWord{134, 0, 76}, + dictWord{8, 0, 92}, + dictWord{137, 0, 221}, + dictWord{4, 11, 124}, + dictWord{10, 11, 457}, + dictWord{11, 11, 121}, + dictWord{11, 11, 169}, + dictWord{11, 11, 422}, + dictWord{ + 11, + 11, + 870, + }, + dictWord{12, 11, 214}, + dictWord{13, 11, 389}, + dictWord{14, 11, 187}, + dictWord{143, 11, 77}, + dictWord{9, 11, 618}, + dictWord{138, 11, 482}, + dictWord{ + 4, + 10, + 218, + }, + dictWord{7, 10, 526}, + dictWord{143, 10, 137}, + dictWord{13, 0, 9}, + dictWord{14, 0, 104}, + dictWord{14, 0, 311}, + dictWord{4, 10, 270}, + dictWord{ + 5, + 10, + 192, + }, + dictWord{6, 10, 332}, + dictWord{135, 10, 1322}, + dictWord{140, 10, 661}, + dictWord{135, 11, 1193}, + dictWord{6, 11, 107}, + dictWord{7, 11, 638}, + dictWord{7, 11, 1632}, + dictWord{137, 11, 396}, + dictWord{132, 0, 763}, + dictWord{4, 0, 622}, + dictWord{5, 11, 370}, + dictWord{134, 11, 1756}, + dictWord{ + 133, + 0, + 253, + }, + dictWord{135, 0, 546}, + dictWord{9, 0, 73}, + dictWord{10, 0, 110}, + dictWord{14, 0, 185}, + dictWord{17, 0, 119}, + dictWord{133, 11, 204}, + dictWord{7, 0, 624}, + dictWord{7, 0, 916}, + dictWord{10, 0, 256}, + dictWord{139, 0, 87}, + dictWord{7, 10, 379}, + dictWord{8, 10, 481}, + dictWord{137, 10, 377}, + dictWord{5, 0, 212}, + dictWord{12, 0, 35}, + dictWord{13, 0, 382}, + dictWord{5, 11, 970}, + dictWord{134, 11, 1706}, + dictWord{9, 0, 746}, + dictWord{5, 10, 1003}, + dictWord{134, 10, 149}, + dictWord{10, 0, 150}, + dictWord{11, 0, 849}, + dictWord{13, 0, 330}, + dictWord{8, 10, 262}, + dictWord{9, 10, 627}, + dictWord{11, 10, 214}, + dictWord{11, 10, 404}, + dictWord{11, 10, 457}, + dictWord{11, 10, 780}, + dictWord{11, 10, 913}, + dictWord{13, 10, 401}, + dictWord{142, 10, 200}, + dictWord{134, 0, 1466}, + dictWord{ + 135, + 11, + 3, + }, + dictWord{6, 0, 1299}, + dictWord{4, 11, 35}, + dictWord{5, 11, 121}, + dictWord{5, 11, 483}, + dictWord{5, 11, 685}, + dictWord{6, 11, 489}, + dictWord{7, 11, 1204}, + dictWord{136, 11, 394}, + dictWord{135, 10, 742}, + dictWord{4, 10, 142}, + dictWord{136, 10, 304}, + dictWord{4, 11, 921}, + dictWord{133, 11, 1007}, + dictWord{ + 134, + 0, + 1518, + }, + dictWord{6, 0, 1229}, + dictWord{135, 0, 1175}, + dictWord{133, 0, 816}, + dictWord{12, 0, 159}, + dictWord{4, 10, 471}, + dictWord{4, 11, 712}, + dictWord{ + 5, + 10, + 51, + }, + dictWord{6, 10, 602}, + dictWord{7, 10, 925}, + dictWord{8, 10, 484}, + dictWord{138, 10, 195}, + dictWord{134, 11, 1629}, + dictWord{5, 0, 869}, + dictWord{ + 5, + 0, + 968, + }, + dictWord{6, 0, 1626}, + 
dictWord{8, 0, 734}, + dictWord{136, 0, 784}, + dictWord{4, 0, 542}, + dictWord{6, 0, 1716}, + dictWord{6, 0, 1727}, + dictWord{ + 7, + 0, + 1082, + }, + dictWord{7, 0, 1545}, + dictWord{8, 0, 56}, + dictWord{8, 0, 118}, + dictWord{8, 0, 412}, + dictWord{8, 0, 564}, + dictWord{9, 0, 888}, + dictWord{9, 0, 908}, + dictWord{ + 10, + 0, + 50, + }, + dictWord{10, 0, 423}, + dictWord{11, 0, 685}, + dictWord{11, 0, 697}, + dictWord{11, 0, 933}, + dictWord{12, 0, 299}, + dictWord{13, 0, 126}, + dictWord{ + 13, + 0, + 136, + }, + dictWord{13, 0, 170}, + dictWord{13, 0, 190}, + dictWord{136, 10, 688}, + dictWord{132, 10, 697}, + dictWord{4, 0, 232}, + dictWord{9, 0, 202}, + dictWord{ + 10, + 0, + 474, + }, + dictWord{140, 0, 433}, + dictWord{136, 0, 212}, + dictWord{6, 0, 108}, + dictWord{7, 0, 1003}, + dictWord{7, 0, 1181}, + dictWord{8, 0, 111}, + dictWord{ + 136, + 0, + 343, + }, + dictWord{5, 10, 221}, + dictWord{135, 11, 1255}, + dictWord{133, 11, 485}, + dictWord{134, 0, 1712}, + dictWord{142, 0, 216}, + dictWord{5, 0, 643}, + dictWord{ + 6, + 0, + 516, + }, + dictWord{4, 11, 285}, + dictWord{5, 11, 317}, + dictWord{6, 11, 301}, + dictWord{7, 11, 7}, + dictWord{8, 11, 153}, + dictWord{10, 11, 766}, + dictWord{ + 11, + 11, + 468, + }, + dictWord{12, 11, 467}, + dictWord{141, 11, 143}, + dictWord{4, 0, 133}, + dictWord{7, 0, 711}, + dictWord{7, 0, 1298}, + dictWord{135, 0, 1585}, + dictWord{ + 134, + 0, + 650, + }, + dictWord{135, 11, 512}, + dictWord{6, 0, 99}, + dictWord{7, 0, 1808}, + dictWord{145, 0, 57}, + dictWord{6, 0, 246}, + dictWord{6, 0, 574}, + dictWord{7, 0, 428}, + dictWord{9, 0, 793}, + dictWord{10, 0, 669}, + dictWord{11, 0, 485}, + dictWord{11, 0, 840}, + dictWord{12, 0, 300}, + dictWord{14, 0, 250}, + dictWord{145, 0, 55}, + dictWord{ + 4, + 10, + 132, + }, + dictWord{5, 10, 69}, + dictWord{135, 10, 1242}, + dictWord{136, 0, 1023}, + dictWord{7, 0, 302}, + dictWord{132, 10, 111}, + dictWord{135, 0, 1871}, + dictWord{132, 0, 728}, + dictWord{9, 0, 252}, + dictWord{132, 10, 767}, + dictWord{6, 0, 461}, + dictWord{7, 0, 1590}, + dictWord{7, 10, 1416}, + dictWord{7, 10, 2005}, + dictWord{8, 10, 131}, + dictWord{8, 10, 466}, + dictWord{9, 10, 672}, + dictWord{13, 10, 252}, + dictWord{148, 10, 103}, + dictWord{6, 0, 323}, + dictWord{135, 0, 1564}, + dictWord{7, 0, 461}, + dictWord{136, 0, 775}, + dictWord{6, 10, 44}, + dictWord{136, 10, 368}, + dictWord{139, 0, 172}, + dictWord{132, 0, 464}, + dictWord{4, 10, 570}, + dictWord{133, 10, 120}, + dictWord{137, 11, 269}, + dictWord{6, 10, 227}, + dictWord{135, 10, 1589}, + dictWord{6, 11, 1719}, + dictWord{6, 11, 1735}, + dictWord{ + 7, + 11, + 2016, + }, + dictWord{7, 11, 2020}, + dictWord{8, 11, 837}, + dictWord{137, 11, 852}, + dictWord{7, 0, 727}, + dictWord{146, 0, 73}, + dictWord{132, 0, 1023}, + dictWord{135, 11, 852}, + dictWord{135, 10, 1529}, + dictWord{136, 0, 577}, + dictWord{138, 11, 568}, + dictWord{134, 0, 1037}, + dictWord{8, 11, 67}, + dictWord{ + 138, + 11, + 419, + }, + dictWord{4, 0, 413}, + dictWord{5, 0, 677}, + dictWord{8, 0, 432}, + dictWord{140, 0, 280}, + dictWord{10, 0, 600}, + dictWord{6, 10, 1667}, + dictWord{ + 7, + 11, + 967, + }, + dictWord{7, 10, 2036}, + dictWord{141, 11, 11}, + dictWord{6, 10, 511}, + dictWord{140, 10, 132}, + dictWord{6, 0, 799}, + dictWord{5, 10, 568}, + dictWord{ + 6, + 10, + 138, + }, + dictWord{135, 10, 1293}, + dictWord{8, 0, 159}, + dictWord{4, 10, 565}, + dictWord{136, 10, 827}, + dictWord{7, 0, 646}, + dictWord{7, 0, 1730}, + dictWord{ + 11, + 0, + 446, + }, + dictWord{141, 0, 
178}, + dictWord{4, 10, 922}, + dictWord{133, 10, 1023}, + dictWord{135, 11, 11}, + dictWord{132, 0, 395}, + dictWord{11, 0, 145}, + dictWord{135, 10, 1002}, + dictWord{9, 0, 174}, + dictWord{10, 0, 164}, + dictWord{11, 0, 440}, + dictWord{11, 0, 514}, + dictWord{11, 0, 841}, + dictWord{15, 0, 98}, + dictWord{149, 0, 20}, + dictWord{134, 0, 426}, + dictWord{10, 0, 608}, + dictWord{139, 0, 1002}, + dictWord{7, 11, 320}, + dictWord{8, 11, 51}, + dictWord{12, 11, 481}, + dictWord{12, 11, 570}, + dictWord{148, 11, 106}, + dictWord{9, 0, 977}, + dictWord{9, 0, 983}, + dictWord{132, 11, 445}, + dictWord{138, 0, 250}, + dictWord{139, 0, 100}, + dictWord{6, 0, 1982}, + dictWord{136, 10, 402}, + dictWord{133, 11, 239}, + dictWord{4, 10, 716}, + dictWord{141, 10, 31}, + dictWord{5, 0, 476}, + dictWord{7, 11, 83}, + dictWord{7, 11, 1990}, + dictWord{8, 11, 130}, + dictWord{139, 11, 720}, + dictWord{8, 10, 691}, + dictWord{136, 10, 731}, + dictWord{5, 11, 123}, + dictWord{ + 6, + 11, + 530, + }, + dictWord{7, 11, 348}, + dictWord{135, 11, 1419}, + dictWord{5, 0, 76}, + dictWord{6, 0, 458}, + dictWord{6, 0, 497}, + dictWord{7, 0, 868}, + dictWord{9, 0, 658}, + dictWord{10, 0, 594}, + dictWord{11, 0, 173}, + dictWord{11, 0, 566}, + dictWord{12, 0, 20}, + dictWord{12, 0, 338}, + dictWord{141, 0, 200}, + dictWord{9, 11, 139}, + dictWord{ + 10, + 11, + 399, + }, + dictWord{11, 11, 469}, + dictWord{12, 11, 634}, + dictWord{141, 11, 223}, + dictWord{9, 10, 840}, + dictWord{138, 10, 803}, + dictWord{133, 10, 847}, + dictWord{11, 11, 223}, + dictWord{140, 11, 168}, + dictWord{132, 11, 210}, + dictWord{8, 0, 447}, + dictWord{9, 10, 53}, + dictWord{9, 10, 268}, + dictWord{9, 10, 901}, + dictWord{10, 10, 518}, + dictWord{10, 10, 829}, + dictWord{11, 10, 188}, + dictWord{13, 10, 74}, + dictWord{14, 10, 46}, + dictWord{15, 10, 17}, + dictWord{15, 10, 33}, + dictWord{17, 10, 40}, + dictWord{18, 10, 36}, + dictWord{19, 10, 20}, + dictWord{22, 10, 1}, + dictWord{152, 10, 2}, + dictWord{4, 0, 526}, + dictWord{7, 0, 1029}, + dictWord{135, 0, 1054}, + dictWord{19, 11, 59}, + dictWord{150, 11, 2}, + dictWord{4, 0, 636}, + dictWord{6, 0, 1875}, + dictWord{6, 0, 1920}, + dictWord{9, 0, 999}, + dictWord{ + 12, + 0, + 807, + }, + dictWord{12, 0, 825}, + dictWord{15, 0, 179}, + dictWord{15, 0, 190}, + dictWord{18, 0, 182}, + dictWord{136, 10, 532}, + dictWord{6, 0, 1699}, + dictWord{ + 7, + 0, + 660, + }, + dictWord{7, 0, 1124}, + dictWord{17, 0, 31}, + dictWord{19, 0, 22}, + dictWord{151, 0, 14}, + dictWord{135, 10, 681}, + dictWord{132, 11, 430}, + dictWord{ + 140, + 10, + 677, + }, + dictWord{4, 10, 684}, + dictWord{136, 10, 384}, + dictWord{132, 11, 756}, + dictWord{133, 11, 213}, + dictWord{7, 0, 188}, + dictWord{7, 10, 110}, + dictWord{ + 8, + 10, + 290, + }, + dictWord{8, 10, 591}, + dictWord{9, 10, 382}, + dictWord{9, 10, 649}, + dictWord{11, 10, 71}, + dictWord{11, 10, 155}, + dictWord{11, 10, 313}, + dictWord{ + 12, + 10, + 5, + }, + dictWord{13, 10, 325}, + dictWord{142, 10, 287}, + dictWord{7, 10, 360}, + dictWord{7, 10, 425}, + dictWord{9, 10, 66}, + dictWord{9, 10, 278}, + dictWord{ + 138, + 10, + 644, + }, + dictWord{142, 11, 164}, + dictWord{4, 0, 279}, + dictWord{7, 0, 301}, + dictWord{137, 0, 362}, + dictWord{134, 11, 586}, + dictWord{135, 0, 1743}, + dictWord{4, 0, 178}, + dictWord{133, 0, 399}, + dictWord{4, 10, 900}, + dictWord{133, 10, 861}, + dictWord{5, 10, 254}, + dictWord{7, 10, 985}, + dictWord{136, 10, 73}, + dictWord{133, 11, 108}, + dictWord{7, 10, 1959}, + dictWord{136, 10, 683}, + dictWord{133, 
11, 219}, + dictWord{4, 11, 193}, + dictWord{5, 11, 916}, + dictWord{ + 7, + 11, + 364, + }, + dictWord{10, 11, 398}, + dictWord{10, 11, 726}, + dictWord{11, 11, 317}, + dictWord{11, 11, 626}, + dictWord{12, 11, 142}, + dictWord{12, 11, 288}, + dictWord{ + 12, + 11, + 678, + }, + dictWord{13, 11, 313}, + dictWord{15, 11, 113}, + dictWord{18, 11, 114}, + dictWord{21, 11, 30}, + dictWord{150, 11, 53}, + dictWord{6, 11, 241}, + dictWord{7, 11, 907}, + dictWord{8, 11, 832}, + dictWord{9, 11, 342}, + dictWord{10, 11, 729}, + dictWord{11, 11, 284}, + dictWord{11, 11, 445}, + dictWord{11, 11, 651}, + dictWord{11, 11, 863}, + dictWord{13, 11, 398}, + dictWord{146, 11, 99}, + dictWord{132, 0, 872}, + dictWord{134, 0, 831}, + dictWord{134, 0, 1692}, + dictWord{ + 6, + 0, + 202, + }, + dictWord{6, 0, 1006}, + dictWord{9, 0, 832}, + dictWord{10, 0, 636}, + dictWord{11, 0, 208}, + dictWord{12, 0, 360}, + dictWord{17, 0, 118}, + dictWord{18, 0, 27}, + dictWord{20, 0, 67}, + dictWord{137, 11, 734}, + dictWord{132, 10, 725}, + dictWord{7, 11, 993}, + dictWord{138, 11, 666}, + dictWord{134, 0, 1954}, + dictWord{ + 134, + 10, + 196, + }, + dictWord{7, 0, 872}, + dictWord{10, 0, 516}, + dictWord{139, 0, 167}, + dictWord{133, 10, 831}, + dictWord{4, 11, 562}, + dictWord{9, 11, 254}, + dictWord{ + 139, + 11, + 879, + }, + dictWord{137, 0, 313}, + dictWord{4, 0, 224}, + dictWord{132, 11, 786}, + dictWord{11, 0, 24}, + dictWord{12, 0, 170}, + dictWord{136, 10, 723}, + dictWord{ + 5, + 0, + 546, + }, + dictWord{7, 0, 35}, + dictWord{8, 0, 11}, + dictWord{8, 0, 12}, + dictWord{9, 0, 315}, + dictWord{9, 0, 533}, + dictWord{10, 0, 802}, + dictWord{11, 0, 166}, + dictWord{ + 12, + 0, + 525, + }, + dictWord{142, 0, 243}, + dictWord{7, 0, 1937}, + dictWord{13, 10, 80}, + dictWord{13, 10, 437}, + dictWord{145, 10, 74}, + dictWord{5, 0, 241}, + dictWord{ + 8, + 0, + 242, + }, + dictWord{9, 0, 451}, + dictWord{10, 0, 667}, + dictWord{11, 0, 598}, + dictWord{140, 0, 429}, + dictWord{150, 0, 46}, + dictWord{6, 0, 1273}, + dictWord{ + 137, + 0, + 830, + }, + dictWord{5, 10, 848}, + dictWord{6, 10, 66}, + dictWord{136, 10, 764}, + dictWord{6, 0, 825}, + dictWord{134, 0, 993}, + dictWord{4, 0, 1006}, + dictWord{ + 10, + 0, + 327, + }, + dictWord{13, 0, 271}, + dictWord{4, 10, 36}, + dictWord{7, 10, 1387}, + dictWord{139, 10, 755}, + dictWord{134, 0, 1023}, + dictWord{135, 0, 1580}, + dictWord{ + 4, + 0, + 366, + }, + dictWord{137, 0, 516}, + dictWord{132, 10, 887}, + dictWord{6, 0, 1736}, + dictWord{135, 0, 1891}, + dictWord{6, 11, 216}, + dictWord{7, 11, 901}, + dictWord{ + 7, + 11, + 1343, + }, + dictWord{136, 11, 493}, + dictWord{6, 10, 165}, + dictWord{138, 10, 388}, + dictWord{7, 11, 341}, + dictWord{139, 11, 219}, + dictWord{4, 10, 719}, + dictWord{135, 10, 155}, + dictWord{134, 0, 1935}, + dictWord{132, 0, 826}, + dictWord{6, 0, 331}, + dictWord{6, 0, 1605}, + dictWord{8, 0, 623}, + dictWord{11, 0, 139}, + dictWord{139, 0, 171}, + dictWord{135, 11, 1734}, + dictWord{10, 11, 115}, + dictWord{11, 11, 420}, + dictWord{12, 11, 154}, + dictWord{13, 11, 404}, + dictWord{ + 14, + 11, + 346, + }, + dictWord{15, 11, 54}, + dictWord{143, 11, 112}, + dictWord{7, 0, 288}, + dictWord{4, 10, 353}, + dictWord{6, 10, 146}, + dictWord{6, 10, 1789}, + dictWord{ + 7, + 10, + 990, + }, + dictWord{7, 10, 1348}, + dictWord{9, 10, 665}, + dictWord{9, 10, 898}, + dictWord{11, 10, 893}, + dictWord{142, 10, 212}, + dictWord{6, 0, 916}, + dictWord{134, 0, 1592}, + dictWord{7, 0, 1888}, + dictWord{4, 10, 45}, + dictWord{135, 10, 1257}, + 
dictWord{5, 11, 1011}, + dictWord{136, 11, 701}, + dictWord{ + 139, + 11, + 596, + }, + dictWord{4, 11, 54}, + dictWord{5, 11, 666}, + dictWord{7, 11, 1039}, + dictWord{7, 11, 1130}, + dictWord{9, 11, 195}, + dictWord{138, 11, 302}, + dictWord{ + 134, + 0, + 1471, + }, + dictWord{134, 0, 1570}, + dictWord{132, 0, 394}, + dictWord{140, 10, 65}, + dictWord{136, 10, 816}, + dictWord{135, 0, 1931}, + dictWord{7, 0, 574}, + dictWord{135, 0, 1719}, + dictWord{134, 11, 467}, + dictWord{132, 0, 658}, + dictWord{9, 0, 781}, + dictWord{10, 0, 144}, + dictWord{11, 0, 385}, + dictWord{13, 0, 161}, + dictWord{13, 0, 228}, + dictWord{13, 0, 268}, + dictWord{20, 0, 107}, + dictWord{134, 11, 1669}, + dictWord{136, 0, 374}, + dictWord{135, 0, 735}, + dictWord{4, 0, 344}, + dictWord{6, 0, 498}, + dictWord{139, 0, 323}, + dictWord{7, 0, 586}, + dictWord{7, 0, 1063}, + dictWord{6, 10, 559}, + dictWord{134, 10, 1691}, + dictWord{137, 0, 155}, + dictWord{133, 0, 906}, + dictWord{7, 11, 122}, + dictWord{9, 11, 259}, + dictWord{10, 11, 84}, + dictWord{11, 11, 470}, + dictWord{12, 11, 541}, + dictWord{ + 141, + 11, + 379, + }, + dictWord{134, 0, 1139}, + dictWord{10, 0, 108}, + dictWord{139, 0, 116}, + dictWord{134, 10, 456}, + dictWord{133, 10, 925}, + dictWord{5, 11, 82}, + dictWord{ + 5, + 11, + 131, + }, + dictWord{7, 11, 1755}, + dictWord{8, 11, 31}, + dictWord{9, 11, 168}, + dictWord{9, 11, 764}, + dictWord{139, 11, 869}, + dictWord{134, 11, 605}, + dictWord{ + 5, + 11, + 278, + }, + dictWord{137, 11, 68}, + dictWord{4, 11, 163}, + dictWord{5, 11, 201}, + dictWord{5, 11, 307}, + dictWord{5, 11, 310}, + dictWord{6, 11, 335}, + dictWord{ + 7, + 11, + 284, + }, + dictWord{136, 11, 165}, + dictWord{135, 11, 1660}, + dictWord{6, 11, 33}, + dictWord{135, 11, 1244}, + dictWord{4, 0, 616}, + dictWord{136, 11, 483}, + dictWord{8, 0, 857}, + dictWord{8, 0, 902}, + dictWord{8, 0, 910}, + dictWord{10, 0, 879}, + dictWord{12, 0, 726}, + dictWord{4, 11, 199}, + dictWord{139, 11, 34}, + dictWord{136, 0, 692}, + dictWord{6, 10, 193}, + dictWord{7, 10, 240}, + dictWord{7, 10, 1682}, + dictWord{10, 10, 51}, + dictWord{10, 10, 640}, + dictWord{11, 10, 410}, + dictWord{13, 10, 82}, + dictWord{14, 10, 247}, + dictWord{14, 10, 331}, + dictWord{142, 10, 377}, + dictWord{6, 0, 823}, + dictWord{134, 0, 983}, + dictWord{ + 139, + 10, + 411, + }, + dictWord{132, 0, 305}, + dictWord{136, 10, 633}, + dictWord{138, 11, 203}, + dictWord{134, 0, 681}, + dictWord{6, 11, 326}, + dictWord{7, 11, 677}, + dictWord{137, 11, 425}, + dictWord{5, 0, 214}, + dictWord{7, 0, 603}, + dictWord{8, 0, 611}, + dictWord{9, 0, 686}, + dictWord{10, 0, 88}, + dictWord{11, 0, 459}, + dictWord{ + 11, + 0, + 496, + }, + dictWord{12, 0, 463}, + dictWord{12, 0, 590}, + dictWord{141, 0, 0}, + dictWord{136, 0, 1004}, + dictWord{142, 0, 23}, + dictWord{134, 0, 1703}, + dictWord{ + 147, + 11, + 8, + }, + dictWord{145, 11, 56}, + dictWord{135, 0, 1443}, + dictWord{4, 10, 237}, + dictWord{135, 10, 514}, + dictWord{6, 0, 714}, + dictWord{145, 0, 19}, + dictWord{ + 5, + 11, + 358, + }, + dictWord{7, 11, 473}, + dictWord{7, 11, 1184}, + dictWord{10, 11, 662}, + dictWord{13, 11, 212}, + dictWord{13, 11, 304}, + dictWord{13, 11, 333}, + dictWord{145, 11, 98}, + dictWord{4, 0, 737}, + dictWord{10, 0, 98}, + dictWord{11, 0, 294}, + dictWord{12, 0, 60}, + dictWord{12, 0, 437}, + dictWord{13, 0, 64}, + dictWord{ + 13, + 0, + 380, + }, + dictWord{142, 0, 430}, + dictWord{6, 10, 392}, + dictWord{7, 10, 65}, + dictWord{135, 10, 2019}, + dictWord{6, 0, 1758}, + dictWord{8, 0, 520}, + 
dictWord{ + 9, + 0, + 345, + }, + dictWord{9, 0, 403}, + dictWord{142, 0, 350}, + dictWord{5, 0, 47}, + dictWord{10, 0, 242}, + dictWord{138, 0, 579}, + dictWord{5, 0, 139}, + dictWord{7, 0, 1168}, + dictWord{138, 0, 539}, + dictWord{134, 0, 1459}, + dictWord{13, 0, 388}, + dictWord{141, 11, 388}, + dictWord{134, 0, 253}, + dictWord{7, 10, 1260}, + dictWord{ + 135, + 10, + 1790, + }, + dictWord{10, 0, 252}, + dictWord{9, 10, 222}, + dictWord{139, 10, 900}, + dictWord{140, 0, 745}, + dictWord{133, 11, 946}, + dictWord{4, 0, 107}, + dictWord{ + 7, + 0, + 613, + }, + dictWord{8, 0, 439}, + dictWord{8, 0, 504}, + dictWord{9, 0, 501}, + dictWord{10, 0, 383}, + dictWord{139, 0, 477}, + dictWord{135, 11, 1485}, + dictWord{ + 132, + 0, + 871, + }, + dictWord{7, 11, 411}, + dictWord{7, 11, 590}, + dictWord{8, 11, 631}, + dictWord{9, 11, 323}, + dictWord{10, 11, 355}, + dictWord{11, 11, 491}, + dictWord{ + 12, + 11, + 143, + }, + dictWord{12, 11, 402}, + dictWord{13, 11, 73}, + dictWord{14, 11, 408}, + dictWord{15, 11, 107}, + dictWord{146, 11, 71}, + dictWord{132, 0, 229}, + dictWord{132, 0, 903}, + dictWord{140, 0, 71}, + dictWord{133, 0, 549}, + dictWord{4, 0, 47}, + dictWord{6, 0, 373}, + dictWord{7, 0, 452}, + dictWord{7, 0, 543}, + dictWord{ + 7, + 0, + 1828, + }, + dictWord{7, 0, 1856}, + dictWord{9, 0, 6}, + dictWord{11, 0, 257}, + dictWord{139, 0, 391}, + dictWord{7, 11, 1467}, + dictWord{8, 11, 328}, + dictWord{ + 10, + 11, + 544, + }, + dictWord{11, 11, 955}, + dictWord{13, 11, 320}, + dictWord{145, 11, 83}, + dictWord{5, 0, 980}, + dictWord{134, 0, 1754}, + dictWord{136, 0, 865}, + dictWord{ + 5, + 0, + 705, + }, + dictWord{137, 0, 606}, + dictWord{7, 0, 161}, + dictWord{8, 10, 201}, + dictWord{136, 10, 605}, + dictWord{143, 11, 35}, + dictWord{5, 11, 835}, + dictWord{ + 6, + 11, + 483, + }, + dictWord{140, 10, 224}, + dictWord{7, 0, 536}, + dictWord{7, 0, 1331}, + dictWord{136, 0, 143}, + dictWord{134, 0, 1388}, + dictWord{5, 0, 724}, + dictWord{ + 10, + 0, + 305, + }, + dictWord{11, 0, 151}, + dictWord{12, 0, 33}, + dictWord{12, 0, 121}, + dictWord{12, 0, 381}, + dictWord{17, 0, 3}, + dictWord{17, 0, 27}, + dictWord{17, 0, 78}, + dictWord{18, 0, 18}, + dictWord{19, 0, 54}, + dictWord{149, 0, 5}, + dictWord{4, 10, 523}, + dictWord{133, 10, 638}, + dictWord{5, 0, 19}, + dictWord{134, 0, 533}, + dictWord{ + 5, + 0, + 395, + }, + dictWord{5, 0, 951}, + dictWord{134, 0, 1776}, + dictWord{135, 0, 1908}, + dictWord{132, 0, 846}, + dictWord{10, 0, 74}, + dictWord{11, 0, 663}, + dictWord{ + 12, + 0, + 210, + }, + dictWord{13, 0, 166}, + dictWord{13, 0, 310}, + dictWord{14, 0, 373}, + dictWord{18, 0, 95}, + dictWord{19, 0, 43}, + dictWord{6, 10, 242}, + dictWord{7, 10, 227}, + dictWord{7, 10, 1581}, + dictWord{8, 10, 104}, + dictWord{9, 10, 113}, + dictWord{9, 10, 220}, + dictWord{9, 10, 427}, + dictWord{10, 10, 239}, + dictWord{11, 10, 579}, + dictWord{11, 10, 1023}, + dictWord{13, 10, 4}, + dictWord{13, 10, 204}, + dictWord{13, 10, 316}, + dictWord{148, 10, 86}, + dictWord{9, 11, 716}, + dictWord{11, 11, 108}, + dictWord{13, 11, 123}, + dictWord{14, 11, 252}, + dictWord{19, 11, 38}, + dictWord{21, 11, 3}, + dictWord{151, 11, 11}, + dictWord{8, 0, 372}, + dictWord{9, 0, 122}, + dictWord{138, 0, 175}, + dictWord{132, 11, 677}, + dictWord{7, 11, 1374}, + dictWord{136, 11, 540}, + dictWord{135, 10, 861}, + dictWord{132, 0, 695}, + dictWord{ + 7, + 0, + 497, + }, + dictWord{9, 0, 387}, + dictWord{147, 0, 81}, + dictWord{136, 0, 937}, + dictWord{134, 0, 718}, + dictWord{7, 0, 1328}, + dictWord{136, 
10, 494}, + dictWord{ + 132, + 11, + 331, + }, + dictWord{6, 0, 1581}, + dictWord{133, 11, 747}, + dictWord{5, 0, 284}, + dictWord{6, 0, 49}, + dictWord{6, 0, 350}, + dictWord{7, 0, 1}, + dictWord{7, 0, 377}, + dictWord{7, 0, 1693}, + dictWord{8, 0, 18}, + dictWord{8, 0, 678}, + dictWord{9, 0, 161}, + dictWord{9, 0, 585}, + dictWord{9, 0, 671}, + dictWord{9, 0, 839}, + dictWord{11, 0, 912}, + dictWord{141, 0, 427}, + dictWord{7, 10, 1306}, + dictWord{8, 10, 505}, + dictWord{9, 10, 482}, + dictWord{10, 10, 126}, + dictWord{11, 10, 225}, + dictWord{12, 10, 347}, + dictWord{12, 10, 449}, + dictWord{13, 10, 19}, + dictWord{14, 10, 218}, + dictWord{142, 10, 435}, + dictWord{10, 10, 764}, + dictWord{12, 10, 120}, + dictWord{ + 13, + 10, + 39, + }, + dictWord{145, 10, 127}, + dictWord{4, 0, 597}, + dictWord{133, 10, 268}, + dictWord{134, 0, 1094}, + dictWord{4, 0, 1008}, + dictWord{134, 0, 1973}, + dictWord{132, 0, 811}, + dictWord{139, 0, 908}, + dictWord{135, 0, 1471}, + dictWord{133, 11, 326}, + dictWord{4, 10, 384}, + dictWord{135, 10, 1022}, + dictWord{ + 7, + 0, + 1935, + }, + dictWord{8, 0, 324}, + dictWord{12, 0, 42}, + dictWord{4, 11, 691}, + dictWord{7, 11, 1935}, + dictWord{8, 11, 324}, + dictWord{9, 11, 35}, + dictWord{10, 11, 680}, + dictWord{11, 11, 364}, + dictWord{12, 11, 42}, + dictWord{13, 11, 357}, + dictWord{146, 11, 16}, + dictWord{135, 0, 2014}, + dictWord{7, 0, 2007}, + dictWord{ + 9, + 0, + 101, + }, + dictWord{9, 0, 450}, + dictWord{10, 0, 66}, + dictWord{10, 0, 842}, + dictWord{11, 0, 536}, + dictWord{12, 0, 587}, + dictWord{6, 11, 32}, + dictWord{7, 11, 385}, + dictWord{7, 11, 757}, + dictWord{7, 11, 1916}, + dictWord{8, 11, 37}, + dictWord{8, 11, 94}, + dictWord{8, 11, 711}, + dictWord{9, 11, 541}, + dictWord{10, 11, 162}, + dictWord{ + 10, + 11, + 795, + }, + dictWord{11, 11, 989}, + dictWord{11, 11, 1010}, + dictWord{12, 11, 14}, + dictWord{142, 11, 308}, + dictWord{139, 0, 586}, + dictWord{ + 135, + 10, + 1703, + }, + dictWord{7, 0, 1077}, + dictWord{11, 0, 28}, + dictWord{9, 10, 159}, + dictWord{140, 10, 603}, + dictWord{6, 0, 1221}, + dictWord{136, 10, 583}, + dictWord{ + 6, + 11, + 152, + }, + dictWord{6, 11, 349}, + dictWord{6, 11, 1682}, + dictWord{7, 11, 1252}, + dictWord{8, 11, 112}, + dictWord{9, 11, 435}, + dictWord{9, 11, 668}, + dictWord{ + 10, + 11, + 290, + }, + dictWord{10, 11, 319}, + dictWord{10, 11, 815}, + dictWord{11, 11, 180}, + dictWord{11, 11, 837}, + dictWord{12, 11, 240}, + dictWord{13, 11, 152}, + dictWord{13, 11, 219}, + dictWord{142, 11, 158}, + dictWord{139, 0, 62}, + dictWord{132, 10, 515}, + dictWord{8, 10, 632}, + dictWord{8, 10, 697}, + dictWord{ + 137, + 10, + 854, + }, + dictWord{134, 0, 1766}, + dictWord{132, 11, 581}, + dictWord{6, 11, 126}, + dictWord{7, 11, 573}, + dictWord{8, 11, 397}, + dictWord{142, 11, 44}, + dictWord{ + 150, + 0, + 28, + }, + dictWord{11, 0, 670}, + dictWord{22, 0, 25}, + dictWord{4, 10, 136}, + dictWord{133, 10, 551}, + dictWord{6, 0, 1665}, + dictWord{7, 0, 256}, + dictWord{ + 7, + 0, + 1388, + }, + dictWord{138, 0, 499}, + dictWord{4, 0, 22}, + dictWord{5, 0, 10}, + dictWord{7, 0, 1576}, + dictWord{136, 0, 97}, + dictWord{134, 10, 1782}, + dictWord{5, 0, 481}, + dictWord{7, 10, 1287}, + dictWord{9, 10, 44}, + dictWord{10, 10, 552}, + dictWord{10, 10, 642}, + dictWord{11, 10, 839}, + dictWord{12, 10, 274}, + dictWord{ + 12, + 10, + 275, + }, + dictWord{12, 10, 372}, + dictWord{13, 10, 91}, + dictWord{142, 10, 125}, + dictWord{133, 11, 926}, + dictWord{7, 11, 1232}, + dictWord{137, 11, 531}, + dictWord{6, 
0, 134}, + dictWord{7, 0, 437}, + dictWord{7, 0, 1824}, + dictWord{9, 0, 37}, + dictWord{14, 0, 285}, + dictWord{142, 0, 371}, + dictWord{7, 0, 486}, + dictWord{8, 0, 155}, + dictWord{11, 0, 93}, + dictWord{140, 0, 164}, + dictWord{6, 0, 1391}, + dictWord{134, 0, 1442}, + dictWord{133, 11, 670}, + dictWord{133, 0, 591}, + dictWord{ + 6, + 10, + 147, + }, + dictWord{7, 10, 886}, + dictWord{7, 11, 1957}, + dictWord{9, 10, 753}, + dictWord{138, 10, 268}, + dictWord{5, 0, 380}, + dictWord{5, 0, 650}, + dictWord{ + 7, + 0, + 1173, + }, + dictWord{136, 0, 310}, + dictWord{4, 0, 364}, + dictWord{7, 0, 1156}, + dictWord{7, 0, 1187}, + dictWord{137, 0, 409}, + dictWord{135, 11, 1621}, + dictWord{ + 134, + 0, + 482, + }, + dictWord{133, 11, 506}, + dictWord{4, 0, 781}, + dictWord{6, 0, 487}, + dictWord{7, 0, 926}, + dictWord{8, 0, 263}, + dictWord{139, 0, 500}, + dictWord{ + 138, + 10, + 137, + }, + dictWord{135, 11, 242}, + dictWord{139, 11, 96}, + dictWord{133, 10, 414}, + dictWord{135, 10, 1762}, + dictWord{134, 0, 804}, + dictWord{5, 11, 834}, + dictWord{7, 11, 1202}, + dictWord{8, 11, 14}, + dictWord{9, 11, 481}, + dictWord{137, 11, 880}, + dictWord{134, 10, 599}, + dictWord{4, 0, 94}, + dictWord{135, 0, 1265}, + dictWord{4, 0, 415}, + dictWord{132, 0, 417}, + dictWord{5, 0, 348}, + dictWord{6, 0, 522}, + dictWord{6, 10, 1749}, + dictWord{7, 11, 1526}, + dictWord{138, 11, 465}, + dictWord{134, 10, 1627}, + dictWord{132, 0, 1012}, + dictWord{132, 10, 488}, + dictWord{4, 11, 357}, + dictWord{6, 11, 172}, + dictWord{7, 11, 143}, + dictWord{ + 137, + 11, + 413, + }, + dictWord{4, 10, 83}, + dictWord{4, 11, 590}, + dictWord{146, 11, 76}, + dictWord{140, 10, 676}, + dictWord{7, 11, 287}, + dictWord{8, 11, 355}, + dictWord{ + 9, + 11, + 293, + }, + dictWord{137, 11, 743}, + dictWord{134, 10, 278}, + dictWord{6, 0, 1803}, + dictWord{18, 0, 165}, + dictWord{24, 0, 21}, + dictWord{5, 11, 169}, + dictWord{ + 7, + 11, + 333, + }, + dictWord{136, 11, 45}, + dictWord{12, 10, 97}, + dictWord{140, 11, 97}, + dictWord{4, 0, 408}, + dictWord{4, 0, 741}, + dictWord{135, 0, 500}, + dictWord{ + 132, + 11, + 198, + }, + dictWord{7, 10, 388}, + dictWord{7, 10, 644}, + dictWord{139, 10, 781}, + dictWord{4, 11, 24}, + dictWord{5, 11, 140}, + dictWord{5, 11, 185}, + dictWord{ + 7, + 11, + 1500, + }, + dictWord{11, 11, 565}, + dictWord{139, 11, 838}, + dictWord{6, 0, 1321}, + dictWord{9, 0, 257}, + dictWord{7, 10, 229}, + dictWord{8, 10, 59}, + dictWord{ + 9, + 10, + 190, + }, + dictWord{10, 10, 378}, + dictWord{140, 10, 191}, + dictWord{4, 11, 334}, + dictWord{133, 11, 593}, + dictWord{135, 11, 1885}, + dictWord{134, 0, 1138}, + dictWord{4, 0, 249}, + dictWord{6, 0, 73}, + dictWord{135, 0, 177}, + dictWord{133, 0, 576}, + dictWord{142, 0, 231}, + dictWord{137, 0, 288}, + dictWord{132, 10, 660}, + dictWord{7, 10, 1035}, + dictWord{138, 10, 737}, + dictWord{135, 0, 1487}, + dictWord{6, 0, 989}, + dictWord{9, 0, 433}, + dictWord{7, 10, 690}, + dictWord{9, 10, 587}, + dictWord{140, 10, 521}, + dictWord{7, 0, 1264}, + dictWord{7, 0, 1678}, + dictWord{11, 0, 945}, + dictWord{12, 0, 341}, + dictWord{12, 0, 471}, + dictWord{140, 0, 569}, + dictWord{132, 11, 709}, + dictWord{133, 11, 897}, + dictWord{5, 11, 224}, + dictWord{13, 11, 174}, + dictWord{146, 11, 52}, + dictWord{135, 11, 1840}, + dictWord{ + 134, + 10, + 1744, + }, + dictWord{12, 0, 87}, + dictWord{16, 0, 74}, + dictWord{4, 10, 733}, + dictWord{9, 10, 194}, + dictWord{10, 10, 92}, + dictWord{11, 10, 198}, + dictWord{ + 12, + 10, + 84, + }, + dictWord{141, 10, 128}, + 
dictWord{140, 0, 779}, + dictWord{135, 0, 538}, + dictWord{4, 11, 608}, + dictWord{133, 11, 497}, + dictWord{133, 0, 413}, + dictWord{7, 11, 1375}, + dictWord{7, 11, 1466}, + dictWord{138, 11, 331}, + dictWord{136, 0, 495}, + dictWord{6, 11, 540}, + dictWord{136, 11, 136}, + dictWord{7, 0, 54}, + dictWord{8, 0, 312}, + dictWord{10, 0, 191}, + dictWord{10, 0, 614}, + dictWord{140, 0, 567}, + dictWord{6, 0, 468}, + dictWord{7, 0, 567}, + dictWord{7, 0, 1478}, + dictWord{ + 8, + 0, + 530, + }, + dictWord{14, 0, 290}, + dictWord{133, 11, 999}, + dictWord{4, 11, 299}, + dictWord{7, 10, 306}, + dictWord{135, 11, 1004}, + dictWord{142, 11, 296}, + dictWord{134, 0, 1484}, + dictWord{133, 10, 979}, + dictWord{6, 0, 609}, + dictWord{9, 0, 815}, + dictWord{12, 11, 137}, + dictWord{14, 11, 9}, + dictWord{14, 11, 24}, + dictWord{142, 11, 64}, + dictWord{133, 11, 456}, + dictWord{6, 0, 484}, + dictWord{135, 0, 822}, + dictWord{133, 10, 178}, + dictWord{136, 11, 180}, + dictWord{ + 132, + 11, + 755, + }, + dictWord{137, 0, 900}, + dictWord{135, 0, 1335}, + dictWord{6, 0, 1724}, + dictWord{135, 0, 2022}, + dictWord{135, 11, 1139}, + dictWord{5, 0, 640}, + dictWord{132, 10, 390}, + dictWord{6, 0, 1831}, + dictWord{138, 11, 633}, + dictWord{135, 11, 566}, + dictWord{4, 11, 890}, + dictWord{5, 11, 805}, + dictWord{5, 11, 819}, + dictWord{5, 11, 961}, + dictWord{6, 11, 396}, + dictWord{6, 11, 1631}, + dictWord{6, 11, 1678}, + dictWord{7, 11, 1967}, + dictWord{7, 11, 2041}, + dictWord{ + 9, + 11, + 630, + }, + dictWord{11, 11, 8}, + dictWord{11, 11, 1019}, + dictWord{12, 11, 176}, + dictWord{13, 11, 225}, + dictWord{14, 11, 292}, + dictWord{149, 11, 24}, + dictWord{ + 132, + 0, + 474, + }, + dictWord{134, 0, 1103}, + dictWord{135, 0, 1504}, + dictWord{134, 0, 1576}, + dictWord{6, 0, 961}, + dictWord{6, 0, 1034}, + dictWord{140, 0, 655}, + dictWord{11, 11, 514}, + dictWord{149, 11, 20}, + dictWord{5, 0, 305}, + dictWord{135, 11, 1815}, + dictWord{7, 11, 1505}, + dictWord{10, 11, 190}, + dictWord{ + 10, + 11, + 634, + }, + dictWord{11, 11, 792}, + dictWord{12, 11, 358}, + dictWord{140, 11, 447}, + dictWord{5, 11, 0}, + dictWord{6, 11, 536}, + dictWord{7, 11, 604}, + dictWord{ + 13, + 11, + 445, + }, + dictWord{145, 11, 126}, + dictWord{7, 0, 1236}, + dictWord{133, 10, 105}, + dictWord{4, 0, 480}, + dictWord{6, 0, 217}, + dictWord{6, 0, 302}, + dictWord{ + 6, + 0, + 1642, + }, + dictWord{7, 0, 130}, + dictWord{7, 0, 837}, + dictWord{7, 0, 1321}, + dictWord{7, 0, 1547}, + dictWord{7, 0, 1657}, + dictWord{8, 0, 429}, + dictWord{9, 0, 228}, + dictWord{13, 0, 289}, + dictWord{13, 0, 343}, + dictWord{19, 0, 101}, + dictWord{6, 11, 232}, + dictWord{6, 11, 412}, + dictWord{7, 11, 1074}, + dictWord{8, 11, 9}, + dictWord{ + 8, + 11, + 157, + }, + dictWord{8, 11, 786}, + dictWord{9, 11, 196}, + dictWord{9, 11, 352}, + dictWord{9, 11, 457}, + dictWord{10, 11, 337}, + dictWord{11, 11, 232}, + dictWord{ + 11, + 11, + 877, + }, + dictWord{12, 11, 480}, + dictWord{140, 11, 546}, + dictWord{5, 10, 438}, + dictWord{7, 11, 958}, + dictWord{9, 10, 694}, + dictWord{12, 10, 627}, + dictWord{ + 13, + 11, + 38, + }, + dictWord{141, 10, 210}, + dictWord{4, 11, 382}, + dictWord{136, 11, 579}, + dictWord{7, 0, 278}, + dictWord{10, 0, 739}, + dictWord{11, 0, 708}, + dictWord{ + 141, + 0, + 348, + }, + dictWord{4, 11, 212}, + dictWord{135, 11, 1206}, + dictWord{135, 11, 1898}, + dictWord{6, 0, 708}, + dictWord{6, 0, 1344}, + dictWord{152, 10, 11}, + dictWord{137, 11, 768}, + dictWord{134, 0, 1840}, + dictWord{140, 0, 233}, + dictWord{8, 10, 
25}, + dictWord{138, 10, 826}, + dictWord{6, 0, 2017}, + dictWord{ + 133, + 11, + 655, + }, + dictWord{6, 0, 1488}, + dictWord{139, 11, 290}, + dictWord{132, 10, 308}, + dictWord{134, 0, 1590}, + dictWord{134, 0, 1800}, + dictWord{134, 0, 1259}, + dictWord{16, 0, 28}, + dictWord{6, 11, 231}, + dictWord{7, 11, 95}, + dictWord{136, 11, 423}, + dictWord{133, 11, 300}, + dictWord{135, 10, 150}, + dictWord{ + 136, + 10, + 649, + }, + dictWord{7, 11, 1874}, + dictWord{137, 11, 641}, + dictWord{6, 11, 237}, + dictWord{7, 11, 611}, + dictWord{8, 11, 100}, + dictWord{9, 11, 416}, + dictWord{ + 11, + 11, + 335, + }, + dictWord{12, 11, 173}, + dictWord{146, 11, 101}, + dictWord{137, 0, 45}, + dictWord{134, 10, 521}, + dictWord{17, 0, 36}, + dictWord{14, 11, 26}, + dictWord{ + 146, + 11, + 150, + }, + dictWord{7, 0, 1442}, + dictWord{14, 0, 22}, + dictWord{5, 10, 339}, + dictWord{15, 10, 41}, + dictWord{15, 10, 166}, + dictWord{147, 10, 66}, + dictWord{ + 8, + 0, + 378, + }, + dictWord{6, 11, 581}, + dictWord{135, 11, 1119}, + dictWord{134, 0, 1507}, + dictWord{147, 11, 117}, + dictWord{139, 0, 39}, + dictWord{134, 0, 1054}, + dictWord{6, 0, 363}, + dictWord{7, 0, 1955}, + dictWord{136, 0, 725}, + dictWord{134, 0, 2036}, + dictWord{133, 11, 199}, + dictWord{6, 0, 1871}, + dictWord{9, 0, 935}, + dictWord{9, 0, 961}, + dictWord{9, 0, 1004}, + dictWord{9, 0, 1016}, + dictWord{12, 0, 805}, + dictWord{12, 0, 852}, + dictWord{12, 0, 853}, + dictWord{12, 0, 869}, + dictWord{ + 12, + 0, + 882, + }, + dictWord{12, 0, 896}, + dictWord{12, 0, 906}, + dictWord{12, 0, 917}, + dictWord{12, 0, 940}, + dictWord{15, 0, 170}, + dictWord{15, 0, 176}, + dictWord{ + 15, + 0, + 188, + }, + dictWord{15, 0, 201}, + dictWord{15, 0, 205}, + dictWord{15, 0, 212}, + dictWord{15, 0, 234}, + dictWord{15, 0, 244}, + dictWord{18, 0, 181}, + dictWord{18, 0, 193}, + dictWord{18, 0, 196}, + dictWord{18, 0, 201}, + dictWord{18, 0, 202}, + dictWord{18, 0, 210}, + dictWord{18, 0, 217}, + dictWord{18, 0, 235}, + dictWord{18, 0, 236}, + dictWord{18, 0, 237}, + dictWord{21, 0, 54}, + dictWord{21, 0, 55}, + dictWord{21, 0, 58}, + dictWord{21, 0, 59}, + dictWord{152, 0, 22}, + dictWord{134, 10, 1628}, + dictWord{ + 137, + 0, + 805, + }, + dictWord{5, 0, 813}, + dictWord{135, 0, 2046}, + dictWord{142, 11, 42}, + dictWord{5, 0, 712}, + dictWord{6, 0, 1240}, + dictWord{11, 0, 17}, + dictWord{ + 13, + 0, + 321, + }, + dictWord{144, 0, 67}, + dictWord{132, 0, 617}, + dictWord{135, 10, 829}, + dictWord{6, 0, 320}, + dictWord{7, 0, 781}, + dictWord{7, 0, 1921}, + dictWord{9, 0, 55}, + dictWord{10, 0, 186}, + dictWord{10, 0, 273}, + dictWord{10, 0, 664}, + dictWord{10, 0, 801}, + dictWord{11, 0, 996}, + dictWord{11, 0, 997}, + dictWord{13, 0, 157}, + dictWord{142, 0, 170}, + dictWord{136, 0, 271}, + dictWord{5, 10, 486}, + dictWord{135, 10, 1349}, + dictWord{18, 11, 91}, + dictWord{147, 11, 70}, + dictWord{10, 0, 445}, + dictWord{7, 10, 1635}, + dictWord{8, 10, 17}, + dictWord{138, 10, 295}, + dictWord{136, 11, 404}, + dictWord{7, 0, 103}, + dictWord{7, 0, 863}, + dictWord{11, 0, 184}, + dictWord{145, 0, 62}, + dictWord{138, 10, 558}, + dictWord{137, 0, 659}, + dictWord{6, 11, 312}, + dictWord{6, 11, 1715}, + dictWord{10, 11, 584}, + dictWord{ + 11, + 11, + 546, + }, + dictWord{11, 11, 692}, + dictWord{12, 11, 259}, + dictWord{12, 11, 295}, + dictWord{13, 11, 46}, + dictWord{141, 11, 154}, + dictWord{134, 0, 676}, + dictWord{132, 11, 588}, + dictWord{4, 11, 231}, + dictWord{5, 11, 61}, + dictWord{6, 11, 104}, + dictWord{7, 11, 729}, + dictWord{7, 11, 
964}, + dictWord{7, 11, 1658}, + dictWord{140, 11, 414}, + dictWord{6, 11, 263}, + dictWord{138, 11, 757}, + dictWord{11, 0, 337}, + dictWord{142, 0, 303}, + dictWord{135, 11, 1363}, + dictWord{ + 132, + 11, + 320, + }, + dictWord{140, 0, 506}, + dictWord{134, 10, 447}, + dictWord{5, 0, 77}, + dictWord{7, 0, 1455}, + dictWord{10, 0, 843}, + dictWord{147, 0, 73}, + dictWord{ + 7, + 10, + 577, + }, + dictWord{7, 10, 1432}, + dictWord{9, 10, 475}, + dictWord{9, 10, 505}, + dictWord{9, 10, 526}, + dictWord{9, 10, 609}, + dictWord{9, 10, 689}, + dictWord{ + 9, + 10, + 726, + }, + dictWord{9, 10, 735}, + dictWord{9, 10, 738}, + dictWord{10, 10, 556}, + dictWord{10, 10, 674}, + dictWord{10, 10, 684}, + dictWord{11, 10, 89}, + dictWord{ + 11, + 10, + 202, + }, + dictWord{11, 10, 272}, + dictWord{11, 10, 380}, + dictWord{11, 10, 415}, + dictWord{11, 10, 505}, + dictWord{11, 10, 537}, + dictWord{11, 10, 550}, + dictWord{11, 10, 562}, + dictWord{11, 10, 640}, + dictWord{11, 10, 667}, + dictWord{11, 10, 688}, + dictWord{11, 10, 847}, + dictWord{11, 10, 927}, + dictWord{ + 11, + 10, + 930, + }, + dictWord{11, 10, 940}, + dictWord{12, 10, 144}, + dictWord{12, 10, 325}, + dictWord{12, 10, 329}, + dictWord{12, 10, 389}, + dictWord{12, 10, 403}, + dictWord{ + 12, + 10, + 451, + }, + dictWord{12, 10, 515}, + dictWord{12, 10, 604}, + dictWord{12, 10, 616}, + dictWord{12, 10, 626}, + dictWord{13, 10, 66}, + dictWord{13, 10, 131}, + dictWord{13, 10, 167}, + dictWord{13, 10, 236}, + dictWord{13, 10, 368}, + dictWord{13, 10, 411}, + dictWord{13, 10, 434}, + dictWord{13, 10, 453}, + dictWord{ + 13, + 10, + 461, + }, + dictWord{13, 10, 474}, + dictWord{14, 10, 59}, + dictWord{14, 10, 60}, + dictWord{14, 10, 139}, + dictWord{14, 10, 152}, + dictWord{14, 10, 276}, + dictWord{ + 14, + 10, + 353, + }, + dictWord{14, 10, 402}, + dictWord{15, 10, 28}, + dictWord{15, 10, 81}, + dictWord{15, 10, 123}, + dictWord{15, 10, 152}, + dictWord{18, 10, 136}, + dictWord{148, 10, 88}, + dictWord{132, 0, 458}, + dictWord{135, 0, 1420}, + dictWord{6, 0, 109}, + dictWord{10, 0, 382}, + dictWord{4, 11, 405}, + dictWord{4, 10, 609}, + dictWord{7, 10, 756}, + dictWord{7, 11, 817}, + dictWord{9, 10, 544}, + dictWord{11, 10, 413}, + dictWord{14, 11, 58}, + dictWord{14, 10, 307}, + dictWord{16, 10, 25}, + dictWord{17, 11, 37}, + dictWord{146, 11, 124}, + dictWord{6, 0, 330}, + dictWord{7, 0, 1084}, + dictWord{11, 0, 142}, + dictWord{133, 11, 974}, + dictWord{4, 10, 930}, + dictWord{133, 10, 947}, + dictWord{5, 10, 939}, + dictWord{142, 11, 394}, + dictWord{16, 0, 91}, + dictWord{145, 0, 87}, + dictWord{5, 11, 235}, + dictWord{5, 10, 962}, + dictWord{7, 11, 1239}, + dictWord{11, 11, 131}, + dictWord{140, 11, 370}, + dictWord{11, 0, 492}, + dictWord{5, 10, 651}, + dictWord{8, 10, 170}, + dictWord{9, 10, 61}, + dictWord{9, 10, 63}, + dictWord{10, 10, 23}, + dictWord{10, 10, 37}, + dictWord{10, 10, 834}, + dictWord{11, 10, 4}, + dictWord{11, 10, 281}, + dictWord{11, 10, 503}, + dictWord{ + 11, + 10, + 677, + }, + dictWord{12, 10, 96}, + dictWord{12, 10, 130}, + dictWord{12, 10, 244}, + dictWord{14, 10, 5}, + dictWord{14, 10, 40}, + dictWord{14, 10, 162}, + dictWord{ + 14, + 10, + 202, + }, + dictWord{146, 10, 133}, + dictWord{4, 10, 406}, + dictWord{5, 10, 579}, + dictWord{12, 10, 492}, + dictWord{150, 10, 15}, + dictWord{9, 11, 137}, + dictWord{138, 11, 221}, + dictWord{134, 0, 1239}, + dictWord{11, 0, 211}, + dictWord{140, 0, 145}, + dictWord{7, 11, 390}, + dictWord{138, 11, 140}, + dictWord{ + 135, + 11, + 1418, + }, + dictWord{135, 11, 
1144}, + dictWord{134, 0, 1049}, + dictWord{7, 0, 321}, + dictWord{6, 10, 17}, + dictWord{7, 10, 1001}, + dictWord{7, 10, 1982}, + dictWord{ + 9, + 10, + 886, + }, + dictWord{10, 10, 489}, + dictWord{10, 10, 800}, + dictWord{11, 10, 782}, + dictWord{12, 10, 320}, + dictWord{13, 10, 467}, + dictWord{14, 10, 145}, + dictWord{14, 10, 387}, + dictWord{143, 10, 119}, + dictWord{145, 10, 17}, + dictWord{5, 11, 407}, + dictWord{11, 11, 489}, + dictWord{19, 11, 37}, + dictWord{20, 11, 73}, + dictWord{150, 11, 38}, + dictWord{133, 10, 458}, + dictWord{135, 0, 1985}, + dictWord{7, 10, 1983}, + dictWord{8, 10, 0}, + dictWord{8, 10, 171}, + dictWord{ + 9, + 10, + 120, + }, + dictWord{9, 10, 732}, + dictWord{10, 10, 473}, + dictWord{11, 10, 656}, + dictWord{11, 10, 998}, + dictWord{18, 10, 0}, + dictWord{18, 10, 2}, + dictWord{ + 147, + 10, + 21, + }, + dictWord{5, 11, 325}, + dictWord{7, 11, 1483}, + dictWord{8, 11, 5}, + dictWord{8, 11, 227}, + dictWord{9, 11, 105}, + dictWord{10, 11, 585}, + dictWord{ + 140, + 11, + 614, + }, + dictWord{136, 0, 122}, + dictWord{132, 0, 234}, + dictWord{135, 11, 1196}, + dictWord{6, 0, 976}, + dictWord{6, 0, 1098}, + dictWord{134, 0, 1441}, + dictWord{ + 7, + 0, + 253, + }, + dictWord{136, 0, 549}, + dictWord{6, 11, 621}, + dictWord{13, 11, 504}, + dictWord{144, 11, 19}, + dictWord{132, 10, 519}, + dictWord{5, 0, 430}, + dictWord{ + 5, + 0, + 932, + }, + dictWord{6, 0, 131}, + dictWord{7, 0, 417}, + dictWord{9, 0, 522}, + dictWord{11, 0, 314}, + dictWord{141, 0, 390}, + dictWord{14, 0, 149}, + dictWord{14, 0, 399}, + dictWord{143, 0, 57}, + dictWord{5, 10, 907}, + dictWord{6, 10, 31}, + dictWord{6, 11, 218}, + dictWord{7, 10, 491}, + dictWord{7, 10, 530}, + dictWord{8, 10, 592}, + dictWord{11, 10, 53}, + dictWord{11, 10, 779}, + dictWord{12, 10, 167}, + dictWord{12, 10, 411}, + dictWord{14, 10, 14}, + dictWord{14, 10, 136}, + dictWord{15, 10, 72}, + dictWord{16, 10, 17}, + dictWord{144, 10, 72}, + dictWord{140, 11, 330}, + dictWord{7, 11, 454}, + dictWord{7, 11, 782}, + dictWord{136, 11, 768}, + dictWord{ + 132, + 0, + 507, + }, + dictWord{10, 11, 676}, + dictWord{140, 11, 462}, + dictWord{6, 0, 630}, + dictWord{9, 0, 811}, + dictWord{4, 10, 208}, + dictWord{5, 10, 106}, + dictWord{ + 6, + 10, + 531, + }, + dictWord{8, 10, 408}, + dictWord{9, 10, 188}, + dictWord{138, 10, 572}, + dictWord{4, 0, 343}, + dictWord{5, 0, 511}, + dictWord{134, 10, 1693}, + dictWord{ + 134, + 11, + 164, + }, + dictWord{132, 0, 448}, + dictWord{7, 0, 455}, + dictWord{138, 0, 591}, + dictWord{135, 0, 1381}, + dictWord{12, 10, 441}, + dictWord{150, 11, 50}, + dictWord{9, 10, 449}, + dictWord{10, 10, 192}, + dictWord{138, 10, 740}, + dictWord{6, 0, 575}, + dictWord{132, 10, 241}, + dictWord{134, 0, 1175}, + dictWord{ + 134, + 0, + 653, + }, + dictWord{134, 0, 1761}, + dictWord{134, 0, 1198}, + dictWord{132, 10, 259}, + dictWord{6, 11, 343}, + dictWord{7, 11, 195}, + dictWord{9, 11, 226}, + dictWord{ + 10, + 11, + 197, + }, + dictWord{10, 11, 575}, + dictWord{11, 11, 502}, + dictWord{139, 11, 899}, + dictWord{7, 0, 1127}, + dictWord{7, 0, 1572}, + dictWord{10, 0, 297}, + dictWord{10, 0, 422}, + dictWord{11, 0, 764}, + dictWord{11, 0, 810}, + dictWord{12, 0, 264}, + dictWord{13, 0, 102}, + dictWord{13, 0, 300}, + dictWord{13, 0, 484}, + dictWord{ + 14, + 0, + 147, + }, + dictWord{14, 0, 229}, + dictWord{17, 0, 71}, + dictWord{18, 0, 118}, + dictWord{147, 0, 120}, + dictWord{135, 11, 666}, + dictWord{132, 0, 678}, + dictWord{ + 4, + 10, + 173, + }, + dictWord{5, 10, 312}, + dictWord{5, 10, 512}, 
+ dictWord{135, 10, 1285}, + dictWord{7, 10, 1603}, + dictWord{7, 10, 1691}, + dictWord{9, 10, 464}, + dictWord{11, 10, 195}, + dictWord{12, 10, 279}, + dictWord{12, 10, 448}, + dictWord{14, 10, 11}, + dictWord{147, 10, 102}, + dictWord{16, 0, 99}, + dictWord{146, 0, 164}, + dictWord{7, 11, 1125}, + dictWord{9, 11, 143}, + dictWord{11, 11, 61}, + dictWord{14, 11, 405}, + dictWord{150, 11, 21}, + dictWord{137, 11, 260}, + dictWord{ + 4, + 10, + 452, + }, + dictWord{5, 10, 583}, + dictWord{5, 10, 817}, + dictWord{6, 10, 433}, + dictWord{7, 10, 593}, + dictWord{7, 10, 720}, + dictWord{7, 10, 1378}, + dictWord{ + 8, + 10, + 161, + }, + dictWord{9, 10, 284}, + dictWord{10, 10, 313}, + dictWord{139, 10, 886}, + dictWord{132, 10, 547}, + dictWord{136, 10, 722}, + dictWord{14, 0, 35}, + dictWord{142, 0, 191}, + dictWord{141, 0, 45}, + dictWord{138, 0, 121}, + dictWord{132, 0, 125}, + dictWord{134, 0, 1622}, + dictWord{133, 11, 959}, + dictWord{ + 8, + 10, + 420, + }, + dictWord{139, 10, 193}, + dictWord{132, 0, 721}, + dictWord{135, 10, 409}, + dictWord{136, 0, 145}, + dictWord{7, 0, 792}, + dictWord{8, 0, 147}, + dictWord{ + 10, + 0, + 821, + }, + dictWord{11, 0, 970}, + dictWord{11, 0, 1021}, + dictWord{136, 11, 173}, + dictWord{134, 11, 266}, + dictWord{132, 0, 715}, + dictWord{7, 0, 1999}, + dictWord{138, 10, 308}, + dictWord{133, 0, 531}, + dictWord{5, 0, 168}, + dictWord{5, 0, 930}, + dictWord{8, 0, 74}, + dictWord{9, 0, 623}, + dictWord{12, 0, 500}, + dictWord{ + 140, + 0, + 579, + }, + dictWord{144, 0, 65}, + dictWord{138, 11, 246}, + dictWord{6, 0, 220}, + dictWord{7, 0, 1101}, + dictWord{13, 0, 105}, + dictWord{142, 11, 314}, + dictWord{ + 5, + 10, + 1002, + }, + dictWord{136, 10, 745}, + dictWord{134, 0, 960}, + dictWord{20, 0, 0}, + dictWord{148, 11, 0}, + dictWord{4, 0, 1005}, + dictWord{4, 10, 239}, + dictWord{ + 6, + 10, + 477, + }, + dictWord{7, 10, 1607}, + dictWord{11, 10, 68}, + dictWord{139, 10, 617}, + dictWord{6, 0, 19}, + dictWord{7, 0, 1413}, + dictWord{139, 0, 428}, + dictWord{ + 149, + 10, + 13, + }, + dictWord{7, 0, 96}, + dictWord{8, 0, 401}, + dictWord{8, 0, 703}, + dictWord{9, 0, 896}, + dictWord{136, 11, 300}, + dictWord{134, 0, 1595}, + dictWord{145, 0, 116}, + dictWord{136, 0, 1021}, + dictWord{7, 0, 1961}, + dictWord{7, 0, 1965}, + dictWord{7, 0, 2030}, + dictWord{8, 0, 150}, + dictWord{8, 0, 702}, + dictWord{8, 0, 737}, + dictWord{ + 8, + 0, + 750, + }, + dictWord{140, 0, 366}, + dictWord{11, 11, 75}, + dictWord{142, 11, 267}, + dictWord{132, 10, 367}, + dictWord{8, 0, 800}, + dictWord{9, 0, 148}, + dictWord{ + 9, + 0, + 872, + }, + dictWord{9, 0, 890}, + dictWord{11, 0, 309}, + dictWord{11, 0, 1001}, + dictWord{13, 0, 267}, + dictWord{13, 0, 323}, + dictWord{5, 11, 427}, + dictWord{ + 5, + 11, + 734, + }, + dictWord{7, 11, 478}, + dictWord{136, 11, 52}, + dictWord{7, 11, 239}, + dictWord{11, 11, 217}, + dictWord{142, 11, 165}, + dictWord{132, 11, 323}, + dictWord{140, 11, 419}, + dictWord{13, 0, 299}, + dictWord{142, 0, 75}, + dictWord{6, 11, 87}, + dictWord{6, 11, 1734}, + dictWord{7, 11, 20}, + dictWord{7, 11, 1056}, + dictWord{ + 8, + 11, + 732, + }, + dictWord{9, 11, 406}, + dictWord{9, 11, 911}, + dictWord{138, 11, 694}, + dictWord{134, 0, 1383}, + dictWord{132, 10, 694}, + dictWord{ + 133, + 11, + 613, + }, + dictWord{137, 0, 779}, + dictWord{4, 0, 598}, + dictWord{140, 10, 687}, + dictWord{6, 0, 970}, + dictWord{135, 0, 424}, + dictWord{133, 0, 547}, + dictWord{ + 7, + 11, + 32, + }, + dictWord{7, 11, 984}, + dictWord{8, 11, 85}, + dictWord{8, 11, 709}, + 
dictWord{9, 11, 579}, + dictWord{9, 11, 847}, + dictWord{9, 11, 856}, + dictWord{10, 11, 799}, + dictWord{11, 11, 258}, + dictWord{11, 11, 1007}, + dictWord{12, 11, 331}, + dictWord{12, 11, 615}, + dictWord{13, 11, 188}, + dictWord{13, 11, 435}, + dictWord{ + 14, + 11, + 8, + }, + dictWord{15, 11, 165}, + dictWord{16, 11, 27}, + dictWord{148, 11, 40}, + dictWord{6, 0, 1222}, + dictWord{134, 0, 1385}, + dictWord{132, 0, 876}, + dictWord{ + 138, + 11, + 151, + }, + dictWord{135, 10, 213}, + dictWord{4, 11, 167}, + dictWord{135, 11, 82}, + dictWord{133, 0, 133}, + dictWord{6, 11, 24}, + dictWord{7, 11, 74}, + dictWord{ + 7, + 11, + 678, + }, + dictWord{137, 11, 258}, + dictWord{5, 11, 62}, + dictWord{6, 11, 534}, + dictWord{7, 11, 684}, + dictWord{7, 11, 1043}, + dictWord{7, 11, 1072}, + dictWord{ + 8, + 11, + 280, + }, + dictWord{8, 11, 541}, + dictWord{8, 11, 686}, + dictWord{10, 11, 519}, + dictWord{11, 11, 252}, + dictWord{140, 11, 282}, + dictWord{136, 0, 187}, + dictWord{8, 0, 8}, + dictWord{10, 0, 0}, + dictWord{10, 0, 818}, + dictWord{139, 0, 988}, + dictWord{132, 11, 359}, + dictWord{11, 0, 429}, + dictWord{15, 0, 51}, + dictWord{ + 135, + 10, + 1672, + }, + dictWord{136, 0, 685}, + dictWord{5, 11, 211}, + dictWord{7, 11, 88}, + dictWord{136, 11, 627}, + dictWord{134, 0, 472}, + dictWord{136, 0, 132}, + dictWord{ + 6, + 11, + 145, + }, + dictWord{141, 11, 336}, + dictWord{4, 10, 751}, + dictWord{11, 10, 390}, + dictWord{140, 10, 32}, + dictWord{6, 0, 938}, + dictWord{6, 0, 1060}, + dictWord{ + 4, + 11, + 263, + }, + dictWord{4, 10, 409}, + dictWord{133, 10, 78}, + dictWord{137, 0, 874}, + dictWord{8, 0, 774}, + dictWord{10, 0, 670}, + dictWord{12, 0, 51}, + dictWord{ + 4, + 11, + 916, + }, + dictWord{6, 10, 473}, + dictWord{7, 10, 1602}, + dictWord{10, 10, 698}, + dictWord{12, 10, 212}, + dictWord{13, 10, 307}, + dictWord{145, 10, 105}, + dictWord{146, 0, 92}, + dictWord{143, 10, 156}, + dictWord{132, 0, 830}, + dictWord{137, 0, 701}, + dictWord{4, 11, 599}, + dictWord{6, 11, 1634}, + dictWord{7, 11, 5}, + dictWord{7, 11, 55}, + dictWord{7, 11, 67}, + dictWord{7, 11, 97}, + dictWord{7, 11, 691}, + dictWord{7, 11, 979}, + dictWord{7, 11, 1697}, + dictWord{8, 11, 207}, + dictWord{ + 8, + 11, + 214, + }, + dictWord{8, 11, 231}, + dictWord{8, 11, 294}, + dictWord{8, 11, 336}, + dictWord{8, 11, 428}, + dictWord{8, 11, 451}, + dictWord{8, 11, 460}, + dictWord{8, 11, 471}, + dictWord{8, 11, 622}, + dictWord{8, 11, 626}, + dictWord{8, 11, 679}, + dictWord{8, 11, 759}, + dictWord{8, 11, 829}, + dictWord{9, 11, 11}, + dictWord{9, 11, 246}, + dictWord{ + 9, + 11, + 484, + }, + dictWord{9, 11, 573}, + dictWord{9, 11, 706}, + dictWord{9, 11, 762}, + dictWord{9, 11, 798}, + dictWord{9, 11, 855}, + dictWord{9, 11, 870}, + dictWord{ + 9, + 11, + 912, + }, + dictWord{10, 11, 303}, + dictWord{10, 11, 335}, + dictWord{10, 11, 424}, + dictWord{10, 11, 461}, + dictWord{10, 11, 543}, + dictWord{10, 11, 759}, + dictWord{10, 11, 814}, + dictWord{11, 11, 59}, + dictWord{11, 11, 199}, + dictWord{11, 11, 235}, + dictWord{11, 11, 475}, + dictWord{11, 11, 590}, + dictWord{11, 11, 929}, + dictWord{11, 11, 963}, + dictWord{12, 11, 114}, + dictWord{12, 11, 182}, + dictWord{12, 11, 226}, + dictWord{12, 11, 332}, + dictWord{12, 11, 439}, + dictWord{ + 12, + 11, + 575, + }, + dictWord{12, 11, 598}, + dictWord{13, 11, 8}, + dictWord{13, 11, 125}, + dictWord{13, 11, 194}, + dictWord{13, 11, 287}, + dictWord{14, 11, 197}, + dictWord{ + 14, + 11, + 383, + }, + dictWord{15, 11, 53}, + dictWord{17, 11, 63}, + dictWord{19, 
11, 46}, + dictWord{19, 11, 98}, + dictWord{19, 11, 106}, + dictWord{148, 11, 85}, + dictWord{ + 4, + 0, + 127, + }, + dictWord{5, 0, 350}, + dictWord{6, 0, 356}, + dictWord{8, 0, 426}, + dictWord{9, 0, 572}, + dictWord{10, 0, 247}, + dictWord{139, 0, 312}, + dictWord{134, 0, 1215}, + dictWord{6, 0, 59}, + dictWord{9, 0, 603}, + dictWord{13, 0, 397}, + dictWord{7, 11, 1853}, + dictWord{138, 11, 437}, + dictWord{134, 0, 1762}, + dictWord{ + 147, + 11, + 126, + }, + dictWord{135, 10, 883}, + dictWord{13, 0, 293}, + dictWord{142, 0, 56}, + dictWord{133, 10, 617}, + dictWord{139, 10, 50}, + dictWord{5, 11, 187}, + dictWord{ + 7, + 10, + 1518, + }, + dictWord{139, 10, 694}, + dictWord{135, 0, 441}, + dictWord{6, 0, 111}, + dictWord{7, 0, 4}, + dictWord{8, 0, 163}, + dictWord{8, 0, 776}, + dictWord{ + 138, + 0, + 566, + }, + dictWord{132, 0, 806}, + dictWord{4, 11, 215}, + dictWord{9, 11, 38}, + dictWord{10, 11, 3}, + dictWord{11, 11, 23}, + dictWord{11, 11, 127}, + dictWord{ + 139, + 11, + 796, + }, + dictWord{14, 0, 233}, + dictWord{4, 10, 546}, + dictWord{135, 10, 2042}, + dictWord{135, 0, 1994}, + dictWord{134, 0, 1739}, + dictWord{135, 11, 1530}, + dictWord{136, 0, 393}, + dictWord{5, 0, 297}, + dictWord{7, 0, 1038}, + dictWord{14, 0, 359}, + dictWord{19, 0, 52}, + dictWord{148, 0, 47}, + dictWord{135, 0, 309}, + dictWord{ + 4, + 10, + 313, + }, + dictWord{133, 10, 577}, + dictWord{8, 10, 184}, + dictWord{141, 10, 433}, + dictWord{135, 10, 935}, + dictWord{12, 10, 186}, + dictWord{ + 12, + 10, + 292, + }, + dictWord{14, 10, 100}, + dictWord{146, 10, 70}, + dictWord{136, 0, 363}, + dictWord{14, 0, 175}, + dictWord{11, 10, 402}, + dictWord{12, 10, 109}, + dictWord{ + 12, + 10, + 431, + }, + dictWord{13, 10, 179}, + dictWord{13, 10, 206}, + dictWord{14, 10, 217}, + dictWord{16, 10, 3}, + dictWord{148, 10, 53}, + dictWord{5, 10, 886}, + dictWord{ + 6, + 10, + 46, + }, + dictWord{6, 10, 1790}, + dictWord{7, 10, 14}, + dictWord{7, 10, 732}, + dictWord{7, 10, 1654}, + dictWord{8, 10, 95}, + dictWord{8, 10, 327}, + dictWord{ + 8, + 10, + 616, + }, + dictWord{9, 10, 892}, + dictWord{10, 10, 598}, + dictWord{10, 10, 769}, + dictWord{11, 10, 134}, + dictWord{11, 10, 747}, + dictWord{12, 10, 378}, + dictWord{ + 142, + 10, + 97, + }, + dictWord{136, 0, 666}, + dictWord{135, 0, 1675}, + dictWord{6, 0, 655}, + dictWord{134, 0, 1600}, + dictWord{135, 0, 808}, + dictWord{133, 10, 1021}, + dictWord{4, 11, 28}, + dictWord{5, 11, 440}, + dictWord{7, 11, 248}, + dictWord{11, 11, 833}, + dictWord{140, 11, 344}, + dictWord{134, 11, 1654}, + dictWord{ + 132, + 0, + 280, + }, + dictWord{140, 0, 54}, + dictWord{4, 0, 421}, + dictWord{133, 0, 548}, + dictWord{132, 10, 153}, + dictWord{6, 11, 339}, + dictWord{135, 11, 923}, + dictWord{ + 133, + 11, + 853, + }, + dictWord{133, 10, 798}, + dictWord{132, 10, 587}, + dictWord{6, 11, 249}, + dictWord{7, 11, 1234}, + dictWord{139, 11, 573}, + dictWord{6, 10, 598}, + dictWord{7, 10, 42}, + dictWord{8, 10, 695}, + dictWord{10, 10, 212}, + dictWord{11, 10, 158}, + dictWord{14, 10, 196}, + dictWord{145, 10, 85}, + dictWord{7, 0, 249}, + dictWord{5, 10, 957}, + dictWord{133, 10, 1008}, + dictWord{4, 10, 129}, + dictWord{135, 10, 465}, + dictWord{6, 0, 254}, + dictWord{7, 0, 842}, + dictWord{7, 0, 1659}, + dictWord{9, 0, 109}, + dictWord{10, 0, 103}, + dictWord{7, 10, 908}, + dictWord{7, 10, 1201}, + dictWord{9, 10, 755}, + dictWord{11, 10, 906}, + dictWord{12, 10, 527}, + dictWord{146, 10, 7}, + dictWord{5, 0, 262}, + dictWord{136, 10, 450}, + dictWord{144, 0, 1}, + dictWord{10, 
11, 201}, + dictWord{142, 11, 319}, + dictWord{7, 11, 49}, + dictWord{ + 7, + 11, + 392, + }, + dictWord{8, 11, 20}, + dictWord{8, 11, 172}, + dictWord{8, 11, 690}, + dictWord{9, 11, 383}, + dictWord{9, 11, 845}, + dictWord{10, 11, 48}, + dictWord{ + 11, + 11, + 293, + }, + dictWord{11, 11, 832}, + dictWord{11, 11, 920}, + dictWord{141, 11, 221}, + dictWord{5, 11, 858}, + dictWord{133, 11, 992}, + dictWord{134, 0, 805}, + dictWord{139, 10, 1003}, + dictWord{6, 0, 1630}, + dictWord{134, 11, 307}, + dictWord{7, 11, 1512}, + dictWord{135, 11, 1794}, + dictWord{6, 11, 268}, + dictWord{ + 137, + 11, + 62, + }, + dictWord{135, 10, 1868}, + dictWord{133, 0, 671}, + dictWord{4, 0, 989}, + dictWord{8, 0, 972}, + dictWord{136, 0, 998}, + dictWord{132, 11, 423}, + dictWord{132, 0, 889}, + dictWord{135, 0, 1382}, + dictWord{135, 0, 1910}, + dictWord{7, 10, 965}, + dictWord{7, 10, 1460}, + dictWord{135, 10, 1604}, + dictWord{ + 4, + 0, + 627, + }, + dictWord{5, 0, 775}, + dictWord{138, 11, 106}, + dictWord{134, 11, 348}, + dictWord{7, 0, 202}, + dictWord{11, 0, 362}, + dictWord{11, 0, 948}, + dictWord{ + 140, + 0, + 388, + }, + dictWord{138, 11, 771}, + dictWord{6, 11, 613}, + dictWord{136, 11, 223}, + dictWord{6, 0, 560}, + dictWord{7, 0, 451}, + dictWord{8, 0, 389}, + dictWord{ + 12, + 0, + 490, + }, + dictWord{13, 0, 16}, + dictWord{13, 0, 215}, + dictWord{13, 0, 351}, + dictWord{18, 0, 132}, + dictWord{147, 0, 125}, + dictWord{135, 0, 841}, + dictWord{ + 136, + 0, + 566, + }, + dictWord{136, 0, 938}, + dictWord{132, 11, 670}, + dictWord{5, 0, 912}, + dictWord{6, 0, 1695}, + dictWord{140, 11, 55}, + dictWord{9, 11, 40}, + dictWord{ + 139, + 11, + 136, + }, + dictWord{7, 0, 1361}, + dictWord{7, 10, 982}, + dictWord{10, 10, 32}, + dictWord{143, 10, 56}, + dictWord{11, 11, 259}, + dictWord{140, 11, 270}, + dictWord{ + 5, + 0, + 236, + }, + dictWord{6, 0, 572}, + dictWord{8, 0, 492}, + dictWord{11, 0, 618}, + dictWord{144, 0, 56}, + dictWord{8, 11, 572}, + dictWord{9, 11, 310}, + dictWord{9, 11, 682}, + dictWord{137, 11, 698}, + dictWord{134, 0, 1854}, + dictWord{5, 0, 190}, + dictWord{136, 0, 318}, + dictWord{133, 10, 435}, + dictWord{135, 0, 1376}, + dictWord{ + 4, + 11, + 296, + }, + dictWord{6, 11, 352}, + dictWord{7, 11, 401}, + dictWord{7, 11, 1410}, + dictWord{7, 11, 1594}, + dictWord{7, 11, 1674}, + dictWord{8, 11, 63}, + dictWord{ + 8, + 11, + 660, + }, + dictWord{137, 11, 74}, + dictWord{7, 0, 349}, + dictWord{5, 10, 85}, + dictWord{6, 10, 419}, + dictWord{7, 10, 305}, + dictWord{7, 10, 361}, + dictWord{7, 10, 1337}, + dictWord{8, 10, 71}, + dictWord{140, 10, 519}, + dictWord{4, 11, 139}, + dictWord{4, 11, 388}, + dictWord{140, 11, 188}, + dictWord{6, 0, 1972}, + dictWord{6, 0, 2013}, + dictWord{8, 0, 951}, + dictWord{10, 0, 947}, + dictWord{10, 0, 974}, + dictWord{10, 0, 1018}, + dictWord{142, 0, 476}, + dictWord{140, 10, 688}, + dictWord{ + 135, + 10, + 740, + }, + dictWord{5, 10, 691}, + dictWord{7, 10, 345}, + dictWord{9, 10, 94}, + dictWord{140, 10, 169}, + dictWord{9, 0, 344}, + dictWord{5, 10, 183}, + dictWord{6, 10, 582}, + dictWord{10, 10, 679}, + dictWord{140, 10, 435}, + dictWord{135, 10, 511}, + dictWord{132, 0, 850}, + dictWord{8, 11, 441}, + dictWord{10, 11, 314}, + dictWord{ + 143, + 11, + 3, + }, + dictWord{7, 10, 1993}, + dictWord{136, 10, 684}, + dictWord{4, 11, 747}, + dictWord{6, 11, 290}, + dictWord{6, 10, 583}, + dictWord{7, 11, 649}, + dictWord{ + 7, + 11, + 1479, + }, + dictWord{135, 11, 1583}, + dictWord{133, 11, 232}, + dictWord{133, 10, 704}, + dictWord{134, 0, 910}, 
+ dictWord{4, 10, 179}, + dictWord{5, 10, 198}, + dictWord{133, 10, 697}, + dictWord{7, 10, 347}, + dictWord{7, 10, 971}, + dictWord{8, 10, 181}, + dictWord{138, 10, 711}, + dictWord{136, 11, 525}, + dictWord{ + 14, + 0, + 19, + }, + dictWord{14, 0, 28}, + dictWord{144, 0, 29}, + dictWord{7, 0, 85}, + dictWord{7, 0, 247}, + dictWord{8, 0, 585}, + dictWord{138, 0, 163}, + dictWord{4, 0, 487}, + dictWord{ + 7, + 11, + 472, + }, + dictWord{7, 11, 1801}, + dictWord{10, 11, 748}, + dictWord{141, 11, 458}, + dictWord{4, 10, 243}, + dictWord{5, 10, 203}, + dictWord{7, 10, 19}, + dictWord{ + 7, + 10, + 71, + }, + dictWord{7, 10, 113}, + dictWord{10, 10, 405}, + dictWord{11, 10, 357}, + dictWord{142, 10, 240}, + dictWord{7, 10, 1450}, + dictWord{139, 10, 99}, + dictWord{132, 11, 425}, + dictWord{138, 0, 145}, + dictWord{147, 0, 83}, + dictWord{6, 10, 492}, + dictWord{137, 11, 247}, + dictWord{4, 0, 1013}, + dictWord{ + 134, + 0, + 2033, + }, + dictWord{5, 10, 134}, + dictWord{6, 10, 408}, + dictWord{6, 10, 495}, + dictWord{135, 10, 1593}, + dictWord{135, 0, 1922}, + dictWord{134, 11, 1768}, + dictWord{4, 0, 124}, + dictWord{10, 0, 457}, + dictWord{11, 0, 121}, + dictWord{11, 0, 169}, + dictWord{11, 0, 870}, + dictWord{11, 0, 874}, + dictWord{12, 0, 214}, + dictWord{ + 14, + 0, + 187, + }, + dictWord{143, 0, 77}, + dictWord{5, 0, 557}, + dictWord{135, 0, 1457}, + dictWord{139, 0, 66}, + dictWord{5, 11, 943}, + dictWord{6, 11, 1779}, + dictWord{ + 142, + 10, + 4, + }, + dictWord{4, 10, 248}, + dictWord{4, 10, 665}, + dictWord{7, 10, 137}, + dictWord{137, 10, 349}, + dictWord{7, 0, 1193}, + dictWord{5, 11, 245}, + dictWord{ + 6, + 11, + 576, + }, + dictWord{7, 11, 582}, + dictWord{136, 11, 225}, + dictWord{144, 0, 82}, + dictWord{7, 10, 1270}, + dictWord{139, 10, 612}, + dictWord{5, 0, 454}, + dictWord{ + 10, + 0, + 352, + }, + dictWord{138, 11, 352}, + dictWord{18, 0, 57}, + dictWord{5, 10, 371}, + dictWord{135, 10, 563}, + dictWord{135, 0, 1333}, + dictWord{6, 0, 107}, + dictWord{ + 7, + 0, + 638, + }, + dictWord{7, 0, 1632}, + dictWord{9, 0, 396}, + dictWord{134, 11, 610}, + dictWord{5, 0, 370}, + dictWord{134, 0, 1756}, + dictWord{4, 10, 374}, + dictWord{ + 7, + 10, + 547, + }, + dictWord{7, 10, 1700}, + dictWord{7, 10, 1833}, + dictWord{139, 10, 858}, + dictWord{133, 0, 204}, + dictWord{6, 0, 1305}, + dictWord{9, 10, 311}, + dictWord{ + 141, + 10, + 42, + }, + dictWord{5, 0, 970}, + dictWord{134, 0, 1706}, + dictWord{6, 10, 1647}, + dictWord{7, 10, 1552}, + dictWord{7, 10, 2010}, + dictWord{9, 10, 494}, + dictWord{137, 10, 509}, + dictWord{13, 11, 455}, + dictWord{15, 11, 99}, + dictWord{15, 11, 129}, + dictWord{144, 11, 68}, + dictWord{135, 0, 3}, + dictWord{4, 0, 35}, + dictWord{ + 5, + 0, + 121, + }, + dictWord{5, 0, 483}, + dictWord{5, 0, 685}, + dictWord{6, 0, 489}, + dictWord{6, 0, 782}, + dictWord{6, 0, 1032}, + dictWord{7, 0, 1204}, + dictWord{136, 0, 394}, + dictWord{4, 0, 921}, + dictWord{133, 0, 1007}, + dictWord{8, 11, 360}, + dictWord{138, 11, 63}, + dictWord{135, 0, 1696}, + dictWord{134, 0, 1519}, + dictWord{ + 132, + 11, + 443, + }, + dictWord{135, 11, 944}, + dictWord{6, 10, 123}, + dictWord{7, 10, 214}, + dictWord{9, 10, 728}, + dictWord{10, 10, 157}, + dictWord{11, 10, 346}, + dictWord{11, 10, 662}, + dictWord{143, 10, 106}, + dictWord{137, 0, 981}, + dictWord{135, 10, 1435}, + dictWord{134, 0, 1072}, + dictWord{132, 0, 712}, + dictWord{ + 134, + 0, + 1629, + }, + dictWord{134, 0, 728}, + dictWord{4, 11, 298}, + dictWord{137, 11, 483}, + dictWord{6, 0, 1177}, + dictWord{6, 0, 
1271}, + dictWord{5, 11, 164}, + dictWord{ + 7, + 11, + 121, + }, + dictWord{142, 11, 189}, + dictWord{7, 0, 1608}, + dictWord{4, 10, 707}, + dictWord{5, 10, 588}, + dictWord{6, 10, 393}, + dictWord{13, 10, 106}, + dictWord{ + 18, + 10, + 49, + }, + dictWord{147, 10, 41}, + dictWord{23, 0, 16}, + dictWord{151, 11, 16}, + dictWord{6, 10, 211}, + dictWord{7, 10, 1690}, + dictWord{11, 10, 486}, + dictWord{140, 10, 369}, + dictWord{133, 0, 485}, + dictWord{19, 11, 15}, + dictWord{149, 11, 27}, + dictWord{4, 11, 172}, + dictWord{9, 11, 611}, + dictWord{10, 11, 436}, + dictWord{12, 11, 673}, + dictWord{141, 11, 255}, + dictWord{5, 11, 844}, + dictWord{10, 11, 484}, + dictWord{11, 11, 754}, + dictWord{12, 11, 457}, + dictWord{ + 14, + 11, + 171, + }, + dictWord{14, 11, 389}, + dictWord{146, 11, 153}, + dictWord{4, 0, 285}, + dictWord{5, 0, 27}, + dictWord{5, 0, 317}, + dictWord{6, 0, 301}, + dictWord{7, 0, 7}, + dictWord{ + 8, + 0, + 153, + }, + dictWord{10, 0, 766}, + dictWord{11, 0, 468}, + dictWord{12, 0, 467}, + dictWord{141, 0, 143}, + dictWord{134, 0, 1462}, + dictWord{9, 11, 263}, + dictWord{ + 10, + 11, + 147, + }, + dictWord{138, 11, 492}, + dictWord{133, 11, 537}, + dictWord{6, 0, 1945}, + dictWord{6, 0, 1986}, + dictWord{6, 0, 1991}, + dictWord{134, 0, 2038}, + dictWord{134, 10, 219}, + dictWord{137, 11, 842}, + dictWord{14, 0, 52}, + dictWord{17, 0, 50}, + dictWord{5, 10, 582}, + dictWord{6, 10, 1646}, + dictWord{7, 10, 99}, + dictWord{7, 10, 1962}, + dictWord{7, 10, 1986}, + dictWord{8, 10, 515}, + dictWord{8, 10, 773}, + dictWord{9, 10, 23}, + dictWord{9, 10, 491}, + dictWord{12, 10, 620}, + dictWord{142, 10, 93}, + dictWord{138, 11, 97}, + dictWord{20, 0, 21}, + dictWord{20, 0, 44}, + dictWord{133, 10, 851}, + dictWord{136, 0, 819}, + dictWord{139, 0, 917}, + dictWord{5, 11, 230}, + dictWord{5, 11, 392}, + dictWord{6, 11, 420}, + dictWord{8, 10, 762}, + dictWord{8, 10, 812}, + dictWord{9, 11, 568}, + dictWord{9, 10, 910}, + dictWord{140, 11, 612}, + dictWord{135, 0, 784}, + dictWord{15, 0, 135}, + dictWord{143, 11, 135}, + dictWord{10, 0, 454}, + dictWord{140, 0, 324}, + dictWord{4, 11, 0}, + dictWord{5, 11, 41}, + dictWord{7, 11, 1459}, + dictWord{7, 11, 1469}, + dictWord{7, 11, 1618}, + dictWord{7, 11, 1859}, + dictWord{9, 11, 549}, + dictWord{139, 11, 905}, + dictWord{4, 10, 98}, + dictWord{7, 10, 1365}, + dictWord{9, 10, 422}, + dictWord{9, 10, 670}, + dictWord{10, 10, 775}, + dictWord{11, 10, 210}, + dictWord{13, 10, 26}, + dictWord{13, 10, 457}, + dictWord{141, 10, 476}, + dictWord{6, 0, 1719}, + dictWord{6, 0, 1735}, + dictWord{7, 0, 2016}, + dictWord{7, 0, 2020}, + dictWord{8, 0, 837}, + dictWord{137, 0, 852}, + dictWord{133, 11, 696}, + dictWord{135, 0, 852}, + dictWord{132, 0, 952}, + dictWord{134, 10, 1730}, + dictWord{132, 11, 771}, + dictWord{ + 138, + 0, + 568, + }, + dictWord{137, 0, 448}, + dictWord{139, 0, 146}, + dictWord{8, 0, 67}, + dictWord{138, 0, 419}, + dictWord{133, 11, 921}, + dictWord{137, 10, 147}, + dictWord{134, 0, 1826}, + dictWord{10, 0, 657}, + dictWord{14, 0, 297}, + dictWord{142, 0, 361}, + dictWord{6, 0, 666}, + dictWord{6, 0, 767}, + dictWord{134, 0, 1542}, + dictWord{139, 0, 729}, + dictWord{6, 11, 180}, + dictWord{7, 11, 1137}, + dictWord{8, 11, 751}, + dictWord{139, 11, 805}, + dictWord{4, 11, 183}, + dictWord{7, 11, 271}, + dictWord{11, 11, 824}, + dictWord{11, 11, 952}, + dictWord{13, 11, 278}, + dictWord{13, 11, 339}, + dictWord{13, 11, 482}, + dictWord{14, 11, 424}, + dictWord{ + 148, + 11, + 99, + }, + dictWord{4, 0, 669}, + dictWord{5, 
11, 477}, + dictWord{5, 11, 596}, + dictWord{6, 11, 505}, + dictWord{7, 11, 1221}, + dictWord{11, 11, 907}, + dictWord{ + 12, + 11, + 209, + }, + dictWord{141, 11, 214}, + dictWord{135, 11, 1215}, + dictWord{5, 0, 402}, + dictWord{6, 10, 30}, + dictWord{11, 10, 56}, + dictWord{139, 10, 305}, + dictWord{ + 7, + 11, + 564, + }, + dictWord{142, 11, 168}, + dictWord{139, 0, 152}, + dictWord{7, 0, 912}, + dictWord{135, 10, 1614}, + dictWord{4, 10, 150}, + dictWord{5, 10, 303}, + dictWord{134, 10, 327}, + dictWord{7, 0, 320}, + dictWord{8, 0, 51}, + dictWord{9, 0, 868}, + dictWord{10, 0, 833}, + dictWord{12, 0, 481}, + dictWord{12, 0, 570}, + dictWord{ + 148, + 0, + 106, + }, + dictWord{132, 0, 445}, + dictWord{7, 11, 274}, + dictWord{11, 11, 263}, + dictWord{11, 11, 479}, + dictWord{11, 11, 507}, + dictWord{140, 11, 277}, + dictWord{10, 0, 555}, + dictWord{11, 0, 308}, + dictWord{19, 0, 95}, + dictWord{6, 11, 1645}, + dictWord{8, 10, 192}, + dictWord{10, 10, 78}, + dictWord{141, 10, 359}, + dictWord{135, 10, 786}, + dictWord{6, 11, 92}, + dictWord{6, 11, 188}, + dictWord{7, 11, 1269}, + dictWord{7, 11, 1524}, + dictWord{7, 11, 1876}, + dictWord{10, 11, 228}, + dictWord{139, 11, 1020}, + dictWord{4, 11, 459}, + dictWord{133, 11, 966}, + dictWord{11, 0, 386}, + dictWord{6, 10, 1638}, + dictWord{7, 10, 79}, + dictWord{ + 7, + 10, + 496, + }, + dictWord{9, 10, 138}, + dictWord{10, 10, 336}, + dictWord{12, 10, 412}, + dictWord{12, 10, 440}, + dictWord{142, 10, 305}, + dictWord{133, 0, 239}, + dictWord{ + 7, + 0, + 83, + }, + dictWord{7, 0, 1990}, + dictWord{8, 0, 130}, + dictWord{139, 0, 720}, + dictWord{138, 11, 709}, + dictWord{4, 0, 143}, + dictWord{5, 0, 550}, + dictWord{ + 133, + 0, + 752, + }, + dictWord{5, 0, 123}, + dictWord{6, 0, 530}, + dictWord{7, 0, 348}, + dictWord{135, 0, 1419}, + dictWord{135, 0, 2024}, + dictWord{6, 11, 18}, + dictWord{7, 11, 179}, + dictWord{7, 11, 721}, + dictWord{7, 11, 932}, + dictWord{8, 11, 548}, + dictWord{8, 11, 757}, + dictWord{9, 11, 54}, + dictWord{9, 11, 65}, + dictWord{9, 11, 532}, + dictWord{ + 9, + 11, + 844, + }, + dictWord{10, 11, 113}, + dictWord{10, 11, 117}, + dictWord{10, 11, 236}, + dictWord{10, 11, 315}, + dictWord{10, 11, 430}, + dictWord{10, 11, 798}, + dictWord{11, 11, 153}, + dictWord{11, 11, 351}, + dictWord{11, 11, 375}, + dictWord{12, 11, 78}, + dictWord{12, 11, 151}, + dictWord{12, 11, 392}, + dictWord{ + 14, + 11, + 248, + }, + dictWord{143, 11, 23}, + dictWord{7, 10, 204}, + dictWord{7, 10, 415}, + dictWord{8, 10, 42}, + dictWord{10, 10, 85}, + dictWord{139, 10, 564}, + dictWord{ + 134, + 0, + 958, + }, + dictWord{133, 11, 965}, + dictWord{132, 0, 210}, + dictWord{135, 11, 1429}, + dictWord{138, 11, 480}, + dictWord{134, 11, 182}, + dictWord{ + 139, + 11, + 345, + }, + dictWord{10, 11, 65}, + dictWord{10, 11, 488}, + dictWord{138, 11, 497}, + dictWord{4, 10, 3}, + dictWord{5, 10, 247}, + dictWord{5, 10, 644}, + dictWord{ + 7, + 10, + 744, + }, + dictWord{7, 10, 1207}, + dictWord{7, 10, 1225}, + dictWord{7, 10, 1909}, + dictWord{146, 10, 147}, + dictWord{132, 0, 430}, + dictWord{5, 10, 285}, + dictWord{ + 9, + 10, + 67, + }, + dictWord{13, 10, 473}, + dictWord{143, 10, 82}, + dictWord{144, 11, 16}, + dictWord{7, 11, 1162}, + dictWord{9, 11, 588}, + dictWord{10, 11, 260}, + dictWord{151, 10, 8}, + dictWord{133, 0, 213}, + dictWord{138, 0, 7}, + dictWord{135, 0, 801}, + dictWord{134, 11, 1786}, + dictWord{135, 11, 308}, + dictWord{6, 0, 936}, + dictWord{134, 0, 1289}, + dictWord{133, 0, 108}, + dictWord{132, 0, 885}, + dictWord{133, 0, 
219}, + dictWord{139, 0, 587}, + dictWord{4, 0, 193}, + dictWord{5, 0, 916}, + dictWord{6, 0, 1041}, + dictWord{7, 0, 364}, + dictWord{10, 0, 398}, + dictWord{10, 0, 726}, + dictWord{11, 0, 317}, + dictWord{11, 0, 626}, + dictWord{12, 0, 142}, + dictWord{12, 0, 288}, + dictWord{12, 0, 678}, + dictWord{13, 0, 313}, + dictWord{15, 0, 113}, + dictWord{146, 0, 114}, + dictWord{135, 0, 1165}, + dictWord{6, 0, 241}, + dictWord{ + 9, + 0, + 342, + }, + dictWord{10, 0, 729}, + dictWord{11, 0, 284}, + dictWord{11, 0, 445}, + dictWord{11, 0, 651}, + dictWord{11, 0, 863}, + dictWord{13, 0, 398}, + dictWord{ + 146, + 0, + 99, + }, + dictWord{7, 0, 907}, + dictWord{136, 0, 832}, + dictWord{9, 0, 303}, + dictWord{4, 10, 29}, + dictWord{6, 10, 532}, + dictWord{7, 10, 1628}, + dictWord{7, 10, 1648}, + dictWord{9, 10, 350}, + dictWord{10, 10, 433}, + dictWord{11, 10, 97}, + dictWord{11, 10, 557}, + dictWord{11, 10, 745}, + dictWord{12, 10, 289}, + dictWord{ + 12, + 10, + 335, + }, + dictWord{12, 10, 348}, + dictWord{12, 10, 606}, + dictWord{13, 10, 116}, + dictWord{13, 10, 233}, + dictWord{13, 10, 466}, + dictWord{14, 10, 181}, + dictWord{ + 14, + 10, + 209, + }, + dictWord{14, 10, 232}, + dictWord{14, 10, 236}, + dictWord{14, 10, 300}, + dictWord{16, 10, 41}, + dictWord{148, 10, 97}, + dictWord{7, 11, 423}, + dictWord{7, 10, 1692}, + dictWord{136, 11, 588}, + dictWord{6, 0, 931}, + dictWord{134, 0, 1454}, + dictWord{5, 10, 501}, + dictWord{7, 10, 1704}, + dictWord{9, 10, 553}, + dictWord{11, 10, 520}, + dictWord{12, 10, 557}, + dictWord{141, 10, 249}, + dictWord{136, 11, 287}, + dictWord{4, 0, 562}, + dictWord{9, 0, 254}, + dictWord{ + 139, + 0, + 879, + }, + dictWord{132, 0, 786}, + dictWord{14, 11, 32}, + dictWord{18, 11, 85}, + dictWord{20, 11, 2}, + dictWord{152, 11, 16}, + dictWord{135, 0, 1294}, + dictWord{ + 7, + 11, + 723, + }, + dictWord{135, 11, 1135}, + dictWord{6, 0, 216}, + dictWord{7, 0, 901}, + dictWord{7, 0, 1343}, + dictWord{8, 0, 493}, + dictWord{134, 11, 403}, + dictWord{ + 7, + 11, + 719, + }, + dictWord{8, 11, 809}, + dictWord{136, 11, 834}, + dictWord{5, 11, 210}, + dictWord{6, 11, 213}, + dictWord{7, 11, 60}, + dictWord{10, 11, 364}, + dictWord{ + 139, + 11, + 135, + }, + dictWord{7, 0, 341}, + dictWord{11, 0, 219}, + dictWord{5, 11, 607}, + dictWord{8, 11, 326}, + dictWord{136, 11, 490}, + dictWord{4, 11, 701}, + dictWord{ + 5, + 11, + 472, + }, + dictWord{5, 11, 639}, + dictWord{7, 11, 1249}, + dictWord{9, 11, 758}, + dictWord{139, 11, 896}, + dictWord{135, 11, 380}, + dictWord{135, 11, 1947}, + dictWord{139, 0, 130}, + dictWord{135, 0, 1734}, + dictWord{10, 0, 115}, + dictWord{11, 0, 420}, + dictWord{12, 0, 154}, + dictWord{13, 0, 404}, + dictWord{14, 0, 346}, + dictWord{143, 0, 54}, + dictWord{134, 10, 129}, + dictWord{4, 11, 386}, + dictWord{7, 11, 41}, + dictWord{8, 11, 405}, + dictWord{9, 11, 497}, + dictWord{11, 11, 110}, + dictWord{11, 11, 360}, + dictWord{15, 11, 37}, + dictWord{144, 11, 84}, + dictWord{141, 11, 282}, + dictWord{5, 11, 46}, + dictWord{7, 11, 1452}, + dictWord{7, 11, 1480}, + dictWord{8, 11, 634}, + dictWord{140, 11, 472}, + dictWord{4, 11, 524}, + dictWord{136, 11, 810}, + dictWord{10, 11, 238}, + dictWord{141, 11, 33}, + dictWord{ + 133, + 0, + 604, + }, + dictWord{5, 0, 1011}, + dictWord{136, 0, 701}, + dictWord{8, 0, 856}, + dictWord{8, 0, 858}, + dictWord{8, 0, 879}, + dictWord{12, 0, 702}, + dictWord{142, 0, 447}, + dictWord{4, 0, 54}, + dictWord{5, 0, 666}, + dictWord{7, 0, 1039}, + dictWord{7, 0, 1130}, + dictWord{9, 0, 195}, + dictWord{138, 0, 
302}, + dictWord{4, 10, 25}, + dictWord{ + 5, + 10, + 60, + }, + dictWord{6, 10, 504}, + dictWord{7, 10, 614}, + dictWord{7, 10, 1155}, + dictWord{140, 10, 0}, + dictWord{7, 10, 1248}, + dictWord{11, 10, 621}, + dictWord{ + 139, + 10, + 702, + }, + dictWord{133, 11, 997}, + dictWord{137, 10, 321}, + dictWord{134, 0, 1669}, + dictWord{134, 0, 1791}, + dictWord{4, 10, 379}, + dictWord{ + 135, + 10, + 1397, + }, + dictWord{138, 11, 372}, + dictWord{5, 11, 782}, + dictWord{5, 11, 829}, + dictWord{134, 11, 1738}, + dictWord{135, 0, 1228}, + dictWord{4, 10, 118}, + dictWord{6, 10, 274}, + dictWord{6, 10, 361}, + dictWord{7, 10, 75}, + dictWord{141, 10, 441}, + dictWord{132, 0, 623}, + dictWord{9, 11, 279}, + dictWord{10, 11, 407}, + dictWord{14, 11, 84}, + dictWord{150, 11, 18}, + dictWord{137, 10, 841}, + dictWord{135, 0, 798}, + dictWord{140, 10, 693}, + dictWord{5, 10, 314}, + dictWord{6, 10, 221}, + dictWord{7, 10, 419}, + dictWord{10, 10, 650}, + dictWord{11, 10, 396}, + dictWord{12, 10, 156}, + dictWord{13, 10, 369}, + dictWord{14, 10, 333}, + dictWord{ + 145, + 10, + 47, + }, + dictWord{135, 11, 1372}, + dictWord{7, 0, 122}, + dictWord{9, 0, 259}, + dictWord{10, 0, 84}, + dictWord{11, 0, 470}, + dictWord{12, 0, 541}, + dictWord{ + 141, + 0, + 379, + }, + dictWord{134, 0, 837}, + dictWord{8, 0, 1013}, + dictWord{4, 11, 78}, + dictWord{5, 11, 96}, + dictWord{5, 11, 182}, + dictWord{7, 11, 1724}, + dictWord{ + 7, + 11, + 1825, + }, + dictWord{10, 11, 394}, + dictWord{10, 11, 471}, + dictWord{11, 11, 532}, + dictWord{14, 11, 340}, + dictWord{145, 11, 88}, + dictWord{134, 0, 577}, + dictWord{135, 11, 1964}, + dictWord{132, 10, 913}, + dictWord{134, 0, 460}, + dictWord{8, 0, 891}, + dictWord{10, 0, 901}, + dictWord{10, 0, 919}, + dictWord{10, 0, 932}, + dictWord{12, 0, 715}, + dictWord{12, 0, 728}, + dictWord{12, 0, 777}, + dictWord{14, 0, 457}, + dictWord{144, 0, 103}, + dictWord{5, 0, 82}, + dictWord{5, 0, 131}, + dictWord{ + 7, + 0, + 1755, + }, + dictWord{8, 0, 31}, + dictWord{9, 0, 168}, + dictWord{9, 0, 764}, + dictWord{139, 0, 869}, + dictWord{136, 10, 475}, + dictWord{6, 0, 605}, + dictWord{ + 5, + 10, + 1016, + }, + dictWord{9, 11, 601}, + dictWord{9, 11, 619}, + dictWord{10, 11, 505}, + dictWord{10, 11, 732}, + dictWord{11, 11, 355}, + dictWord{140, 11, 139}, + dictWord{ + 7, + 10, + 602, + }, + dictWord{8, 10, 179}, + dictWord{10, 10, 781}, + dictWord{140, 10, 126}, + dictWord{134, 0, 1246}, + dictWord{6, 10, 329}, + dictWord{138, 10, 111}, + dictWord{6, 11, 215}, + dictWord{7, 11, 1028}, + dictWord{7, 11, 1473}, + dictWord{7, 11, 1721}, + dictWord{9, 11, 424}, + dictWord{138, 11, 779}, + dictWord{5, 0, 278}, + dictWord{137, 0, 68}, + dictWord{6, 0, 932}, + dictWord{6, 0, 1084}, + dictWord{144, 0, 86}, + dictWord{4, 0, 163}, + dictWord{5, 0, 201}, + dictWord{5, 0, 307}, + dictWord{ + 5, + 0, + 310, + }, + dictWord{6, 0, 335}, + dictWord{7, 0, 284}, + dictWord{7, 0, 1660}, + dictWord{136, 0, 165}, + dictWord{136, 0, 781}, + dictWord{134, 0, 707}, + dictWord{6, 0, 33}, + dictWord{135, 0, 1244}, + dictWord{5, 10, 821}, + dictWord{6, 11, 67}, + dictWord{6, 10, 1687}, + dictWord{7, 11, 258}, + dictWord{7, 11, 1630}, + dictWord{9, 11, 354}, + dictWord{9, 11, 675}, + dictWord{10, 11, 830}, + dictWord{14, 11, 80}, + dictWord{145, 11, 80}, + dictWord{6, 11, 141}, + dictWord{7, 11, 225}, + dictWord{9, 11, 59}, + dictWord{9, 11, 607}, + dictWord{10, 11, 312}, + dictWord{11, 11, 687}, + dictWord{12, 11, 555}, + dictWord{13, 11, 373}, + dictWord{13, 11, 494}, + dictWord{148, 11, 58}, + 
dictWord{134, 0, 1113}, + dictWord{9, 0, 388}, + dictWord{5, 10, 71}, + dictWord{7, 10, 1407}, + dictWord{9, 10, 704}, + dictWord{10, 10, 261}, + dictWord{10, 10, 619}, + dictWord{11, 10, 547}, + dictWord{11, 10, 619}, + dictWord{143, 10, 157}, + dictWord{7, 0, 1953}, + dictWord{136, 0, 720}, + dictWord{138, 0, 203}, + dictWord{ + 7, + 10, + 2008, + }, + dictWord{9, 10, 337}, + dictWord{138, 10, 517}, + dictWord{6, 0, 326}, + dictWord{7, 0, 677}, + dictWord{137, 0, 425}, + dictWord{139, 11, 81}, + dictWord{ + 7, + 0, + 1316, + }, + dictWord{7, 0, 1412}, + dictWord{7, 0, 1839}, + dictWord{9, 0, 589}, + dictWord{11, 0, 241}, + dictWord{11, 0, 676}, + dictWord{11, 0, 811}, + dictWord{11, 0, 891}, + dictWord{12, 0, 140}, + dictWord{12, 0, 346}, + dictWord{12, 0, 479}, + dictWord{13, 0, 140}, + dictWord{13, 0, 381}, + dictWord{14, 0, 188}, + dictWord{18, 0, 30}, + dictWord{148, 0, 108}, + dictWord{5, 0, 416}, + dictWord{6, 10, 86}, + dictWord{6, 10, 603}, + dictWord{7, 10, 292}, + dictWord{7, 10, 561}, + dictWord{8, 10, 257}, + dictWord{ + 8, + 10, + 382, + }, + dictWord{9, 10, 721}, + dictWord{9, 10, 778}, + dictWord{11, 10, 581}, + dictWord{140, 10, 466}, + dictWord{4, 10, 486}, + dictWord{133, 10, 491}, + dictWord{134, 0, 1300}, + dictWord{132, 10, 72}, + dictWord{7, 0, 847}, + dictWord{6, 10, 265}, + dictWord{7, 11, 430}, + dictWord{139, 11, 46}, + dictWord{5, 11, 602}, + dictWord{6, 11, 106}, + dictWord{7, 11, 1786}, + dictWord{7, 11, 1821}, + dictWord{7, 11, 2018}, + dictWord{9, 11, 418}, + dictWord{137, 11, 763}, + dictWord{5, 0, 358}, + dictWord{7, 0, 535}, + dictWord{7, 0, 1184}, + dictWord{10, 0, 662}, + dictWord{13, 0, 212}, + dictWord{13, 0, 304}, + dictWord{13, 0, 333}, + dictWord{145, 0, 98}, + dictWord{ + 5, + 11, + 65, + }, + dictWord{6, 11, 416}, + dictWord{7, 11, 1720}, + dictWord{7, 11, 1924}, + dictWord{8, 11, 677}, + dictWord{10, 11, 109}, + dictWord{11, 11, 14}, + dictWord{ + 11, + 11, + 70, + }, + dictWord{11, 11, 569}, + dictWord{11, 11, 735}, + dictWord{15, 11, 153}, + dictWord{148, 11, 80}, + dictWord{6, 0, 1823}, + dictWord{8, 0, 839}, + dictWord{ + 8, + 0, + 852, + }, + dictWord{8, 0, 903}, + dictWord{10, 0, 940}, + dictWord{12, 0, 707}, + dictWord{140, 0, 775}, + dictWord{135, 11, 1229}, + dictWord{6, 0, 1522}, + dictWord{ + 140, + 0, + 654, + }, + dictWord{136, 11, 595}, + dictWord{139, 0, 163}, + dictWord{141, 0, 314}, + dictWord{132, 0, 978}, + dictWord{4, 0, 601}, + dictWord{6, 0, 2035}, + dictWord{137, 10, 234}, + dictWord{5, 10, 815}, + dictWord{6, 10, 1688}, + dictWord{134, 10, 1755}, + dictWord{133, 0, 946}, + dictWord{136, 0, 434}, + dictWord{ + 6, + 10, + 197, + }, + dictWord{136, 10, 205}, + dictWord{7, 0, 411}, + dictWord{7, 0, 590}, + dictWord{8, 0, 631}, + dictWord{9, 0, 323}, + dictWord{10, 0, 355}, + dictWord{11, 0, 491}, + dictWord{12, 0, 143}, + dictWord{12, 0, 402}, + dictWord{13, 0, 73}, + dictWord{14, 0, 408}, + dictWord{15, 0, 107}, + dictWord{146, 0, 71}, + dictWord{7, 0, 1467}, + dictWord{ + 8, + 0, + 328, + }, + dictWord{10, 0, 544}, + dictWord{11, 0, 955}, + dictWord{12, 0, 13}, + dictWord{13, 0, 320}, + dictWord{145, 0, 83}, + dictWord{142, 0, 410}, + dictWord{ + 11, + 0, + 511, + }, + dictWord{13, 0, 394}, + dictWord{14, 0, 298}, + dictWord{14, 0, 318}, + dictWord{146, 0, 103}, + dictWord{6, 10, 452}, + dictWord{7, 10, 312}, + dictWord{ + 138, + 10, + 219, + }, + dictWord{138, 10, 589}, + dictWord{4, 10, 333}, + dictWord{9, 10, 176}, + dictWord{12, 10, 353}, + dictWord{141, 10, 187}, + dictWord{135, 11, 329}, + dictWord{132, 11, 469}, + 
dictWord{5, 0, 835}, + dictWord{134, 0, 483}, + dictWord{134, 11, 1743}, + dictWord{5, 11, 929}, + dictWord{6, 11, 340}, + dictWord{8, 11, 376}, + dictWord{136, 11, 807}, + dictWord{134, 10, 1685}, + dictWord{132, 0, 677}, + dictWord{5, 11, 218}, + dictWord{7, 11, 1610}, + dictWord{138, 11, 83}, + dictWord{ + 5, + 11, + 571, + }, + dictWord{135, 11, 1842}, + dictWord{132, 11, 455}, + dictWord{137, 0, 70}, + dictWord{135, 0, 1405}, + dictWord{7, 10, 135}, + dictWord{8, 10, 7}, + dictWord{ + 8, + 10, + 62, + }, + dictWord{9, 10, 243}, + dictWord{10, 10, 658}, + dictWord{10, 10, 697}, + dictWord{11, 10, 456}, + dictWord{139, 10, 756}, + dictWord{9, 10, 395}, + dictWord{138, 10, 79}, + dictWord{137, 0, 108}, + dictWord{6, 11, 161}, + dictWord{7, 11, 372}, + dictWord{137, 11, 597}, + dictWord{132, 11, 349}, + dictWord{ + 132, + 0, + 777, + }, + dictWord{132, 0, 331}, + dictWord{135, 10, 631}, + dictWord{133, 0, 747}, + dictWord{6, 11, 432}, + dictWord{6, 11, 608}, + dictWord{139, 11, 322}, + dictWord{138, 10, 835}, + dictWord{5, 11, 468}, + dictWord{7, 11, 1809}, + dictWord{10, 11, 325}, + dictWord{11, 11, 856}, + dictWord{12, 11, 345}, + dictWord{ + 143, + 11, + 104, + }, + dictWord{133, 11, 223}, + dictWord{7, 10, 406}, + dictWord{7, 10, 459}, + dictWord{8, 10, 606}, + dictWord{139, 10, 726}, + dictWord{132, 11, 566}, + dictWord{142, 0, 68}, + dictWord{4, 11, 59}, + dictWord{135, 11, 1394}, + dictWord{6, 11, 436}, + dictWord{139, 11, 481}, + dictWord{4, 11, 48}, + dictWord{5, 11, 271}, + dictWord{135, 11, 953}, + dictWord{139, 11, 170}, + dictWord{5, 11, 610}, + dictWord{136, 11, 457}, + dictWord{133, 11, 755}, + dictWord{135, 11, 1217}, + dictWord{ + 133, + 10, + 612, + }, + dictWord{132, 11, 197}, + dictWord{132, 0, 505}, + dictWord{4, 10, 372}, + dictWord{7, 10, 482}, + dictWord{8, 10, 158}, + dictWord{9, 10, 602}, + dictWord{ + 9, + 10, + 615, + }, + dictWord{10, 10, 245}, + dictWord{10, 10, 678}, + dictWord{10, 10, 744}, + dictWord{11, 10, 248}, + dictWord{139, 10, 806}, + dictWord{133, 0, 326}, + dictWord{5, 10, 854}, + dictWord{135, 10, 1991}, + dictWord{4, 0, 691}, + dictWord{146, 0, 16}, + dictWord{6, 0, 628}, + dictWord{9, 0, 35}, + dictWord{10, 0, 680}, + dictWord{10, 0, 793}, + dictWord{11, 0, 364}, + dictWord{13, 0, 357}, + dictWord{143, 0, 164}, + dictWord{138, 0, 654}, + dictWord{6, 0, 32}, + dictWord{7, 0, 385}, + dictWord{ + 7, + 0, + 757, + }, + dictWord{7, 0, 1916}, + dictWord{8, 0, 37}, + dictWord{8, 0, 94}, + dictWord{8, 0, 711}, + dictWord{9, 0, 541}, + dictWord{10, 0, 162}, + dictWord{10, 0, 795}, + dictWord{ + 11, + 0, + 989, + }, + dictWord{11, 0, 1010}, + dictWord{12, 0, 14}, + dictWord{142, 0, 308}, + dictWord{133, 11, 217}, + dictWord{6, 0, 152}, + dictWord{6, 0, 349}, + dictWord{ + 6, + 0, + 1682, + }, + dictWord{7, 0, 1252}, + dictWord{8, 0, 112}, + dictWord{9, 0, 435}, + dictWord{9, 0, 668}, + dictWord{10, 0, 290}, + dictWord{10, 0, 319}, + dictWord{10, 0, 815}, + dictWord{11, 0, 180}, + dictWord{11, 0, 837}, + dictWord{12, 0, 240}, + dictWord{13, 0, 152}, + dictWord{13, 0, 219}, + dictWord{142, 0, 158}, + dictWord{4, 0, 581}, + dictWord{134, 0, 726}, + dictWord{5, 10, 195}, + dictWord{135, 10, 1685}, + dictWord{6, 0, 126}, + dictWord{7, 0, 573}, + dictWord{8, 0, 397}, + dictWord{142, 0, 44}, + dictWord{138, 0, 89}, + dictWord{7, 10, 1997}, + dictWord{8, 10, 730}, + dictWord{139, 10, 1006}, + dictWord{134, 0, 1531}, + dictWord{134, 0, 1167}, + dictWord{ + 5, + 0, + 926, + }, + dictWord{12, 0, 203}, + dictWord{133, 10, 751}, + dictWord{4, 11, 165}, + dictWord{7, 
11, 1398}, + dictWord{135, 11, 1829}, + dictWord{7, 0, 1232}, + dictWord{137, 0, 531}, + dictWord{135, 10, 821}, + dictWord{134, 0, 943}, + dictWord{133, 0, 670}, + dictWord{4, 0, 880}, + dictWord{139, 0, 231}, + dictWord{ + 134, + 0, + 1617, + }, + dictWord{135, 0, 1957}, + dictWord{5, 11, 9}, + dictWord{7, 11, 297}, + dictWord{7, 11, 966}, + dictWord{140, 11, 306}, + dictWord{6, 0, 975}, + dictWord{ + 134, + 0, + 985, + }, + dictWord{5, 10, 950}, + dictWord{5, 10, 994}, + dictWord{134, 10, 351}, + dictWord{12, 11, 21}, + dictWord{151, 11, 7}, + dictWord{5, 11, 146}, + dictWord{ + 6, + 11, + 411, + }, + dictWord{138, 11, 721}, + dictWord{7, 0, 242}, + dictWord{135, 0, 1942}, + dictWord{6, 11, 177}, + dictWord{135, 11, 467}, + dictWord{5, 0, 421}, + dictWord{ + 7, + 10, + 47, + }, + dictWord{137, 10, 684}, + dictWord{5, 0, 834}, + dictWord{7, 0, 1202}, + dictWord{8, 0, 14}, + dictWord{9, 0, 481}, + dictWord{137, 0, 880}, + dictWord{138, 0, 465}, + dictWord{6, 0, 688}, + dictWord{9, 0, 834}, + dictWord{132, 10, 350}, + dictWord{132, 0, 855}, + dictWord{4, 0, 357}, + dictWord{6, 0, 172}, + dictWord{7, 0, 143}, + dictWord{137, 0, 413}, + dictWord{133, 11, 200}, + dictWord{132, 0, 590}, + dictWord{7, 10, 1812}, + dictWord{13, 10, 259}, + dictWord{13, 10, 356}, + dictWord{ + 14, + 10, + 242, + }, + dictWord{147, 10, 114}, + dictWord{133, 10, 967}, + dictWord{11, 0, 114}, + dictWord{4, 10, 473}, + dictWord{7, 10, 623}, + dictWord{8, 10, 808}, + dictWord{ + 9, + 10, + 871, + }, + dictWord{9, 10, 893}, + dictWord{11, 10, 431}, + dictWord{12, 10, 112}, + dictWord{12, 10, 217}, + dictWord{12, 10, 243}, + dictWord{12, 10, 562}, + dictWord{ + 12, + 10, + 663, + }, + dictWord{12, 10, 683}, + dictWord{13, 10, 141}, + dictWord{13, 10, 197}, + dictWord{13, 10, 227}, + dictWord{13, 10, 406}, + dictWord{13, 10, 487}, + dictWord{14, 10, 156}, + dictWord{14, 10, 203}, + dictWord{14, 10, 224}, + dictWord{14, 10, 256}, + dictWord{18, 10, 58}, + dictWord{150, 10, 0}, + dictWord{ + 138, + 10, + 286, + }, + dictWord{4, 10, 222}, + dictWord{7, 10, 286}, + dictWord{136, 10, 629}, + dictWord{5, 0, 169}, + dictWord{7, 0, 333}, + dictWord{136, 0, 45}, + dictWord{ + 134, + 11, + 481, + }, + dictWord{132, 0, 198}, + dictWord{4, 0, 24}, + dictWord{5, 0, 140}, + dictWord{5, 0, 185}, + dictWord{7, 0, 1500}, + dictWord{11, 0, 565}, + dictWord{11, 0, 838}, + dictWord{4, 11, 84}, + dictWord{7, 11, 1482}, + dictWord{10, 11, 76}, + dictWord{138, 11, 142}, + dictWord{133, 0, 585}, + dictWord{141, 10, 306}, + dictWord{ + 133, + 11, + 1015, + }, + dictWord{4, 11, 315}, + dictWord{5, 11, 507}, + dictWord{135, 11, 1370}, + dictWord{136, 10, 146}, + dictWord{6, 0, 691}, + dictWord{134, 0, 1503}, + dictWord{ + 4, + 0, + 334, + }, + dictWord{133, 0, 593}, + dictWord{4, 10, 465}, + dictWord{135, 10, 1663}, + dictWord{142, 11, 173}, + dictWord{135, 0, 913}, + dictWord{12, 0, 116}, + dictWord{134, 11, 1722}, + dictWord{134, 0, 1360}, + dictWord{132, 0, 802}, + dictWord{8, 11, 222}, + dictWord{8, 11, 476}, + dictWord{9, 11, 238}, + dictWord{ + 11, + 11, + 516, + }, + dictWord{11, 11, 575}, + dictWord{15, 11, 109}, + dictWord{146, 11, 100}, + dictWord{6, 0, 308}, + dictWord{9, 0, 673}, + dictWord{7, 10, 138}, + dictWord{ + 7, + 10, + 517, + }, + dictWord{139, 10, 238}, + dictWord{132, 0, 709}, + dictWord{6, 0, 1876}, + dictWord{6, 0, 1895}, + dictWord{9, 0, 994}, + dictWord{9, 0, 1006}, + dictWord{ + 12, + 0, + 829, + }, + dictWord{12, 0, 888}, + dictWord{12, 0, 891}, + dictWord{146, 0, 185}, + dictWord{148, 10, 94}, + dictWord{4, 0, 228}, + 
dictWord{133, 0, 897}, + dictWord{ + 7, + 0, + 1840, + }, + dictWord{5, 10, 495}, + dictWord{7, 10, 834}, + dictWord{9, 10, 733}, + dictWord{139, 10, 378}, + dictWord{133, 10, 559}, + dictWord{6, 10, 21}, + dictWord{ + 6, + 10, + 1737, + }, + dictWord{7, 10, 1444}, + dictWord{136, 10, 224}, + dictWord{4, 0, 608}, + dictWord{133, 0, 497}, + dictWord{6, 11, 40}, + dictWord{135, 11, 1781}, + dictWord{134, 0, 1573}, + dictWord{135, 0, 2039}, + dictWord{6, 0, 540}, + dictWord{136, 0, 136}, + dictWord{4, 0, 897}, + dictWord{5, 0, 786}, + dictWord{133, 10, 519}, + dictWord{6, 0, 1878}, + dictWord{6, 0, 1884}, + dictWord{9, 0, 938}, + dictWord{9, 0, 948}, + dictWord{9, 0, 955}, + dictWord{9, 0, 973}, + dictWord{9, 0, 1012}, + dictWord{ + 12, + 0, + 895, + }, + dictWord{12, 0, 927}, + dictWord{143, 0, 254}, + dictWord{134, 0, 1469}, + dictWord{133, 0, 999}, + dictWord{4, 0, 299}, + dictWord{135, 0, 1004}, + dictWord{ + 4, + 0, + 745, + }, + dictWord{133, 0, 578}, + dictWord{136, 11, 574}, + dictWord{133, 0, 456}, + dictWord{134, 0, 1457}, + dictWord{7, 0, 1679}, + dictWord{132, 10, 402}, + dictWord{7, 0, 693}, + dictWord{8, 0, 180}, + dictWord{12, 0, 163}, + dictWord{8, 10, 323}, + dictWord{136, 10, 479}, + dictWord{11, 10, 580}, + dictWord{142, 10, 201}, + dictWord{5, 10, 59}, + dictWord{135, 10, 672}, + dictWord{132, 11, 354}, + dictWord{146, 10, 34}, + dictWord{4, 0, 755}, + dictWord{135, 11, 1558}, + dictWord{ + 7, + 0, + 1740, + }, + dictWord{146, 0, 48}, + dictWord{4, 10, 85}, + dictWord{135, 10, 549}, + dictWord{139, 0, 338}, + dictWord{133, 10, 94}, + dictWord{134, 0, 1091}, + dictWord{135, 11, 469}, + dictWord{12, 0, 695}, + dictWord{12, 0, 704}, + dictWord{20, 0, 113}, + dictWord{5, 11, 830}, + dictWord{14, 11, 338}, + dictWord{148, 11, 81}, + dictWord{135, 0, 1464}, + dictWord{6, 10, 11}, + dictWord{135, 10, 187}, + dictWord{135, 0, 975}, + dictWord{13, 0, 335}, + dictWord{132, 10, 522}, + dictWord{ + 134, + 0, + 1979, + }, + dictWord{5, 11, 496}, + dictWord{135, 11, 203}, + dictWord{4, 10, 52}, + dictWord{135, 10, 661}, + dictWord{7, 0, 1566}, + dictWord{8, 0, 269}, + dictWord{ + 9, + 0, + 212, + }, + dictWord{9, 0, 718}, + dictWord{14, 0, 15}, + dictWord{14, 0, 132}, + dictWord{142, 0, 227}, + dictWord{4, 0, 890}, + dictWord{5, 0, 805}, + dictWord{5, 0, 819}, + dictWord{ + 5, + 0, + 961, + }, + dictWord{6, 0, 396}, + dictWord{6, 0, 1631}, + dictWord{6, 0, 1678}, + dictWord{7, 0, 1967}, + dictWord{7, 0, 2041}, + dictWord{9, 0, 630}, + dictWord{11, 0, 8}, + dictWord{11, 0, 1019}, + dictWord{12, 0, 176}, + dictWord{13, 0, 225}, + dictWord{14, 0, 292}, + dictWord{21, 0, 24}, + dictWord{4, 10, 383}, + dictWord{133, 10, 520}, + dictWord{134, 11, 547}, + dictWord{135, 11, 1748}, + dictWord{5, 11, 88}, + dictWord{137, 11, 239}, + dictWord{146, 11, 128}, + dictWord{7, 11, 650}, + dictWord{ + 135, + 11, + 1310, + }, + dictWord{4, 10, 281}, + dictWord{5, 10, 38}, + dictWord{7, 10, 194}, + dictWord{7, 10, 668}, + dictWord{7, 10, 1893}, + dictWord{137, 10, 397}, + dictWord{135, 0, 1815}, + dictWord{9, 10, 635}, + dictWord{139, 10, 559}, + dictWord{7, 0, 1505}, + dictWord{10, 0, 190}, + dictWord{10, 0, 634}, + dictWord{11, 0, 792}, + dictWord{12, 0, 358}, + dictWord{140, 0, 447}, + dictWord{5, 0, 0}, + dictWord{6, 0, 536}, + dictWord{7, 0, 604}, + dictWord{13, 0, 445}, + dictWord{145, 0, 126}, + dictWord{ + 7, + 11, + 1076, + }, + dictWord{9, 11, 80}, + dictWord{11, 11, 78}, + dictWord{11, 11, 421}, + dictWord{11, 11, 534}, + dictWord{140, 11, 545}, + dictWord{8, 0, 966}, + dictWord{ + 10, + 0, + 
1023, + }, + dictWord{14, 11, 369}, + dictWord{146, 11, 72}, + dictWord{135, 11, 1641}, + dictWord{6, 0, 232}, + dictWord{6, 0, 412}, + dictWord{7, 0, 1074}, + dictWord{ + 8, + 0, + 9, + }, + dictWord{8, 0, 157}, + dictWord{8, 0, 786}, + dictWord{9, 0, 196}, + dictWord{9, 0, 352}, + dictWord{9, 0, 457}, + dictWord{10, 0, 337}, + dictWord{11, 0, 232}, + dictWord{ + 11, + 0, + 877, + }, + dictWord{12, 0, 480}, + dictWord{140, 0, 546}, + dictWord{135, 0, 958}, + dictWord{4, 0, 382}, + dictWord{136, 0, 579}, + dictWord{4, 0, 212}, + dictWord{ + 135, + 0, + 1206, + }, + dictWord{4, 11, 497}, + dictWord{5, 11, 657}, + dictWord{135, 11, 1584}, + dictWord{132, 0, 681}, + dictWord{8, 0, 971}, + dictWord{138, 0, 965}, + dictWord{ + 5, + 10, + 448, + }, + dictWord{136, 10, 535}, + dictWord{14, 0, 16}, + dictWord{146, 0, 44}, + dictWord{11, 0, 584}, + dictWord{11, 0, 616}, + dictWord{14, 0, 275}, + dictWord{ + 11, + 11, + 584, + }, + dictWord{11, 11, 616}, + dictWord{142, 11, 275}, + dictWord{136, 11, 13}, + dictWord{7, 10, 610}, + dictWord{135, 10, 1501}, + dictWord{7, 11, 642}, + dictWord{8, 11, 250}, + dictWord{11, 11, 123}, + dictWord{11, 11, 137}, + dictWord{13, 11, 48}, + dictWord{142, 11, 95}, + dictWord{133, 0, 655}, + dictWord{17, 0, 67}, + dictWord{147, 0, 74}, + dictWord{134, 0, 751}, + dictWord{134, 0, 1967}, + dictWord{6, 0, 231}, + dictWord{136, 0, 423}, + dictWord{5, 0, 300}, + dictWord{138, 0, 1016}, + dictWord{4, 10, 319}, + dictWord{5, 10, 699}, + dictWord{138, 10, 673}, + dictWord{6, 0, 237}, + dictWord{7, 0, 611}, + dictWord{8, 0, 100}, + dictWord{9, 0, 416}, + dictWord{ + 11, + 0, + 335, + }, + dictWord{12, 0, 173}, + dictWord{18, 0, 101}, + dictWord{6, 10, 336}, + dictWord{8, 10, 552}, + dictWord{9, 10, 285}, + dictWord{10, 10, 99}, + dictWord{ + 139, + 10, + 568, + }, + dictWord{134, 0, 1370}, + dictWord{7, 10, 1406}, + dictWord{9, 10, 218}, + dictWord{141, 10, 222}, + dictWord{133, 10, 256}, + dictWord{ + 135, + 0, + 1208, + }, + dictWord{14, 11, 213}, + dictWord{148, 11, 38}, + dictWord{6, 0, 1219}, + dictWord{135, 11, 1642}, + dictWord{13, 0, 417}, + dictWord{14, 0, 129}, + dictWord{143, 0, 15}, + dictWord{10, 11, 545}, + dictWord{140, 11, 301}, + dictWord{17, 10, 39}, + dictWord{148, 10, 36}, + dictWord{133, 0, 199}, + dictWord{4, 11, 904}, + dictWord{133, 11, 794}, + dictWord{12, 0, 427}, + dictWord{146, 0, 38}, + dictWord{134, 0, 949}, + dictWord{8, 0, 665}, + dictWord{135, 10, 634}, + dictWord{ + 132, + 10, + 618, + }, + dictWord{135, 10, 259}, + dictWord{132, 10, 339}, + dictWord{133, 11, 761}, + dictWord{141, 10, 169}, + dictWord{132, 10, 759}, + dictWord{5, 0, 688}, + dictWord{7, 0, 539}, + dictWord{135, 0, 712}, + dictWord{7, 11, 386}, + dictWord{138, 11, 713}, + dictWord{134, 0, 1186}, + dictWord{6, 11, 7}, + dictWord{6, 11, 35}, + dictWord{ + 7, + 11, + 147, + }, + dictWord{7, 11, 1069}, + dictWord{7, 11, 1568}, + dictWord{7, 11, 1575}, + dictWord{7, 11, 1917}, + dictWord{8, 11, 43}, + dictWord{8, 11, 208}, + dictWord{ + 9, + 11, + 128, + }, + dictWord{9, 11, 866}, + dictWord{10, 11, 20}, + dictWord{11, 11, 981}, + dictWord{147, 11, 33}, + dictWord{7, 11, 893}, + dictWord{8, 10, 482}, + dictWord{141, 11, 424}, + dictWord{6, 0, 312}, + dictWord{6, 0, 1715}, + dictWord{10, 0, 584}, + dictWord{11, 0, 546}, + dictWord{11, 0, 692}, + dictWord{12, 0, 259}, + dictWord{ + 12, + 0, + 295, + }, + dictWord{13, 0, 46}, + dictWord{141, 0, 154}, + dictWord{5, 10, 336}, + dictWord{6, 10, 341}, + dictWord{6, 10, 478}, + dictWord{6, 10, 1763}, + dictWord{ + 136, + 10, + 386, + }, + 
dictWord{137, 0, 151}, + dictWord{132, 0, 588}, + dictWord{152, 0, 4}, + dictWord{6, 11, 322}, + dictWord{9, 11, 552}, + dictWord{11, 11, 274}, + dictWord{ + 13, + 11, + 209, + }, + dictWord{13, 11, 499}, + dictWord{14, 11, 85}, + dictWord{15, 11, 126}, + dictWord{145, 11, 70}, + dictWord{135, 10, 73}, + dictWord{4, 0, 231}, + dictWord{ + 5, + 0, + 61, + }, + dictWord{6, 0, 104}, + dictWord{7, 0, 729}, + dictWord{7, 0, 964}, + dictWord{7, 0, 1658}, + dictWord{140, 0, 414}, + dictWord{6, 0, 263}, + dictWord{138, 0, 757}, + dictWord{135, 10, 1971}, + dictWord{4, 0, 612}, + dictWord{133, 0, 561}, + dictWord{132, 0, 320}, + dictWord{135, 10, 1344}, + dictWord{8, 11, 83}, + dictWord{ + 8, + 11, + 817, + }, + dictWord{9, 11, 28}, + dictWord{9, 11, 29}, + dictWord{9, 11, 885}, + dictWord{10, 11, 387}, + dictWord{11, 11, 633}, + dictWord{11, 11, 740}, + dictWord{ + 13, + 11, + 235, + }, + dictWord{13, 11, 254}, + dictWord{15, 11, 143}, + dictWord{143, 11, 146}, + dictWord{5, 10, 396}, + dictWord{134, 10, 501}, + dictWord{140, 11, 49}, + dictWord{132, 0, 225}, + dictWord{4, 10, 929}, + dictWord{5, 10, 799}, + dictWord{8, 10, 46}, + dictWord{136, 10, 740}, + dictWord{4, 0, 405}, + dictWord{7, 0, 817}, + dictWord{ + 14, + 0, + 58, + }, + dictWord{17, 0, 37}, + dictWord{146, 0, 124}, + dictWord{133, 0, 974}, + dictWord{4, 11, 412}, + dictWord{133, 11, 581}, + dictWord{4, 10, 892}, + dictWord{ + 133, + 10, + 770, + }, + dictWord{4, 0, 996}, + dictWord{134, 0, 2026}, + dictWord{4, 0, 527}, + dictWord{5, 0, 235}, + dictWord{7, 0, 1239}, + dictWord{11, 0, 131}, + dictWord{ + 140, + 0, + 370, + }, + dictWord{9, 0, 16}, + dictWord{13, 0, 386}, + dictWord{135, 11, 421}, + dictWord{7, 0, 956}, + dictWord{7, 0, 1157}, + dictWord{7, 0, 1506}, + dictWord{7, 0, 1606}, + dictWord{7, 0, 1615}, + dictWord{7, 0, 1619}, + dictWord{7, 0, 1736}, + dictWord{7, 0, 1775}, + dictWord{8, 0, 590}, + dictWord{9, 0, 324}, + dictWord{9, 0, 736}, + dictWord{ + 9, + 0, + 774, + }, + dictWord{9, 0, 776}, + dictWord{9, 0, 784}, + dictWord{10, 0, 567}, + dictWord{10, 0, 708}, + dictWord{11, 0, 518}, + dictWord{11, 0, 613}, + dictWord{11, 0, 695}, + dictWord{11, 0, 716}, + dictWord{11, 0, 739}, + dictWord{11, 0, 770}, + dictWord{11, 0, 771}, + dictWord{11, 0, 848}, + dictWord{11, 0, 857}, + dictWord{11, 0, 931}, + dictWord{ + 11, + 0, + 947, + }, + dictWord{12, 0, 326}, + dictWord{12, 0, 387}, + dictWord{12, 0, 484}, + dictWord{12, 0, 528}, + dictWord{12, 0, 552}, + dictWord{12, 0, 613}, + dictWord{ + 13, + 0, + 189, + }, + dictWord{13, 0, 256}, + dictWord{13, 0, 340}, + dictWord{13, 0, 432}, + dictWord{13, 0, 436}, + dictWord{13, 0, 440}, + dictWord{13, 0, 454}, + dictWord{14, 0, 174}, + dictWord{14, 0, 220}, + dictWord{14, 0, 284}, + dictWord{14, 0, 390}, + dictWord{145, 0, 121}, + dictWord{135, 10, 158}, + dictWord{9, 0, 137}, + dictWord{138, 0, 221}, + dictWord{4, 11, 110}, + dictWord{10, 11, 415}, + dictWord{10, 11, 597}, + dictWord{142, 11, 206}, + dictWord{141, 11, 496}, + dictWord{135, 11, 205}, + dictWord{ + 151, + 10, + 25, + }, + dictWord{135, 11, 778}, + dictWord{7, 11, 1656}, + dictWord{7, 10, 2001}, + dictWord{9, 11, 369}, + dictWord{10, 11, 338}, + dictWord{10, 11, 490}, + dictWord{11, 11, 154}, + dictWord{11, 11, 545}, + dictWord{11, 11, 775}, + dictWord{13, 11, 77}, + dictWord{141, 11, 274}, + dictWord{4, 11, 444}, + dictWord{ + 10, + 11, + 146, + }, + dictWord{140, 11, 9}, + dictWord{7, 0, 390}, + dictWord{138, 0, 140}, + dictWord{135, 0, 1144}, + dictWord{134, 0, 464}, + dictWord{7, 10, 1461}, + dictWord{ + 140, + 
10, + 91, + }, + dictWord{132, 10, 602}, + dictWord{4, 11, 283}, + dictWord{135, 11, 1194}, + dictWord{5, 0, 407}, + dictWord{11, 0, 204}, + dictWord{11, 0, 243}, + dictWord{ + 11, + 0, + 489, + }, + dictWord{12, 0, 293}, + dictWord{19, 0, 37}, + dictWord{20, 0, 73}, + dictWord{150, 0, 38}, + dictWord{7, 0, 1218}, + dictWord{136, 0, 303}, + dictWord{ + 5, + 0, + 325, + }, + dictWord{8, 0, 5}, + dictWord{8, 0, 227}, + dictWord{9, 0, 105}, + dictWord{10, 0, 585}, + dictWord{12, 0, 614}, + dictWord{4, 10, 13}, + dictWord{5, 10, 567}, + dictWord{ + 7, + 10, + 1498, + }, + dictWord{9, 10, 124}, + dictWord{11, 10, 521}, + dictWord{140, 10, 405}, + dictWord{135, 10, 1006}, + dictWord{7, 0, 800}, + dictWord{10, 0, 12}, + dictWord{134, 11, 1720}, + dictWord{135, 0, 1783}, + dictWord{132, 10, 735}, + dictWord{138, 10, 812}, + dictWord{4, 10, 170}, + dictWord{135, 10, 323}, + dictWord{ + 6, + 0, + 621, + }, + dictWord{13, 0, 504}, + dictWord{144, 0, 89}, + dictWord{5, 10, 304}, + dictWord{135, 10, 1403}, + dictWord{137, 11, 216}, + dictWord{6, 0, 920}, + dictWord{ + 6, + 0, + 1104, + }, + dictWord{9, 11, 183}, + dictWord{139, 11, 286}, + dictWord{4, 0, 376}, + dictWord{133, 10, 742}, + dictWord{134, 0, 218}, + dictWord{8, 0, 641}, + dictWord{ + 11, + 0, + 388, + }, + dictWord{140, 0, 580}, + dictWord{7, 0, 454}, + dictWord{7, 0, 782}, + dictWord{8, 0, 768}, + dictWord{140, 0, 686}, + dictWord{137, 11, 33}, + dictWord{ + 133, + 10, + 111, + }, + dictWord{144, 0, 0}, + dictWord{10, 0, 676}, + dictWord{140, 0, 462}, + dictWord{6, 0, 164}, + dictWord{136, 11, 735}, + dictWord{133, 10, 444}, + dictWord{ + 150, + 0, + 50, + }, + dictWord{7, 11, 1862}, + dictWord{12, 11, 491}, + dictWord{12, 11, 520}, + dictWord{13, 11, 383}, + dictWord{14, 11, 244}, + dictWord{146, 11, 12}, + dictWord{ + 5, + 11, + 132, + }, + dictWord{9, 11, 486}, + dictWord{9, 11, 715}, + dictWord{10, 11, 458}, + dictWord{11, 11, 373}, + dictWord{11, 11, 668}, + dictWord{11, 11, 795}, + dictWord{11, 11, 897}, + dictWord{12, 11, 272}, + dictWord{12, 11, 424}, + dictWord{12, 11, 539}, + dictWord{12, 11, 558}, + dictWord{14, 11, 245}, + dictWord{ + 14, + 11, + 263, + }, + dictWord{14, 11, 264}, + dictWord{14, 11, 393}, + dictWord{142, 11, 403}, + dictWord{8, 10, 123}, + dictWord{15, 10, 6}, + dictWord{144, 10, 7}, + dictWord{ + 6, + 0, + 285, + }, + dictWord{8, 0, 654}, + dictWord{11, 0, 749}, + dictWord{12, 0, 190}, + dictWord{12, 0, 327}, + dictWord{13, 0, 120}, + dictWord{13, 0, 121}, + dictWord{13, 0, 327}, + dictWord{15, 0, 47}, + dictWord{146, 0, 40}, + dictWord{5, 11, 8}, + dictWord{6, 11, 89}, + dictWord{6, 11, 400}, + dictWord{7, 11, 1569}, + dictWord{7, 11, 1623}, + dictWord{ + 7, + 11, + 1850, + }, + dictWord{8, 11, 218}, + dictWord{8, 11, 422}, + dictWord{9, 11, 570}, + dictWord{138, 11, 626}, + dictWord{6, 11, 387}, + dictWord{7, 11, 882}, + dictWord{141, 11, 111}, + dictWord{6, 0, 343}, + dictWord{7, 0, 195}, + dictWord{9, 0, 226}, + dictWord{10, 0, 197}, + dictWord{10, 0, 575}, + dictWord{11, 0, 502}, + dictWord{ + 11, + 0, + 899, + }, + dictWord{6, 11, 224}, + dictWord{7, 11, 877}, + dictWord{137, 11, 647}, + dictWord{5, 10, 937}, + dictWord{135, 10, 100}, + dictWord{135, 11, 790}, + dictWord{150, 0, 29}, + dictWord{147, 0, 8}, + dictWord{134, 0, 1812}, + dictWord{149, 0, 8}, + dictWord{135, 11, 394}, + dictWord{7, 0, 1125}, + dictWord{9, 0, 143}, + dictWord{ + 11, + 0, + 61, + }, + dictWord{14, 0, 405}, + dictWord{150, 0, 21}, + dictWord{10, 11, 755}, + dictWord{147, 11, 29}, + dictWord{9, 11, 378}, + dictWord{141, 11, 
162}, + dictWord{135, 10, 922}, + dictWord{5, 10, 619}, + dictWord{133, 10, 698}, + dictWord{134, 0, 1327}, + dictWord{6, 0, 1598}, + dictWord{137, 0, 575}, + dictWord{ + 9, + 11, + 569, + }, + dictWord{12, 11, 12}, + dictWord{12, 11, 81}, + dictWord{12, 11, 319}, + dictWord{13, 11, 69}, + dictWord{14, 11, 259}, + dictWord{16, 11, 87}, + dictWord{ + 17, + 11, + 1, + }, + dictWord{17, 11, 21}, + dictWord{17, 11, 24}, + dictWord{18, 11, 15}, + dictWord{18, 11, 56}, + dictWord{18, 11, 59}, + dictWord{18, 11, 127}, + dictWord{18, 11, 154}, + dictWord{19, 11, 19}, + dictWord{148, 11, 31}, + dictWord{6, 0, 895}, + dictWord{135, 11, 1231}, + dictWord{5, 0, 959}, + dictWord{7, 11, 124}, + dictWord{136, 11, 38}, + dictWord{5, 11, 261}, + dictWord{7, 11, 78}, + dictWord{7, 11, 199}, + dictWord{8, 11, 815}, + dictWord{9, 11, 126}, + dictWord{138, 11, 342}, + dictWord{5, 10, 917}, + dictWord{134, 10, 1659}, + dictWord{7, 0, 1759}, + dictWord{5, 11, 595}, + dictWord{135, 11, 1863}, + dictWord{136, 0, 173}, + dictWord{134, 0, 266}, + dictWord{ + 142, + 0, + 261, + }, + dictWord{132, 11, 628}, + dictWord{5, 10, 251}, + dictWord{5, 10, 956}, + dictWord{8, 10, 268}, + dictWord{9, 10, 214}, + dictWord{146, 10, 142}, + dictWord{ + 7, + 11, + 266, + }, + dictWord{136, 11, 804}, + dictWord{135, 11, 208}, + dictWord{6, 11, 79}, + dictWord{7, 11, 1021}, + dictWord{135, 11, 1519}, + dictWord{11, 11, 704}, + dictWord{141, 11, 396}, + dictWord{5, 10, 346}, + dictWord{5, 10, 711}, + dictWord{136, 10, 390}, + dictWord{136, 11, 741}, + dictWord{134, 11, 376}, + dictWord{ + 134, + 0, + 1427, + }, + dictWord{6, 0, 1033}, + dictWord{6, 0, 1217}, + dictWord{136, 0, 300}, + dictWord{133, 10, 624}, + dictWord{6, 11, 100}, + dictWord{7, 11, 244}, + dictWord{ + 7, + 11, + 632, + }, + dictWord{7, 11, 1609}, + dictWord{8, 11, 178}, + dictWord{8, 11, 638}, + dictWord{141, 11, 58}, + dictWord{6, 0, 584}, + dictWord{5, 10, 783}, + dictWord{ + 7, + 10, + 1998, + }, + dictWord{135, 10, 2047}, + dictWord{5, 0, 427}, + dictWord{5, 0, 734}, + dictWord{7, 0, 478}, + dictWord{136, 0, 52}, + dictWord{7, 0, 239}, + dictWord{ + 11, + 0, + 217, + }, + dictWord{142, 0, 165}, + dictWord{134, 0, 1129}, + dictWord{6, 0, 168}, + dictWord{6, 0, 1734}, + dictWord{7, 0, 20}, + dictWord{7, 0, 1056}, + dictWord{8, 0, 732}, + dictWord{9, 0, 406}, + dictWord{9, 0, 911}, + dictWord{138, 0, 694}, + dictWord{132, 10, 594}, + dictWord{133, 11, 791}, + dictWord{7, 11, 686}, + dictWord{8, 11, 33}, + dictWord{8, 11, 238}, + dictWord{10, 11, 616}, + dictWord{11, 11, 467}, + dictWord{11, 11, 881}, + dictWord{13, 11, 217}, + dictWord{13, 11, 253}, + dictWord{ + 142, + 11, + 268, + }, + dictWord{137, 11, 476}, + dictWord{134, 0, 418}, + dictWord{133, 0, 613}, + dictWord{132, 0, 632}, + dictWord{132, 11, 447}, + dictWord{7, 0, 32}, + dictWord{ + 7, + 0, + 984, + }, + dictWord{8, 0, 85}, + dictWord{8, 0, 709}, + dictWord{9, 0, 579}, + dictWord{9, 0, 847}, + dictWord{9, 0, 856}, + dictWord{10, 0, 799}, + dictWord{11, 0, 258}, + dictWord{ + 11, + 0, + 1007, + }, + dictWord{12, 0, 331}, + dictWord{12, 0, 615}, + dictWord{13, 0, 188}, + dictWord{13, 0, 435}, + dictWord{14, 0, 8}, + dictWord{15, 0, 165}, + dictWord{ + 16, + 0, + 27, + }, + dictWord{20, 0, 40}, + dictWord{144, 11, 35}, + dictWord{4, 11, 128}, + dictWord{5, 11, 415}, + dictWord{6, 11, 462}, + dictWord{7, 11, 294}, + dictWord{7, 11, 578}, + dictWord{10, 11, 710}, + dictWord{139, 11, 86}, + dictWord{5, 0, 694}, + dictWord{136, 0, 909}, + dictWord{7, 0, 1109}, + dictWord{11, 0, 7}, + dictWord{5, 10, 37}, + 
dictWord{ + 6, + 10, + 39, + }, + dictWord{6, 10, 451}, + dictWord{7, 10, 218}, + dictWord{7, 10, 1166}, + dictWord{7, 10, 1687}, + dictWord{8, 10, 662}, + dictWord{144, 10, 2}, + dictWord{ + 136, + 11, + 587, + }, + dictWord{6, 11, 427}, + dictWord{7, 11, 1018}, + dictWord{138, 11, 692}, + dictWord{4, 11, 195}, + dictWord{6, 10, 508}, + dictWord{135, 11, 802}, + dictWord{4, 0, 167}, + dictWord{135, 0, 82}, + dictWord{5, 0, 62}, + dictWord{6, 0, 24}, + dictWord{6, 0, 534}, + dictWord{7, 0, 74}, + dictWord{7, 0, 678}, + dictWord{7, 0, 684}, + dictWord{ + 7, + 0, + 1043, + }, + dictWord{7, 0, 1072}, + dictWord{8, 0, 280}, + dictWord{8, 0, 541}, + dictWord{8, 0, 686}, + dictWord{9, 0, 258}, + dictWord{10, 0, 519}, + dictWord{11, 0, 252}, + dictWord{140, 0, 282}, + dictWord{138, 0, 33}, + dictWord{4, 0, 359}, + dictWord{133, 11, 738}, + dictWord{7, 0, 980}, + dictWord{9, 0, 328}, + dictWord{13, 0, 186}, + dictWord{13, 0, 364}, + dictWord{7, 10, 635}, + dictWord{7, 10, 796}, + dictWord{8, 10, 331}, + dictWord{9, 10, 330}, + dictWord{9, 10, 865}, + dictWord{10, 10, 119}, + dictWord{ + 10, + 10, + 235, + }, + dictWord{11, 10, 111}, + dictWord{11, 10, 129}, + dictWord{11, 10, 240}, + dictWord{12, 10, 31}, + dictWord{12, 10, 66}, + dictWord{12, 10, 222}, + dictWord{12, 10, 269}, + dictWord{12, 10, 599}, + dictWord{12, 10, 684}, + dictWord{12, 10, 689}, + dictWord{12, 10, 691}, + dictWord{142, 10, 345}, + dictWord{ + 137, + 10, + 527, + }, + dictWord{6, 0, 596}, + dictWord{7, 0, 585}, + dictWord{135, 10, 702}, + dictWord{134, 11, 1683}, + dictWord{133, 0, 211}, + dictWord{6, 0, 145}, + dictWord{ + 141, + 0, + 336, + }, + dictWord{134, 0, 1130}, + dictWord{7, 0, 873}, + dictWord{6, 10, 37}, + dictWord{7, 10, 1666}, + dictWord{8, 10, 195}, + dictWord{8, 10, 316}, + dictWord{ + 9, + 10, + 178, + }, + dictWord{9, 10, 276}, + dictWord{9, 10, 339}, + dictWord{9, 10, 536}, + dictWord{10, 10, 102}, + dictWord{10, 10, 362}, + dictWord{10, 10, 785}, + dictWord{ + 11, + 10, + 55, + }, + dictWord{11, 10, 149}, + dictWord{11, 10, 773}, + dictWord{13, 10, 416}, + dictWord{13, 10, 419}, + dictWord{14, 10, 38}, + dictWord{14, 10, 41}, + dictWord{ + 142, + 10, + 210, + }, + dictWord{8, 0, 840}, + dictWord{136, 0, 841}, + dictWord{132, 0, 263}, + dictWord{5, 11, 3}, + dictWord{8, 11, 578}, + dictWord{9, 11, 118}, + dictWord{ + 10, + 11, + 705, + }, + dictWord{12, 11, 383}, + dictWord{141, 11, 279}, + dictWord{132, 0, 916}, + dictWord{133, 11, 229}, + dictWord{133, 10, 645}, + dictWord{15, 0, 155}, + dictWord{16, 0, 79}, + dictWord{8, 11, 102}, + dictWord{10, 11, 578}, + dictWord{10, 11, 672}, + dictWord{12, 11, 496}, + dictWord{13, 11, 408}, + dictWord{14, 11, 121}, + dictWord{145, 11, 106}, + dictWord{4, 0, 599}, + dictWord{5, 0, 592}, + dictWord{6, 0, 1634}, + dictWord{7, 0, 5}, + dictWord{7, 0, 55}, + dictWord{7, 0, 67}, + dictWord{7, 0, 97}, + dictWord{7, 0, 691}, + dictWord{7, 0, 979}, + dictWord{7, 0, 1600}, + dictWord{7, 0, 1697}, + dictWord{8, 0, 207}, + dictWord{8, 0, 214}, + dictWord{8, 0, 231}, + dictWord{8, 0, 294}, + dictWord{8, 0, 336}, + dictWord{8, 0, 428}, + dictWord{8, 0, 471}, + dictWord{8, 0, 622}, + dictWord{8, 0, 626}, + dictWord{8, 0, 679}, + dictWord{8, 0, 759}, + dictWord{8, 0, 829}, + dictWord{9, 0, 11}, + dictWord{9, 0, 246}, + dictWord{9, 0, 484}, + dictWord{9, 0, 573}, + dictWord{9, 0, 706}, + dictWord{9, 0, 762}, + dictWord{9, 0, 798}, + dictWord{9, 0, 855}, + dictWord{9, 0, 870}, + dictWord{9, 0, 912}, + dictWord{10, 0, 303}, + dictWord{10, 0, 335}, + dictWord{10, 0, 424}, + 
dictWord{10, 0, 461}, + dictWord{10, 0, 543}, + dictWord{ + 10, + 0, + 759, + }, + dictWord{10, 0, 814}, + dictWord{11, 0, 59}, + dictWord{11, 0, 199}, + dictWord{11, 0, 235}, + dictWord{11, 0, 590}, + dictWord{11, 0, 631}, + dictWord{11, 0, 929}, + dictWord{11, 0, 963}, + dictWord{11, 0, 987}, + dictWord{12, 0, 114}, + dictWord{12, 0, 182}, + dictWord{12, 0, 226}, + dictWord{12, 0, 332}, + dictWord{12, 0, 439}, + dictWord{12, 0, 575}, + dictWord{12, 0, 598}, + dictWord{12, 0, 675}, + dictWord{13, 0, 8}, + dictWord{13, 0, 125}, + dictWord{13, 0, 194}, + dictWord{13, 0, 287}, + dictWord{ + 14, + 0, + 197, + }, + dictWord{14, 0, 383}, + dictWord{15, 0, 53}, + dictWord{17, 0, 63}, + dictWord{19, 0, 46}, + dictWord{19, 0, 98}, + dictWord{19, 0, 106}, + dictWord{148, 0, 85}, + dictWord{ + 7, + 0, + 1356, + }, + dictWord{132, 10, 290}, + dictWord{6, 10, 70}, + dictWord{7, 10, 1292}, + dictWord{10, 10, 762}, + dictWord{139, 10, 288}, + dictWord{150, 11, 55}, + dictWord{4, 0, 593}, + dictWord{8, 11, 115}, + dictWord{8, 11, 350}, + dictWord{9, 11, 489}, + dictWord{10, 11, 128}, + dictWord{11, 11, 306}, + dictWord{12, 11, 373}, + dictWord{14, 11, 30}, + dictWord{17, 11, 79}, + dictWord{147, 11, 80}, + dictWord{135, 11, 1235}, + dictWord{134, 0, 1392}, + dictWord{4, 11, 230}, + dictWord{ + 133, + 11, + 702, + }, + dictWord{147, 0, 126}, + dictWord{7, 10, 131}, + dictWord{7, 10, 422}, + dictWord{8, 10, 210}, + dictWord{140, 10, 573}, + dictWord{134, 0, 1179}, + dictWord{ + 139, + 11, + 435, + }, + dictWord{139, 10, 797}, + dictWord{134, 11, 1728}, + dictWord{4, 0, 162}, + dictWord{18, 11, 26}, + dictWord{19, 11, 42}, + dictWord{20, 11, 43}, + dictWord{21, 11, 0}, + dictWord{23, 11, 27}, + dictWord{152, 11, 14}, + dictWord{132, 10, 936}, + dictWord{6, 0, 765}, + dictWord{5, 10, 453}, + dictWord{134, 10, 441}, + dictWord{133, 0, 187}, + dictWord{135, 0, 1286}, + dictWord{6, 0, 635}, + dictWord{6, 0, 904}, + dictWord{6, 0, 1210}, + dictWord{134, 0, 1489}, + dictWord{4, 0, 215}, + dictWord{ + 8, + 0, + 890, + }, + dictWord{9, 0, 38}, + dictWord{10, 0, 923}, + dictWord{11, 0, 23}, + dictWord{11, 0, 127}, + dictWord{139, 0, 796}, + dictWord{6, 0, 1165}, + dictWord{ + 134, + 0, + 1306, + }, + dictWord{7, 0, 716}, + dictWord{13, 0, 97}, + dictWord{141, 0, 251}, + dictWord{132, 10, 653}, + dictWord{136, 0, 657}, + dictWord{146, 10, 80}, + dictWord{ + 5, + 11, + 622, + }, + dictWord{7, 11, 1032}, + dictWord{11, 11, 26}, + dictWord{11, 11, 213}, + dictWord{11, 11, 707}, + dictWord{12, 11, 380}, + dictWord{13, 11, 226}, + dictWord{141, 11, 355}, + dictWord{6, 0, 299}, + dictWord{5, 11, 70}, + dictWord{6, 11, 334}, + dictWord{9, 11, 171}, + dictWord{11, 11, 637}, + dictWord{12, 11, 202}, + dictWord{14, 11, 222}, + dictWord{145, 11, 42}, + dictWord{142, 0, 134}, + dictWord{4, 11, 23}, + dictWord{5, 11, 313}, + dictWord{5, 11, 1014}, + dictWord{6, 11, 50}, + dictWord{ + 6, + 11, + 51, + }, + dictWord{7, 11, 142}, + dictWord{7, 11, 384}, + dictWord{9, 11, 783}, + dictWord{139, 11, 741}, + dictWord{4, 11, 141}, + dictWord{7, 11, 559}, + dictWord{ + 8, + 11, + 640, + }, + dictWord{9, 11, 460}, + dictWord{12, 11, 183}, + dictWord{141, 11, 488}, + dictWord{136, 11, 614}, + dictWord{7, 10, 1368}, + dictWord{8, 10, 232}, + dictWord{8, 10, 361}, + dictWord{10, 10, 682}, + dictWord{138, 10, 742}, + dictWord{137, 10, 534}, + dictWord{6, 0, 1082}, + dictWord{140, 0, 658}, + dictWord{ + 137, + 10, + 27, + }, + dictWord{135, 0, 2002}, + dictWord{142, 10, 12}, + dictWord{4, 0, 28}, + dictWord{5, 0, 440}, + dictWord{7, 0, 248}, + 
dictWord{11, 0, 833}, + dictWord{140, 0, 344}, + dictWord{7, 10, 736}, + dictWord{139, 10, 264}, + dictWord{134, 10, 1657}, + dictWord{134, 0, 1654}, + dictWord{138, 0, 531}, + dictWord{5, 11, 222}, + dictWord{ + 9, + 11, + 140, + }, + dictWord{138, 11, 534}, + dictWord{6, 0, 634}, + dictWord{6, 0, 798}, + dictWord{134, 0, 840}, + dictWord{138, 11, 503}, + dictWord{135, 10, 127}, + dictWord{133, 0, 853}, + dictWord{5, 11, 154}, + dictWord{7, 11, 1491}, + dictWord{10, 11, 379}, + dictWord{138, 11, 485}, + dictWord{6, 0, 249}, + dictWord{7, 0, 1234}, + dictWord{139, 0, 573}, + dictWord{133, 11, 716}, + dictWord{7, 11, 1570}, + dictWord{140, 11, 542}, + dictWord{136, 10, 364}, + dictWord{138, 0, 527}, + dictWord{ + 4, + 11, + 91, + }, + dictWord{5, 11, 388}, + dictWord{5, 11, 845}, + dictWord{6, 11, 206}, + dictWord{6, 11, 252}, + dictWord{6, 11, 365}, + dictWord{7, 11, 136}, + dictWord{7, 11, 531}, + dictWord{8, 11, 264}, + dictWord{136, 11, 621}, + dictWord{134, 0, 1419}, + dictWord{135, 11, 1441}, + dictWord{7, 0, 49}, + dictWord{7, 0, 392}, + dictWord{8, 0, 20}, + dictWord{8, 0, 172}, + dictWord{8, 0, 690}, + dictWord{9, 0, 383}, + dictWord{9, 0, 845}, + dictWord{10, 0, 48}, + dictWord{11, 0, 293}, + dictWord{11, 0, 832}, + dictWord{ + 11, + 0, + 920, + }, + dictWord{11, 0, 984}, + dictWord{141, 0, 221}, + dictWord{5, 0, 858}, + dictWord{133, 0, 992}, + dictWord{5, 0, 728}, + dictWord{137, 10, 792}, + dictWord{ + 5, + 10, + 909, + }, + dictWord{9, 10, 849}, + dictWord{138, 10, 805}, + dictWord{7, 0, 525}, + dictWord{7, 0, 1579}, + dictWord{8, 0, 497}, + dictWord{136, 0, 573}, + dictWord{6, 0, 268}, + dictWord{137, 0, 62}, + dictWord{135, 11, 576}, + dictWord{134, 0, 1201}, + dictWord{5, 11, 771}, + dictWord{5, 11, 863}, + dictWord{5, 11, 898}, + dictWord{ + 6, + 11, + 1632, + }, + dictWord{6, 11, 1644}, + dictWord{134, 11, 1780}, + dictWord{133, 11, 331}, + dictWord{7, 0, 193}, + dictWord{7, 0, 1105}, + dictWord{10, 0, 495}, + dictWord{ + 7, + 10, + 397, + }, + dictWord{8, 10, 124}, + dictWord{8, 10, 619}, + dictWord{9, 10, 305}, + dictWord{11, 10, 40}, + dictWord{12, 10, 349}, + dictWord{13, 10, 134}, + dictWord{ + 13, + 10, + 295, + }, + dictWord{14, 10, 155}, + dictWord{15, 10, 120}, + dictWord{146, 10, 105}, + dictWord{138, 0, 106}, + dictWord{6, 0, 859}, + dictWord{5, 11, 107}, + dictWord{ + 7, + 11, + 201, + }, + dictWord{136, 11, 518}, + dictWord{6, 11, 446}, + dictWord{135, 11, 1817}, + dictWord{13, 0, 23}, + dictWord{4, 10, 262}, + dictWord{135, 10, 342}, + dictWord{133, 10, 641}, + dictWord{137, 11, 851}, + dictWord{6, 0, 925}, + dictWord{137, 0, 813}, + dictWord{132, 11, 504}, + dictWord{6, 0, 613}, + dictWord{ + 136, + 0, + 223, + }, + dictWord{4, 10, 99}, + dictWord{6, 10, 250}, + dictWord{6, 10, 346}, + dictWord{8, 10, 127}, + dictWord{138, 10, 81}, + dictWord{136, 0, 953}, + dictWord{ + 132, + 10, + 915, + }, + dictWord{139, 11, 892}, + dictWord{5, 10, 75}, + dictWord{9, 10, 517}, + dictWord{10, 10, 470}, + dictWord{12, 10, 155}, + dictWord{141, 10, 224}, + dictWord{ + 4, + 0, + 666, + }, + dictWord{7, 0, 1017}, + dictWord{7, 11, 996}, + dictWord{138, 11, 390}, + dictWord{5, 11, 883}, + dictWord{133, 11, 975}, + dictWord{14, 10, 83}, + dictWord{ + 142, + 11, + 83, + }, + dictWord{4, 0, 670}, + dictWord{5, 11, 922}, + dictWord{134, 11, 1707}, + dictWord{135, 0, 216}, + dictWord{9, 0, 40}, + dictWord{11, 0, 136}, + dictWord{ + 135, + 11, + 787, + }, + dictWord{5, 10, 954}, + dictWord{5, 11, 993}, + dictWord{7, 11, 515}, + dictWord{137, 11, 91}, + dictWord{139, 0, 259}, + 
dictWord{7, 0, 1114}, + dictWord{ + 9, + 0, + 310, + }, + dictWord{9, 0, 682}, + dictWord{10, 0, 440}, + dictWord{13, 0, 40}, + dictWord{6, 10, 304}, + dictWord{8, 10, 418}, + dictWord{11, 10, 341}, + dictWord{ + 139, + 10, + 675, + }, + dictWord{14, 0, 296}, + dictWord{9, 10, 410}, + dictWord{139, 10, 425}, + dictWord{10, 11, 377}, + dictWord{12, 11, 363}, + dictWord{13, 11, 68}, + dictWord{ + 13, + 11, + 94, + }, + dictWord{14, 11, 108}, + dictWord{142, 11, 306}, + dictWord{7, 0, 1401}, + dictWord{135, 0, 1476}, + dictWord{4, 0, 296}, + dictWord{6, 0, 475}, + dictWord{ + 7, + 0, + 401, + }, + dictWord{7, 0, 1410}, + dictWord{7, 0, 1594}, + dictWord{7, 0, 1674}, + dictWord{8, 0, 63}, + dictWord{8, 0, 660}, + dictWord{137, 0, 74}, + dictWord{4, 0, 139}, + dictWord{4, 0, 388}, + dictWord{140, 0, 188}, + dictWord{132, 0, 797}, + dictWord{132, 11, 766}, + dictWord{5, 11, 103}, + dictWord{7, 11, 921}, + dictWord{8, 11, 580}, + dictWord{8, 11, 593}, + dictWord{8, 11, 630}, + dictWord{138, 11, 28}, + dictWord{4, 11, 911}, + dictWord{5, 11, 867}, + dictWord{133, 11, 1013}, + dictWord{134, 10, 14}, + dictWord{134, 0, 1572}, + dictWord{134, 10, 1708}, + dictWord{21, 0, 39}, + dictWord{5, 10, 113}, + dictWord{6, 10, 243}, + dictWord{7, 10, 1865}, + dictWord{ + 11, + 10, + 161, + }, + dictWord{16, 10, 37}, + dictWord{145, 10, 99}, + dictWord{7, 11, 1563}, + dictWord{141, 11, 182}, + dictWord{5, 11, 135}, + dictWord{6, 11, 519}, + dictWord{ + 7, + 11, + 1722, + }, + dictWord{10, 11, 271}, + dictWord{11, 11, 261}, + dictWord{145, 11, 54}, + dictWord{132, 10, 274}, + dictWord{134, 0, 1594}, + dictWord{4, 11, 300}, + dictWord{5, 11, 436}, + dictWord{135, 11, 484}, + dictWord{4, 0, 747}, + dictWord{6, 0, 290}, + dictWord{7, 0, 649}, + dictWord{7, 0, 1479}, + dictWord{135, 0, 1583}, + dictWord{133, 11, 535}, + dictWord{147, 11, 82}, + dictWord{133, 0, 232}, + dictWord{137, 0, 887}, + dictWord{135, 10, 166}, + dictWord{136, 0, 521}, + dictWord{4, 0, 14}, + dictWord{7, 0, 472}, + dictWord{7, 0, 1801}, + dictWord{10, 0, 748}, + dictWord{141, 0, 458}, + dictWord{134, 0, 741}, + dictWord{134, 0, 992}, + dictWord{16, 0, 111}, + dictWord{137, 10, 304}, + dictWord{4, 0, 425}, + dictWord{5, 11, 387}, + dictWord{7, 11, 557}, + dictWord{12, 11, 547}, + dictWord{142, 11, 86}, + dictWord{ + 135, + 11, + 1747, + }, + dictWord{5, 10, 654}, + dictWord{135, 11, 1489}, + dictWord{7, 0, 789}, + dictWord{4, 11, 6}, + dictWord{5, 11, 708}, + dictWord{136, 11, 75}, + dictWord{ + 6, + 10, + 273, + }, + dictWord{10, 10, 188}, + dictWord{13, 10, 377}, + dictWord{146, 10, 77}, + dictWord{6, 0, 1593}, + dictWord{4, 11, 303}, + dictWord{7, 11, 619}, + dictWord{ + 10, + 11, + 547, + }, + dictWord{10, 11, 687}, + dictWord{11, 11, 122}, + dictWord{140, 11, 601}, + dictWord{134, 0, 1768}, + dictWord{135, 10, 410}, + dictWord{138, 11, 772}, + dictWord{11, 0, 233}, + dictWord{139, 10, 524}, + dictWord{5, 0, 943}, + dictWord{134, 0, 1779}, + dictWord{134, 10, 1785}, + dictWord{136, 11, 529}, + dictWord{ + 132, + 0, + 955, + }, + dictWord{5, 0, 245}, + dictWord{6, 0, 576}, + dictWord{7, 0, 582}, + dictWord{136, 0, 225}, + dictWord{132, 10, 780}, + dictWord{142, 0, 241}, + dictWord{ + 134, + 0, + 1943, + }, + dictWord{4, 11, 106}, + dictWord{7, 11, 310}, + dictWord{7, 11, 1785}, + dictWord{10, 11, 690}, + dictWord{139, 11, 717}, + dictWord{134, 0, 1284}, + dictWord{5, 11, 890}, + dictWord{133, 11, 988}, + dictWord{6, 11, 626}, + dictWord{142, 11, 431}, + dictWord{10, 11, 706}, + dictWord{145, 11, 32}, + dictWord{ + 137, + 11, + 332, + }, + 
dictWord{132, 11, 698}, + dictWord{135, 0, 709}, + dictWord{5, 10, 948}, + dictWord{138, 11, 17}, + dictWord{136, 0, 554}, + dictWord{134, 0, 1564}, + dictWord{139, 10, 941}, + dictWord{132, 0, 443}, + dictWord{134, 0, 909}, + dictWord{134, 11, 84}, + dictWord{142, 0, 280}, + dictWord{4, 10, 532}, + dictWord{5, 10, 706}, + dictWord{135, 10, 662}, + dictWord{132, 0, 729}, + dictWord{5, 10, 837}, + dictWord{6, 10, 1651}, + dictWord{139, 10, 985}, + dictWord{135, 10, 1861}, + dictWord{ + 4, + 0, + 348, + }, + dictWord{152, 11, 3}, + dictWord{5, 11, 986}, + dictWord{6, 11, 130}, + dictWord{7, 11, 1582}, + dictWord{8, 11, 458}, + dictWord{10, 11, 101}, + dictWord{ + 10, + 11, + 318, + }, + dictWord{138, 11, 823}, + dictWord{134, 0, 758}, + dictWord{4, 0, 298}, + dictWord{137, 0, 848}, + dictWord{4, 10, 330}, + dictWord{7, 10, 933}, + dictWord{ + 7, + 10, + 2012, + }, + dictWord{136, 10, 292}, + dictWord{7, 11, 1644}, + dictWord{137, 11, 129}, + dictWord{6, 0, 1422}, + dictWord{9, 0, 829}, + dictWord{135, 10, 767}, + dictWord{5, 0, 164}, + dictWord{7, 0, 121}, + dictWord{142, 0, 189}, + dictWord{7, 0, 812}, + dictWord{7, 0, 1261}, + dictWord{7, 0, 1360}, + dictWord{9, 0, 632}, + dictWord{ + 140, + 0, + 352, + }, + dictWord{135, 11, 1788}, + dictWord{139, 0, 556}, + dictWord{135, 11, 997}, + dictWord{145, 10, 114}, + dictWord{4, 0, 172}, + dictWord{9, 0, 611}, + dictWord{10, 0, 436}, + dictWord{12, 0, 673}, + dictWord{13, 0, 255}, + dictWord{137, 10, 883}, + dictWord{11, 0, 530}, + dictWord{138, 10, 274}, + dictWord{133, 0, 844}, + dictWord{134, 0, 984}, + dictWord{13, 0, 232}, + dictWord{18, 0, 35}, + dictWord{4, 10, 703}, + dictWord{135, 10, 207}, + dictWord{132, 10, 571}, + dictWord{9, 0, 263}, + dictWord{10, 0, 147}, + dictWord{138, 0, 492}, + dictWord{7, 11, 1756}, + dictWord{137, 11, 98}, + dictWord{5, 10, 873}, + dictWord{5, 10, 960}, + dictWord{8, 10, 823}, + dictWord{137, 10, 881}, + dictWord{133, 0, 537}, + dictWord{132, 0, 859}, + dictWord{7, 11, 1046}, + dictWord{139, 11, 160}, + dictWord{137, 0, 842}, + dictWord{ + 139, + 10, + 283, + }, + dictWord{5, 10, 33}, + dictWord{6, 10, 470}, + dictWord{139, 10, 424}, + dictWord{6, 11, 45}, + dictWord{7, 11, 433}, + dictWord{8, 11, 129}, + dictWord{ + 9, + 11, + 21, + }, + dictWord{10, 11, 392}, + dictWord{11, 11, 79}, + dictWord{12, 11, 499}, + dictWord{13, 11, 199}, + dictWord{141, 11, 451}, + dictWord{135, 0, 1291}, + dictWord{135, 10, 1882}, + dictWord{7, 11, 558}, + dictWord{136, 11, 353}, + dictWord{134, 0, 1482}, + dictWord{5, 0, 230}, + dictWord{5, 0, 392}, + dictWord{6, 0, 420}, + dictWord{9, 0, 568}, + dictWord{140, 0, 612}, + dictWord{6, 0, 262}, + dictWord{7, 10, 90}, + dictWord{7, 10, 664}, + dictWord{7, 10, 830}, + dictWord{7, 10, 1380}, + dictWord{ + 7, + 10, + 2025, + }, + dictWord{8, 11, 81}, + dictWord{8, 10, 448}, + dictWord{8, 10, 828}, + dictWord{9, 11, 189}, + dictWord{9, 11, 201}, + dictWord{11, 11, 478}, + dictWord{ + 11, + 11, + 712, + }, + dictWord{141, 11, 338}, + dictWord{142, 0, 31}, + dictWord{5, 11, 353}, + dictWord{151, 11, 26}, + dictWord{132, 0, 753}, + dictWord{4, 0, 0}, + dictWord{ + 5, + 0, + 41, + }, + dictWord{7, 0, 1459}, + dictWord{7, 0, 1469}, + dictWord{7, 0, 1859}, + dictWord{9, 0, 549}, + dictWord{139, 0, 905}, + dictWord{9, 10, 417}, + dictWord{ + 137, + 10, + 493, + }, + dictWord{135, 11, 1113}, + dictWord{133, 0, 696}, + dictWord{141, 11, 448}, + dictWord{134, 10, 295}, + dictWord{132, 0, 834}, + dictWord{4, 0, 771}, + dictWord{5, 10, 1019}, + dictWord{6, 11, 25}, + dictWord{7, 11, 855}, + 
dictWord{7, 11, 1258}, + dictWord{144, 11, 32}, + dictWord{134, 0, 1076}, + dictWord{133, 0, 921}, + dictWord{133, 0, 674}, + dictWord{4, 11, 4}, + dictWord{7, 11, 1118}, + dictWord{7, 11, 1320}, + dictWord{7, 11, 1706}, + dictWord{8, 11, 277}, + dictWord{9, 11, 622}, + dictWord{10, 11, 9}, + dictWord{11, 11, 724}, + dictWord{12, 11, 350}, + dictWord{12, 11, 397}, + dictWord{13, 11, 28}, + dictWord{13, 11, 159}, + dictWord{15, 11, 89}, + dictWord{18, 11, 5}, + dictWord{19, 11, 9}, + dictWord{20, 11, 34}, + dictWord{150, 11, 47}, + dictWord{134, 10, 208}, + dictWord{6, 0, 444}, + dictWord{136, 0, 308}, + dictWord{ + 6, + 0, + 180, + }, + dictWord{7, 0, 1137}, + dictWord{8, 0, 751}, + dictWord{139, 0, 805}, + dictWord{4, 0, 183}, + dictWord{7, 0, 271}, + dictWord{11, 0, 824}, + dictWord{ + 11, + 0, + 952, + }, + dictWord{13, 0, 278}, + dictWord{13, 0, 339}, + dictWord{13, 0, 482}, + dictWord{14, 0, 424}, + dictWord{148, 0, 99}, + dictWord{7, 11, 317}, + dictWord{ + 135, + 11, + 569, + }, + dictWord{4, 0, 19}, + dictWord{5, 0, 477}, + dictWord{5, 0, 596}, + dictWord{6, 0, 505}, + dictWord{7, 0, 1221}, + dictWord{11, 0, 907}, + dictWord{12, 0, 209}, + dictWord{141, 0, 214}, + dictWord{135, 0, 1215}, + dictWord{6, 0, 271}, + dictWord{7, 0, 398}, + dictWord{8, 0, 387}, + dictWord{10, 0, 344}, + dictWord{7, 10, 448}, + dictWord{ + 7, + 10, + 1629, + }, + dictWord{7, 10, 1813}, + dictWord{8, 10, 442}, + dictWord{9, 10, 710}, + dictWord{10, 10, 282}, + dictWord{138, 10, 722}, + dictWord{11, 10, 844}, + dictWord{12, 10, 104}, + dictWord{140, 10, 625}, + dictWord{134, 11, 255}, + dictWord{133, 10, 787}, + dictWord{134, 0, 1645}, + dictWord{11, 11, 956}, + dictWord{ + 151, + 11, + 3, + }, + dictWord{6, 0, 92}, + dictWord{6, 0, 188}, + dictWord{7, 0, 209}, + dictWord{7, 0, 1269}, + dictWord{7, 0, 1524}, + dictWord{7, 0, 1876}, + dictWord{8, 0, 661}, + dictWord{10, 0, 42}, + dictWord{10, 0, 228}, + dictWord{11, 0, 58}, + dictWord{11, 0, 1020}, + dictWord{12, 0, 58}, + dictWord{12, 0, 118}, + dictWord{141, 0, 32}, + dictWord{ + 4, + 0, + 459, + }, + dictWord{133, 0, 966}, + dictWord{4, 11, 536}, + dictWord{7, 11, 1141}, + dictWord{10, 11, 723}, + dictWord{139, 11, 371}, + dictWord{140, 0, 330}, + dictWord{134, 0, 1557}, + dictWord{7, 11, 285}, + dictWord{135, 11, 876}, + dictWord{136, 10, 491}, + dictWord{135, 11, 560}, + dictWord{6, 0, 18}, + dictWord{7, 0, 179}, + dictWord{7, 0, 932}, + dictWord{8, 0, 548}, + dictWord{8, 0, 757}, + dictWord{9, 0, 54}, + dictWord{9, 0, 65}, + dictWord{9, 0, 532}, + dictWord{9, 0, 844}, + dictWord{10, 0, 113}, + dictWord{10, 0, 117}, + dictWord{10, 0, 315}, + dictWord{10, 0, 560}, + dictWord{10, 0, 622}, + dictWord{10, 0, 798}, + dictWord{11, 0, 153}, + dictWord{11, 0, 351}, + dictWord{ + 11, + 0, + 375, + }, + dictWord{12, 0, 78}, + dictWord{12, 0, 151}, + dictWord{12, 0, 392}, + dictWord{12, 0, 666}, + dictWord{14, 0, 248}, + dictWord{143, 0, 23}, + dictWord{ + 6, + 0, + 1742, + }, + dictWord{132, 11, 690}, + dictWord{4, 10, 403}, + dictWord{5, 10, 441}, + dictWord{7, 10, 450}, + dictWord{10, 10, 840}, + dictWord{11, 10, 101}, + dictWord{ + 12, + 10, + 193, + }, + dictWord{141, 10, 430}, + dictWord{133, 0, 965}, + dictWord{134, 0, 182}, + dictWord{10, 0, 65}, + dictWord{10, 0, 488}, + dictWord{138, 0, 497}, + dictWord{135, 11, 1346}, + dictWord{6, 0, 973}, + dictWord{6, 0, 1158}, + dictWord{10, 11, 200}, + dictWord{19, 11, 2}, + dictWord{151, 11, 22}, + dictWord{4, 11, 190}, + dictWord{133, 11, 554}, + dictWord{133, 10, 679}, + dictWord{7, 0, 328}, + dictWord{137, 10, 
326}, + dictWord{133, 11, 1001}, + dictWord{9, 0, 588}, + dictWord{ + 138, + 0, + 260, + }, + dictWord{133, 11, 446}, + dictWord{135, 10, 1128}, + dictWord{135, 10, 1796}, + dictWord{147, 11, 119}, + dictWord{134, 0, 1786}, + dictWord{ + 6, + 0, + 1328, + }, + dictWord{6, 0, 1985}, + dictWord{8, 0, 962}, + dictWord{138, 0, 1017}, + dictWord{135, 0, 308}, + dictWord{11, 0, 508}, + dictWord{4, 10, 574}, + dictWord{ + 7, + 10, + 350, + }, + dictWord{7, 10, 1024}, + dictWord{8, 10, 338}, + dictWord{9, 10, 677}, + dictWord{138, 10, 808}, + dictWord{138, 11, 752}, + dictWord{135, 10, 1081}, + dictWord{137, 11, 96}, + dictWord{7, 10, 1676}, + dictWord{135, 10, 2037}, + dictWord{136, 0, 588}, + dictWord{132, 11, 304}, + dictWord{133, 0, 614}, + dictWord{ + 140, + 0, + 793, + }, + dictWord{136, 0, 287}, + dictWord{137, 10, 297}, + dictWord{141, 10, 37}, + dictWord{6, 11, 53}, + dictWord{6, 11, 199}, + dictWord{7, 11, 1408}, + dictWord{ + 8, + 11, + 32, + }, + dictWord{8, 11, 93}, + dictWord{9, 11, 437}, + dictWord{10, 11, 397}, + dictWord{10, 11, 629}, + dictWord{11, 11, 593}, + dictWord{11, 11, 763}, + dictWord{ + 13, + 11, + 326, + }, + dictWord{145, 11, 35}, + dictWord{134, 11, 105}, + dictWord{9, 11, 320}, + dictWord{10, 11, 506}, + dictWord{138, 11, 794}, + dictWord{5, 11, 114}, + dictWord{5, 11, 255}, + dictWord{141, 11, 285}, + dictWord{140, 0, 290}, + dictWord{7, 11, 2035}, + dictWord{8, 11, 19}, + dictWord{9, 11, 89}, + dictWord{138, 11, 831}, + dictWord{134, 0, 1136}, + dictWord{7, 0, 719}, + dictWord{8, 0, 796}, + dictWord{8, 0, 809}, + dictWord{8, 0, 834}, + dictWord{6, 10, 306}, + dictWord{7, 10, 1140}, + dictWord{ + 7, + 10, + 1340, + }, + dictWord{8, 10, 133}, + dictWord{138, 10, 449}, + dictWord{139, 10, 1011}, + dictWord{5, 0, 210}, + dictWord{6, 0, 213}, + dictWord{7, 0, 60}, + dictWord{ + 10, + 0, + 364, + }, + dictWord{139, 0, 135}, + dictWord{5, 0, 607}, + dictWord{8, 0, 326}, + dictWord{136, 0, 490}, + dictWord{138, 11, 176}, + dictWord{132, 0, 701}, + dictWord{ + 5, + 0, + 472, + }, + dictWord{7, 0, 380}, + dictWord{137, 0, 758}, + dictWord{135, 0, 1947}, + dictWord{6, 0, 1079}, + dictWord{138, 0, 278}, + dictWord{138, 11, 391}, + dictWord{ + 5, + 10, + 329, + }, + dictWord{8, 10, 260}, + dictWord{139, 11, 156}, + dictWord{4, 0, 386}, + dictWord{7, 0, 41}, + dictWord{8, 0, 405}, + dictWord{8, 0, 728}, + dictWord{9, 0, 497}, + dictWord{11, 0, 110}, + dictWord{11, 0, 360}, + dictWord{15, 0, 37}, + dictWord{144, 0, 84}, + dictWord{5, 0, 46}, + dictWord{7, 0, 1452}, + dictWord{7, 0, 1480}, + dictWord{ + 8, + 0, + 634, + }, + dictWord{140, 0, 472}, + dictWord{136, 0, 961}, + dictWord{4, 0, 524}, + dictWord{136, 0, 810}, + dictWord{10, 0, 238}, + dictWord{141, 0, 33}, + dictWord{ + 132, + 10, + 657, + }, + dictWord{152, 10, 7}, + dictWord{133, 0, 532}, + dictWord{5, 0, 997}, + dictWord{135, 10, 1665}, + dictWord{7, 11, 594}, + dictWord{7, 11, 851}, + dictWord{ + 7, + 11, + 1858, + }, + dictWord{9, 11, 411}, + dictWord{9, 11, 574}, + dictWord{9, 11, 666}, + dictWord{9, 11, 737}, + dictWord{10, 11, 346}, + dictWord{10, 11, 712}, + dictWord{ + 11, + 11, + 246, + }, + dictWord{11, 11, 432}, + dictWord{11, 11, 517}, + dictWord{11, 11, 647}, + dictWord{11, 11, 679}, + dictWord{11, 11, 727}, + dictWord{12, 11, 304}, + dictWord{12, 11, 305}, + dictWord{12, 11, 323}, + dictWord{12, 11, 483}, + dictWord{12, 11, 572}, + dictWord{12, 11, 593}, + dictWord{12, 11, 602}, + dictWord{ + 13, + 11, + 95, + }, + dictWord{13, 11, 101}, + dictWord{13, 11, 171}, + dictWord{13, 11, 315}, + dictWord{13, 
11, 378}, + dictWord{13, 11, 425}, + dictWord{13, 11, 475}, + dictWord{ + 14, + 11, + 63, + }, + dictWord{14, 11, 380}, + dictWord{14, 11, 384}, + dictWord{15, 11, 133}, + dictWord{18, 11, 112}, + dictWord{148, 11, 72}, + dictWord{5, 11, 955}, + dictWord{136, 11, 814}, + dictWord{134, 0, 1301}, + dictWord{5, 10, 66}, + dictWord{7, 10, 1896}, + dictWord{136, 10, 288}, + dictWord{133, 11, 56}, + dictWord{ + 134, + 10, + 1643, + }, + dictWord{6, 0, 1298}, + dictWord{148, 11, 100}, + dictWord{5, 0, 782}, + dictWord{5, 0, 829}, + dictWord{6, 0, 671}, + dictWord{6, 0, 1156}, + dictWord{6, 0, 1738}, + dictWord{137, 11, 621}, + dictWord{4, 0, 306}, + dictWord{5, 0, 570}, + dictWord{7, 0, 1347}, + dictWord{5, 10, 91}, + dictWord{5, 10, 648}, + dictWord{5, 10, 750}, + dictWord{ + 5, + 10, + 781, + }, + dictWord{6, 10, 54}, + dictWord{6, 10, 112}, + dictWord{6, 10, 402}, + dictWord{6, 10, 1732}, + dictWord{7, 10, 315}, + dictWord{7, 10, 749}, + dictWord{ + 7, + 10, + 1900, + }, + dictWord{9, 10, 78}, + dictWord{9, 10, 508}, + dictWord{10, 10, 611}, + dictWord{10, 10, 811}, + dictWord{11, 10, 510}, + dictWord{11, 10, 728}, + dictWord{ + 13, + 10, + 36, + }, + dictWord{14, 10, 39}, + dictWord{16, 10, 83}, + dictWord{17, 10, 124}, + dictWord{148, 10, 30}, + dictWord{8, 10, 570}, + dictWord{9, 11, 477}, + dictWord{ + 141, + 11, + 78, + }, + dictWord{4, 11, 639}, + dictWord{10, 11, 4}, + dictWord{10, 10, 322}, + dictWord{10, 10, 719}, + dictWord{11, 10, 407}, + dictWord{11, 11, 638}, + dictWord{ + 12, + 11, + 177, + }, + dictWord{148, 11, 57}, + dictWord{7, 0, 1823}, + dictWord{139, 0, 693}, + dictWord{7, 0, 759}, + dictWord{5, 11, 758}, + dictWord{8, 10, 125}, + dictWord{ + 8, + 10, + 369, + }, + dictWord{8, 10, 524}, + dictWord{10, 10, 486}, + dictWord{11, 10, 13}, + dictWord{11, 10, 381}, + dictWord{11, 10, 736}, + dictWord{11, 10, 766}, + dictWord{ + 11, + 10, + 845, + }, + dictWord{13, 10, 114}, + dictWord{13, 10, 292}, + dictWord{142, 10, 47}, + dictWord{7, 0, 1932}, + dictWord{6, 10, 1684}, + dictWord{6, 10, 1731}, + dictWord{7, 10, 356}, + dictWord{8, 10, 54}, + dictWord{8, 10, 221}, + dictWord{9, 10, 225}, + dictWord{9, 10, 356}, + dictWord{10, 10, 77}, + dictWord{10, 10, 446}, + dictWord{ + 10, + 10, + 731, + }, + dictWord{12, 10, 404}, + dictWord{141, 10, 491}, + dictWord{135, 11, 552}, + dictWord{135, 11, 1112}, + dictWord{4, 0, 78}, + dictWord{5, 0, 96}, + dictWord{ + 5, + 0, + 182, + }, + dictWord{6, 0, 1257}, + dictWord{7, 0, 1724}, + dictWord{7, 0, 1825}, + dictWord{10, 0, 394}, + dictWord{10, 0, 471}, + dictWord{11, 0, 532}, + dictWord{ + 14, + 0, + 340, + }, + dictWord{145, 0, 88}, + dictWord{139, 11, 328}, + dictWord{135, 0, 1964}, + dictWord{132, 10, 411}, + dictWord{4, 10, 80}, + dictWord{5, 10, 44}, + dictWord{ + 137, + 11, + 133, + }, + dictWord{5, 11, 110}, + dictWord{6, 11, 169}, + dictWord{6, 11, 1702}, + dictWord{7, 11, 400}, + dictWord{8, 11, 538}, + dictWord{9, 11, 184}, + dictWord{ + 9, + 11, + 524, + }, + dictWord{140, 11, 218}, + dictWord{4, 0, 521}, + dictWord{5, 10, 299}, + dictWord{7, 10, 1083}, + dictWord{140, 11, 554}, + dictWord{6, 11, 133}, + dictWord{ + 9, + 11, + 353, + }, + dictWord{12, 11, 628}, + dictWord{146, 11, 79}, + dictWord{6, 0, 215}, + dictWord{7, 0, 584}, + dictWord{7, 0, 1028}, + dictWord{7, 0, 1473}, + dictWord{ + 7, + 0, + 1721, + }, + dictWord{9, 0, 424}, + dictWord{138, 0, 779}, + dictWord{7, 0, 857}, + dictWord{7, 0, 1209}, + dictWord{7, 10, 1713}, + dictWord{9, 10, 537}, + dictWord{ + 10, + 10, + 165, + }, + dictWord{12, 10, 219}, + dictWord{140, 
10, 561}, + dictWord{4, 10, 219}, + dictWord{6, 11, 93}, + dictWord{7, 11, 1422}, + dictWord{7, 10, 1761}, + dictWord{ + 7, + 11, + 1851, + }, + dictWord{8, 11, 673}, + dictWord{9, 10, 86}, + dictWord{9, 11, 529}, + dictWord{140, 11, 43}, + dictWord{137, 11, 371}, + dictWord{136, 0, 671}, + dictWord{ + 5, + 0, + 328, + }, + dictWord{135, 0, 918}, + dictWord{132, 0, 529}, + dictWord{9, 11, 25}, + dictWord{10, 11, 467}, + dictWord{138, 11, 559}, + dictWord{4, 11, 335}, + dictWord{ + 135, + 11, + 942, + }, + dictWord{134, 0, 716}, + dictWord{134, 0, 1509}, + dictWord{6, 0, 67}, + dictWord{7, 0, 258}, + dictWord{7, 0, 1630}, + dictWord{9, 0, 354}, + dictWord{ + 9, + 0, + 675, + }, + dictWord{10, 0, 830}, + dictWord{14, 0, 80}, + dictWord{17, 0, 80}, + dictWord{140, 10, 428}, + dictWord{134, 0, 1112}, + dictWord{6, 0, 141}, + dictWord{7, 0, 225}, + dictWord{9, 0, 59}, + dictWord{9, 0, 607}, + dictWord{10, 0, 312}, + dictWord{11, 0, 687}, + dictWord{12, 0, 555}, + dictWord{13, 0, 373}, + dictWord{13, 0, 494}, + dictWord{ + 148, + 0, + 58, + }, + dictWord{133, 10, 514}, + dictWord{8, 11, 39}, + dictWord{10, 11, 773}, + dictWord{11, 11, 84}, + dictWord{12, 11, 205}, + dictWord{142, 11, 1}, + dictWord{ + 8, + 0, + 783, + }, + dictWord{5, 11, 601}, + dictWord{133, 11, 870}, + dictWord{136, 11, 594}, + dictWord{4, 10, 55}, + dictWord{5, 10, 301}, + dictWord{6, 10, 571}, + dictWord{ + 14, + 10, + 49, + }, + dictWord{146, 10, 102}, + dictWord{132, 11, 181}, + dictWord{134, 11, 1652}, + dictWord{133, 10, 364}, + dictWord{4, 11, 97}, + dictWord{5, 11, 147}, + dictWord{6, 11, 286}, + dictWord{7, 11, 1362}, + dictWord{141, 11, 176}, + dictWord{4, 10, 76}, + dictWord{7, 10, 1550}, + dictWord{9, 10, 306}, + dictWord{9, 10, 430}, + dictWord{9, 10, 663}, + dictWord{10, 10, 683}, + dictWord{11, 10, 427}, + dictWord{11, 10, 753}, + dictWord{12, 10, 334}, + dictWord{12, 10, 442}, + dictWord{ + 14, + 10, + 258, + }, + dictWord{14, 10, 366}, + dictWord{143, 10, 131}, + dictWord{137, 10, 52}, + dictWord{6, 0, 955}, + dictWord{134, 0, 1498}, + dictWord{6, 11, 375}, + dictWord{ + 7, + 11, + 169, + }, + dictWord{7, 11, 254}, + dictWord{136, 11, 780}, + dictWord{7, 0, 430}, + dictWord{11, 0, 46}, + dictWord{14, 0, 343}, + dictWord{142, 11, 343}, + dictWord{ + 135, + 0, + 1183, + }, + dictWord{5, 0, 602}, + dictWord{7, 0, 2018}, + dictWord{9, 0, 418}, + dictWord{9, 0, 803}, + dictWord{135, 11, 1447}, + dictWord{8, 0, 677}, + dictWord{ + 135, + 11, + 1044, + }, + dictWord{139, 11, 285}, + dictWord{4, 10, 656}, + dictWord{135, 10, 779}, + dictWord{135, 10, 144}, + dictWord{5, 11, 629}, + dictWord{ + 135, + 11, + 1549, + }, + dictWord{135, 10, 1373}, + dictWord{138, 11, 209}, + dictWord{7, 10, 554}, + dictWord{7, 10, 605}, + dictWord{141, 10, 10}, + dictWord{5, 10, 838}, + dictWord{ + 5, + 10, + 841, + }, + dictWord{134, 10, 1649}, + dictWord{133, 10, 1012}, + dictWord{6, 0, 1357}, + dictWord{134, 0, 1380}, + dictWord{144, 0, 53}, + dictWord{6, 0, 590}, + dictWord{7, 10, 365}, + dictWord{7, 10, 1357}, + dictWord{7, 10, 1497}, + dictWord{8, 10, 154}, + dictWord{141, 10, 281}, + dictWord{133, 10, 340}, + dictWord{ + 132, + 11, + 420, + }, + dictWord{135, 0, 329}, + dictWord{147, 11, 32}, + dictWord{4, 0, 469}, + dictWord{10, 11, 429}, + dictWord{139, 10, 495}, + dictWord{8, 10, 261}, + dictWord{ + 9, + 10, + 144, + }, + dictWord{9, 10, 466}, + dictWord{10, 10, 370}, + dictWord{12, 10, 470}, + dictWord{13, 10, 144}, + dictWord{142, 10, 348}, + dictWord{142, 0, 460}, + dictWord{4, 11, 325}, + dictWord{9, 10, 897}, + 
dictWord{138, 11, 125}, + dictWord{6, 0, 1743}, + dictWord{6, 10, 248}, + dictWord{9, 10, 546}, + dictWord{10, 10, 535}, + dictWord{11, 10, 681}, + dictWord{141, 10, 135}, + dictWord{4, 0, 990}, + dictWord{5, 0, 929}, + dictWord{6, 0, 340}, + dictWord{8, 0, 376}, + dictWord{8, 0, 807}, + dictWord{ + 8, + 0, + 963, + }, + dictWord{8, 0, 980}, + dictWord{138, 0, 1007}, + dictWord{134, 0, 1603}, + dictWord{140, 0, 250}, + dictWord{4, 11, 714}, + dictWord{133, 11, 469}, + dictWord{134, 10, 567}, + dictWord{136, 10, 445}, + dictWord{5, 0, 218}, + dictWord{7, 0, 1610}, + dictWord{8, 0, 646}, + dictWord{10, 0, 83}, + dictWord{11, 11, 138}, + dictWord{140, 11, 40}, + dictWord{7, 0, 1512}, + dictWord{135, 0, 1794}, + dictWord{135, 11, 1216}, + dictWord{11, 0, 0}, + dictWord{16, 0, 78}, + dictWord{132, 11, 718}, + dictWord{133, 0, 571}, + dictWord{132, 0, 455}, + dictWord{134, 0, 1012}, + dictWord{5, 11, 124}, + dictWord{5, 11, 144}, + dictWord{6, 11, 548}, + dictWord{7, 11, 15}, + dictWord{7, 11, 153}, + dictWord{137, 11, 629}, + dictWord{142, 11, 10}, + dictWord{6, 11, 75}, + dictWord{7, 11, 1531}, + dictWord{8, 11, 416}, + dictWord{9, 11, 240}, + dictWord{9, 11, 275}, + dictWord{10, 11, 100}, + dictWord{11, 11, 658}, + dictWord{11, 11, 979}, + dictWord{12, 11, 86}, + dictWord{13, 11, 468}, + dictWord{14, 11, 66}, + dictWord{14, 11, 207}, + dictWord{15, 11, 20}, + dictWord{15, 11, 25}, + dictWord{144, 11, 58}, + dictWord{132, 10, 577}, + dictWord{5, 11, 141}, + dictWord{ + 5, + 11, + 915, + }, + dictWord{6, 11, 1783}, + dictWord{7, 11, 211}, + dictWord{7, 11, 698}, + dictWord{7, 11, 1353}, + dictWord{9, 11, 83}, + dictWord{9, 11, 281}, + dictWord{ + 10, + 11, + 376, + }, + dictWord{10, 11, 431}, + dictWord{11, 11, 543}, + dictWord{12, 11, 664}, + dictWord{13, 11, 280}, + dictWord{13, 11, 428}, + dictWord{14, 11, 61}, + dictWord{ + 14, + 11, + 128, + }, + dictWord{17, 11, 52}, + dictWord{145, 11, 81}, + dictWord{6, 0, 161}, + dictWord{7, 0, 372}, + dictWord{137, 0, 597}, + dictWord{132, 0, 349}, + dictWord{ + 10, + 11, + 702, + }, + dictWord{139, 11, 245}, + dictWord{134, 0, 524}, + dictWord{134, 10, 174}, + dictWord{6, 0, 432}, + dictWord{9, 0, 751}, + dictWord{139, 0, 322}, + dictWord{147, 11, 94}, + dictWord{4, 11, 338}, + dictWord{133, 11, 400}, + dictWord{5, 0, 468}, + dictWord{10, 0, 325}, + dictWord{11, 0, 856}, + dictWord{12, 0, 345}, + dictWord{143, 0, 104}, + dictWord{133, 0, 223}, + dictWord{132, 0, 566}, + dictWord{4, 11, 221}, + dictWord{5, 11, 659}, + dictWord{5, 11, 989}, + dictWord{7, 11, 697}, + dictWord{7, 11, 1211}, + dictWord{138, 11, 284}, + dictWord{135, 11, 1070}, + dictWord{4, 0, 59}, + dictWord{135, 0, 1394}, + dictWord{6, 0, 436}, + dictWord{11, 0, 481}, + dictWord{5, 10, 878}, + dictWord{133, 10, 972}, + dictWord{4, 0, 48}, + dictWord{5, 0, 271}, + dictWord{135, 0, 953}, + dictWord{5, 0, 610}, + dictWord{136, 0, 457}, + dictWord{ + 4, + 0, + 773, + }, + dictWord{5, 0, 618}, + dictWord{137, 0, 756}, + dictWord{133, 0, 755}, + dictWord{135, 0, 1217}, + dictWord{138, 11, 507}, + dictWord{132, 10, 351}, + dictWord{132, 0, 197}, + dictWord{143, 11, 78}, + dictWord{4, 11, 188}, + dictWord{7, 11, 805}, + dictWord{11, 11, 276}, + dictWord{142, 11, 293}, + dictWord{ + 5, + 11, + 884, + }, + dictWord{139, 11, 991}, + dictWord{132, 10, 286}, + dictWord{10, 0, 259}, + dictWord{10, 0, 428}, + dictWord{7, 10, 438}, + dictWord{7, 10, 627}, + dictWord{ + 7, + 10, + 1516, + }, + dictWord{8, 10, 40}, + dictWord{9, 10, 56}, + dictWord{9, 10, 294}, + dictWord{11, 10, 969}, + dictWord{11, 10, 
995}, + dictWord{146, 10, 148}, + dictWord{ + 4, + 0, + 356, + }, + dictWord{5, 0, 217}, + dictWord{5, 0, 492}, + dictWord{5, 0, 656}, + dictWord{8, 0, 544}, + dictWord{136, 11, 544}, + dictWord{5, 0, 259}, + dictWord{6, 0, 1230}, + dictWord{7, 0, 414}, + dictWord{7, 0, 854}, + dictWord{142, 0, 107}, + dictWord{132, 0, 1007}, + dictWord{15, 0, 14}, + dictWord{144, 0, 5}, + dictWord{6, 0, 1580}, + dictWord{ + 132, + 10, + 738, + }, + dictWord{132, 11, 596}, + dictWord{132, 0, 673}, + dictWord{133, 10, 866}, + dictWord{6, 0, 1843}, + dictWord{135, 11, 1847}, + dictWord{4, 0, 165}, + dictWord{7, 0, 1398}, + dictWord{135, 0, 1829}, + dictWord{135, 11, 1634}, + dictWord{147, 11, 65}, + dictWord{6, 0, 885}, + dictWord{6, 0, 1009}, + dictWord{ + 137, + 0, + 809, + }, + dictWord{133, 10, 116}, + dictWord{132, 10, 457}, + dictWord{136, 11, 770}, + dictWord{9, 0, 498}, + dictWord{12, 0, 181}, + dictWord{10, 11, 361}, + dictWord{142, 11, 316}, + dictWord{134, 11, 595}, + dictWord{5, 0, 9}, + dictWord{7, 0, 297}, + dictWord{7, 0, 966}, + dictWord{140, 0, 306}, + dictWord{4, 11, 89}, + dictWord{ + 5, + 11, + 489, + }, + dictWord{6, 11, 315}, + dictWord{7, 11, 553}, + dictWord{7, 11, 1745}, + dictWord{138, 11, 243}, + dictWord{134, 0, 1487}, + dictWord{132, 0, 437}, + dictWord{ + 5, + 0, + 146, + }, + dictWord{6, 0, 411}, + dictWord{138, 0, 721}, + dictWord{5, 10, 527}, + dictWord{6, 10, 189}, + dictWord{135, 10, 859}, + dictWord{11, 10, 104}, + dictWord{ + 11, + 10, + 554, + }, + dictWord{15, 10, 60}, + dictWord{143, 10, 125}, + dictWord{6, 11, 1658}, + dictWord{9, 11, 3}, + dictWord{10, 11, 154}, + dictWord{11, 11, 641}, + dictWord{13, 11, 85}, + dictWord{13, 11, 201}, + dictWord{141, 11, 346}, + dictWord{6, 0, 177}, + dictWord{135, 0, 467}, + dictWord{134, 0, 1377}, + dictWord{ + 134, + 10, + 116, + }, + dictWord{136, 11, 645}, + dictWord{4, 11, 166}, + dictWord{5, 11, 505}, + dictWord{6, 11, 1670}, + dictWord{137, 11, 110}, + dictWord{133, 10, 487}, + dictWord{ + 4, + 10, + 86, + }, + dictWord{5, 10, 667}, + dictWord{5, 10, 753}, + dictWord{6, 10, 316}, + dictWord{6, 10, 455}, + dictWord{135, 10, 946}, + dictWord{133, 0, 200}, + dictWord{132, 0, 959}, + dictWord{6, 0, 1928}, + dictWord{134, 0, 1957}, + dictWord{139, 11, 203}, + dictWord{150, 10, 45}, + dictWord{4, 10, 79}, + dictWord{7, 10, 1773}, + dictWord{10, 10, 450}, + dictWord{11, 10, 589}, + dictWord{13, 10, 332}, + dictWord{13, 10, 493}, + dictWord{14, 10, 183}, + dictWord{14, 10, 334}, + dictWord{ + 14, + 10, + 362, + }, + dictWord{14, 10, 368}, + dictWord{14, 10, 376}, + dictWord{14, 10, 379}, + dictWord{19, 10, 90}, + dictWord{19, 10, 103}, + dictWord{19, 10, 127}, + dictWord{148, 10, 90}, + dictWord{6, 0, 1435}, + dictWord{135, 11, 1275}, + dictWord{134, 0, 481}, + dictWord{7, 11, 445}, + dictWord{8, 11, 307}, + dictWord{8, 11, 704}, + dictWord{10, 11, 41}, + dictWord{10, 11, 439}, + dictWord{11, 11, 237}, + dictWord{11, 11, 622}, + dictWord{140, 11, 201}, + dictWord{135, 11, 869}, + dictWord{ + 4, + 0, + 84, + }, + dictWord{7, 0, 1482}, + dictWord{10, 0, 76}, + dictWord{138, 0, 142}, + dictWord{11, 11, 277}, + dictWord{144, 11, 14}, + dictWord{135, 11, 1977}, + dictWord{ + 4, + 11, + 189, + }, + dictWord{5, 11, 713}, + dictWord{136, 11, 57}, + dictWord{133, 0, 1015}, + dictWord{138, 11, 371}, + dictWord{4, 0, 315}, + dictWord{5, 0, 507}, + dictWord{ + 135, + 0, + 1370, + }, + dictWord{4, 11, 552}, + dictWord{142, 10, 381}, + dictWord{9, 0, 759}, + dictWord{16, 0, 31}, + dictWord{16, 0, 39}, + dictWord{16, 0, 75}, + dictWord{18, 0, 24}, 
+ dictWord{20, 0, 42}, + dictWord{152, 0, 1}, + dictWord{134, 0, 712}, + dictWord{134, 0, 1722}, + dictWord{133, 10, 663}, + dictWord{133, 10, 846}, + dictWord{ + 8, + 0, + 222, + }, + dictWord{8, 0, 476}, + dictWord{9, 0, 238}, + dictWord{11, 0, 516}, + dictWord{11, 0, 575}, + dictWord{15, 0, 109}, + dictWord{146, 0, 100}, + dictWord{7, 0, 1402}, + dictWord{7, 0, 1414}, + dictWord{12, 0, 456}, + dictWord{5, 10, 378}, + dictWord{8, 10, 465}, + dictWord{9, 10, 286}, + dictWord{10, 10, 185}, + dictWord{10, 10, 562}, + dictWord{10, 10, 635}, + dictWord{11, 10, 31}, + dictWord{11, 10, 393}, + dictWord{13, 10, 312}, + dictWord{18, 10, 65}, + dictWord{18, 10, 96}, + dictWord{147, 10, 89}, + dictWord{4, 0, 986}, + dictWord{6, 0, 1958}, + dictWord{6, 0, 2032}, + dictWord{8, 0, 934}, + dictWord{138, 0, 985}, + dictWord{7, 10, 1880}, + dictWord{9, 10, 680}, + dictWord{139, 10, 798}, + dictWord{134, 10, 1770}, + dictWord{145, 11, 49}, + dictWord{132, 11, 614}, + dictWord{132, 10, 648}, + dictWord{5, 10, 945}, + dictWord{ + 6, + 10, + 1656, + }, + dictWord{6, 10, 1787}, + dictWord{7, 10, 167}, + dictWord{8, 10, 824}, + dictWord{9, 10, 391}, + dictWord{10, 10, 375}, + dictWord{139, 10, 185}, + dictWord{138, 11, 661}, + dictWord{7, 0, 1273}, + dictWord{135, 11, 1945}, + dictWord{7, 0, 706}, + dictWord{7, 0, 1058}, + dictWord{138, 0, 538}, + dictWord{7, 10, 1645}, + dictWord{8, 10, 352}, + dictWord{137, 10, 249}, + dictWord{132, 10, 152}, + dictWord{11, 0, 92}, + dictWord{11, 0, 196}, + dictWord{11, 0, 409}, + dictWord{11, 0, 450}, + dictWord{11, 0, 666}, + dictWord{11, 0, 777}, + dictWord{12, 0, 262}, + dictWord{13, 0, 385}, + dictWord{13, 0, 393}, + dictWord{15, 0, 115}, + dictWord{16, 0, 45}, + dictWord{145, 0, 82}, + dictWord{133, 10, 1006}, + dictWord{6, 0, 40}, + dictWord{135, 0, 1781}, + dictWord{9, 11, 614}, + dictWord{139, 11, 327}, + dictWord{5, 10, 420}, + dictWord{135, 10, 1449}, + dictWord{135, 0, 431}, + dictWord{10, 0, 97}, + dictWord{135, 10, 832}, + dictWord{6, 0, 423}, + dictWord{7, 0, 665}, + dictWord{ + 135, + 0, + 1210, + }, + dictWord{7, 0, 237}, + dictWord{8, 0, 664}, + dictWord{9, 0, 42}, + dictWord{9, 0, 266}, + dictWord{9, 0, 380}, + dictWord{9, 0, 645}, + dictWord{10, 0, 177}, + dictWord{ + 138, + 0, + 276, + }, + dictWord{7, 0, 264}, + dictWord{133, 10, 351}, + dictWord{8, 0, 213}, + dictWord{5, 10, 40}, + dictWord{7, 10, 598}, + dictWord{7, 10, 1638}, + dictWord{ + 9, + 10, + 166, + }, + dictWord{9, 10, 640}, + dictWord{9, 10, 685}, + dictWord{9, 10, 773}, + dictWord{11, 10, 215}, + dictWord{13, 10, 65}, + dictWord{14, 10, 172}, + dictWord{ + 14, + 10, + 317, + }, + dictWord{145, 10, 6}, + dictWord{5, 11, 84}, + dictWord{134, 11, 163}, + dictWord{8, 10, 60}, + dictWord{9, 10, 343}, + dictWord{139, 10, 769}, + dictWord{ + 137, + 0, + 455, + }, + dictWord{133, 11, 410}, + dictWord{8, 0, 906}, + dictWord{12, 0, 700}, + dictWord{12, 0, 706}, + dictWord{140, 0, 729}, + dictWord{21, 11, 33}, + dictWord{ + 150, + 11, + 40, + }, + dictWord{7, 10, 1951}, + dictWord{8, 10, 765}, + dictWord{8, 10, 772}, + dictWord{140, 10, 671}, + dictWord{7, 10, 108}, + dictWord{8, 10, 219}, + dictWord{ + 8, + 10, + 388, + }, + dictWord{9, 10, 639}, + dictWord{9, 10, 775}, + dictWord{11, 10, 275}, + dictWord{140, 10, 464}, + dictWord{5, 11, 322}, + dictWord{7, 11, 1941}, + dictWord{ + 8, + 11, + 186, + }, + dictWord{9, 11, 262}, + dictWord{10, 11, 187}, + dictWord{14, 11, 208}, + dictWord{146, 11, 130}, + dictWord{139, 0, 624}, + dictWord{8, 0, 574}, + dictWord{ + 5, + 11, + 227, + }, + dictWord{140, 
11, 29}, + dictWord{7, 11, 1546}, + dictWord{11, 11, 299}, + dictWord{142, 11, 407}, + dictWord{5, 10, 15}, + dictWord{6, 10, 56}, + dictWord{ + 7, + 10, + 1758, + }, + dictWord{8, 10, 500}, + dictWord{9, 10, 730}, + dictWord{11, 10, 331}, + dictWord{13, 10, 150}, + dictWord{142, 10, 282}, + dictWord{7, 11, 1395}, + dictWord{8, 11, 486}, + dictWord{9, 11, 236}, + dictWord{9, 11, 878}, + dictWord{10, 11, 218}, + dictWord{11, 11, 95}, + dictWord{19, 11, 17}, + dictWord{147, 11, 31}, + dictWord{135, 11, 2043}, + dictWord{4, 0, 354}, + dictWord{146, 11, 4}, + dictWord{140, 11, 80}, + dictWord{135, 0, 1558}, + dictWord{134, 10, 1886}, + dictWord{ + 5, + 10, + 205, + }, + dictWord{6, 10, 438}, + dictWord{137, 10, 711}, + dictWord{133, 11, 522}, + dictWord{133, 10, 534}, + dictWord{7, 0, 235}, + dictWord{7, 0, 1475}, + dictWord{ + 15, + 0, + 68, + }, + dictWord{146, 0, 120}, + dictWord{137, 10, 691}, + dictWord{4, 0, 942}, + dictWord{6, 0, 1813}, + dictWord{8, 0, 917}, + dictWord{10, 0, 884}, + dictWord{ + 12, + 0, + 696, + }, + dictWord{12, 0, 717}, + dictWord{12, 0, 723}, + dictWord{12, 0, 738}, + dictWord{12, 0, 749}, + dictWord{12, 0, 780}, + dictWord{16, 0, 97}, + dictWord{146, 0, 169}, + dictWord{6, 10, 443}, + dictWord{8, 11, 562}, + dictWord{9, 10, 237}, + dictWord{9, 10, 571}, + dictWord{9, 10, 695}, + dictWord{10, 10, 139}, + dictWord{11, 10, 715}, + dictWord{12, 10, 417}, + dictWord{141, 10, 421}, + dictWord{135, 0, 957}, + dictWord{133, 0, 830}, + dictWord{134, 11, 1771}, + dictWord{146, 0, 23}, + dictWord{ + 5, + 0, + 496, + }, + dictWord{6, 0, 694}, + dictWord{7, 0, 203}, + dictWord{7, 11, 1190}, + dictWord{137, 11, 620}, + dictWord{137, 11, 132}, + dictWord{6, 0, 547}, + dictWord{ + 134, + 0, + 1549, + }, + dictWord{8, 11, 258}, + dictWord{9, 11, 208}, + dictWord{137, 11, 359}, + dictWord{4, 0, 864}, + dictWord{5, 0, 88}, + dictWord{137, 0, 239}, + dictWord{ + 135, + 11, + 493, + }, + dictWord{4, 11, 317}, + dictWord{135, 11, 1279}, + dictWord{132, 11, 477}, + dictWord{4, 10, 578}, + dictWord{5, 11, 63}, + dictWord{133, 11, 509}, + dictWord{ + 7, + 0, + 650, + }, + dictWord{135, 0, 1310}, + dictWord{7, 0, 1076}, + dictWord{9, 0, 80}, + dictWord{11, 0, 78}, + dictWord{11, 0, 421}, + dictWord{11, 0, 534}, + dictWord{ + 140, + 0, + 545, + }, + dictWord{132, 11, 288}, + dictWord{12, 0, 553}, + dictWord{14, 0, 118}, + dictWord{133, 10, 923}, + dictWord{7, 0, 274}, + dictWord{11, 0, 479}, + dictWord{ + 139, + 0, + 507, + }, + dictWord{8, 11, 89}, + dictWord{8, 11, 620}, + dictWord{9, 11, 49}, + dictWord{10, 11, 774}, + dictWord{11, 11, 628}, + dictWord{12, 11, 322}, + dictWord{ + 143, + 11, + 124, + }, + dictWord{4, 0, 497}, + dictWord{135, 0, 1584}, + dictWord{7, 0, 261}, + dictWord{7, 0, 1115}, + dictWord{7, 0, 1354}, + dictWord{7, 0, 1404}, + dictWord{ + 7, + 0, + 1588, + }, + dictWord{7, 0, 1705}, + dictWord{7, 0, 1902}, + dictWord{9, 0, 465}, + dictWord{10, 0, 248}, + dictWord{10, 0, 349}, + dictWord{10, 0, 647}, + dictWord{11, 0, 527}, + dictWord{11, 0, 660}, + dictWord{11, 0, 669}, + dictWord{12, 0, 529}, + dictWord{13, 0, 305}, + dictWord{132, 10, 924}, + dictWord{133, 10, 665}, + dictWord{ + 136, + 0, + 13, + }, + dictWord{6, 0, 791}, + dictWord{138, 11, 120}, + dictWord{7, 0, 642}, + dictWord{8, 0, 250}, + dictWord{11, 0, 123}, + dictWord{11, 0, 137}, + dictWord{13, 0, 48}, + dictWord{142, 0, 95}, + dictWord{4, 10, 265}, + dictWord{7, 10, 807}, + dictWord{135, 10, 950}, + dictWord{5, 10, 93}, + dictWord{140, 10, 267}, + dictWord{135, 0, 1429}, + dictWord{4, 0, 949}, + 
dictWord{10, 0, 885}, + dictWord{10, 0, 891}, + dictWord{10, 0, 900}, + dictWord{10, 0, 939}, + dictWord{12, 0, 760}, + dictWord{142, 0, 449}, + dictWord{139, 11, 366}, + dictWord{132, 0, 818}, + dictWord{134, 11, 85}, + dictWord{135, 10, 994}, + dictWord{7, 0, 330}, + dictWord{5, 10, 233}, + dictWord{5, 10, 320}, + dictWord{6, 10, 140}, + dictWord{136, 10, 295}, + dictWord{4, 0, 1004}, + dictWord{8, 0, 982}, + dictWord{136, 0, 993}, + dictWord{133, 10, 978}, + dictWord{4, 10, 905}, + dictWord{6, 10, 1701}, + dictWord{137, 10, 843}, + dictWord{10, 0, 545}, + dictWord{140, 0, 301}, + dictWord{6, 0, 947}, + dictWord{134, 0, 1062}, + dictWord{ + 134, + 0, + 1188, + }, + dictWord{4, 0, 904}, + dictWord{5, 0, 794}, + dictWord{152, 10, 6}, + dictWord{134, 0, 1372}, + dictWord{135, 11, 608}, + dictWord{5, 11, 279}, + dictWord{ + 6, + 11, + 235, + }, + dictWord{7, 11, 468}, + dictWord{8, 11, 446}, + dictWord{9, 11, 637}, + dictWord{10, 11, 717}, + dictWord{11, 11, 738}, + dictWord{140, 11, 514}, + dictWord{ + 132, + 10, + 509, + }, + dictWord{5, 11, 17}, + dictWord{6, 11, 371}, + dictWord{137, 11, 528}, + dictWord{132, 0, 693}, + dictWord{4, 11, 115}, + dictWord{5, 11, 669}, + dictWord{ + 6, + 11, + 407, + }, + dictWord{8, 11, 311}, + dictWord{11, 11, 10}, + dictWord{141, 11, 5}, + dictWord{11, 0, 377}, + dictWord{7, 10, 273}, + dictWord{137, 11, 381}, + dictWord{ + 135, + 0, + 695, + }, + dictWord{7, 0, 386}, + dictWord{138, 0, 713}, + dictWord{135, 10, 1041}, + dictWord{134, 0, 1291}, + dictWord{6, 0, 7}, + dictWord{6, 0, 35}, + dictWord{ + 7, + 0, + 147, + }, + dictWord{7, 0, 1069}, + dictWord{7, 0, 1568}, + dictWord{7, 0, 1575}, + dictWord{7, 0, 1917}, + dictWord{8, 0, 43}, + dictWord{8, 0, 208}, + dictWord{9, 0, 128}, + dictWord{ + 9, + 0, + 866, + }, + dictWord{10, 0, 20}, + dictWord{11, 0, 981}, + dictWord{147, 0, 33}, + dictWord{7, 0, 893}, + dictWord{141, 0, 424}, + dictWord{139, 10, 234}, + dictWord{ + 150, + 11, + 56, + }, + dictWord{5, 11, 779}, + dictWord{5, 11, 807}, + dictWord{6, 11, 1655}, + dictWord{134, 11, 1676}, + dictWord{5, 10, 802}, + dictWord{7, 10, 2021}, + dictWord{136, 10, 805}, + dictWord{4, 11, 196}, + dictWord{5, 10, 167}, + dictWord{5, 11, 558}, + dictWord{5, 10, 899}, + dictWord{5, 11, 949}, + dictWord{6, 10, 410}, + dictWord{137, 10, 777}, + dictWord{137, 10, 789}, + dictWord{134, 10, 1705}, + dictWord{8, 0, 904}, + dictWord{140, 0, 787}, + dictWord{6, 0, 322}, + dictWord{9, 0, 552}, + dictWord{11, 0, 274}, + dictWord{13, 0, 209}, + dictWord{13, 0, 499}, + dictWord{14, 0, 85}, + dictWord{15, 0, 126}, + dictWord{145, 0, 70}, + dictWord{135, 10, 10}, + dictWord{ + 5, + 10, + 11, + }, + dictWord{6, 10, 117}, + dictWord{6, 10, 485}, + dictWord{7, 10, 1133}, + dictWord{9, 10, 582}, + dictWord{9, 10, 594}, + dictWord{11, 10, 21}, + dictWord{ + 11, + 10, + 818, + }, + dictWord{12, 10, 535}, + dictWord{141, 10, 86}, + dictWord{4, 10, 264}, + dictWord{7, 10, 1067}, + dictWord{8, 10, 204}, + dictWord{8, 10, 385}, + dictWord{139, 10, 953}, + dictWord{132, 11, 752}, + dictWord{138, 10, 56}, + dictWord{133, 10, 470}, + dictWord{6, 0, 1808}, + dictWord{8, 0, 83}, + dictWord{8, 0, 742}, + dictWord{8, 0, 817}, + dictWord{9, 0, 28}, + dictWord{9, 0, 29}, + dictWord{9, 0, 885}, + dictWord{10, 0, 387}, + dictWord{11, 0, 633}, + dictWord{11, 0, 740}, + dictWord{13, 0, 235}, + dictWord{13, 0, 254}, + dictWord{15, 0, 143}, + dictWord{143, 0, 146}, + dictWord{140, 0, 49}, + dictWord{134, 0, 1832}, + dictWord{4, 11, 227}, + dictWord{5, 11, 159}, + dictWord{5, 11, 409}, + dictWord{7, 11, 
80}, + dictWord{10, 11, 294}, + dictWord{10, 11, 479}, + dictWord{12, 11, 418}, + dictWord{14, 11, 50}, + dictWord{14, 11, 249}, + dictWord{142, 11, 295}, + dictWord{7, 11, 1470}, + dictWord{8, 11, 66}, + dictWord{8, 11, 137}, + dictWord{8, 11, 761}, + dictWord{9, 11, 638}, + dictWord{11, 11, 80}, + dictWord{11, 11, 212}, + dictWord{11, 11, 368}, + dictWord{11, 11, 418}, + dictWord{12, 11, 8}, + dictWord{13, 11, 15}, + dictWord{16, 11, 61}, + dictWord{17, 11, 59}, + dictWord{19, 11, 28}, + dictWord{148, 11, 84}, + dictWord{139, 10, 1015}, + dictWord{138, 11, 468}, + dictWord{135, 0, 421}, + dictWord{6, 0, 415}, + dictWord{ + 7, + 0, + 1049, + }, + dictWord{137, 0, 442}, + dictWord{6, 11, 38}, + dictWord{7, 11, 1220}, + dictWord{8, 11, 185}, + dictWord{8, 11, 256}, + dictWord{9, 11, 22}, + dictWord{ + 9, + 11, + 331, + }, + dictWord{10, 11, 738}, + dictWord{11, 11, 205}, + dictWord{11, 11, 540}, + dictWord{11, 11, 746}, + dictWord{13, 11, 399}, + dictWord{13, 11, 465}, + dictWord{ + 14, + 11, + 88, + }, + dictWord{142, 11, 194}, + dictWord{139, 0, 289}, + dictWord{133, 10, 715}, + dictWord{4, 0, 110}, + dictWord{10, 0, 415}, + dictWord{10, 0, 597}, + dictWord{142, 0, 206}, + dictWord{4, 11, 159}, + dictWord{6, 11, 115}, + dictWord{7, 11, 252}, + dictWord{7, 11, 257}, + dictWord{7, 11, 1928}, + dictWord{8, 11, 69}, + dictWord{ + 9, + 11, + 384, + }, + dictWord{10, 11, 91}, + dictWord{10, 11, 615}, + dictWord{12, 11, 375}, + dictWord{14, 11, 235}, + dictWord{18, 11, 117}, + dictWord{147, 11, 123}, + dictWord{5, 11, 911}, + dictWord{136, 11, 278}, + dictWord{7, 0, 205}, + dictWord{7, 0, 2000}, + dictWord{8, 10, 794}, + dictWord{9, 10, 400}, + dictWord{10, 10, 298}, + dictWord{142, 10, 228}, + dictWord{135, 11, 1774}, + dictWord{4, 11, 151}, + dictWord{7, 11, 1567}, + dictWord{8, 11, 351}, + dictWord{137, 11, 322}, + dictWord{ + 136, + 10, + 724, + }, + dictWord{133, 11, 990}, + dictWord{7, 0, 1539}, + dictWord{11, 0, 512}, + dictWord{13, 0, 205}, + dictWord{19, 0, 30}, + dictWord{22, 0, 36}, + dictWord{23, 0, 19}, + dictWord{135, 11, 1539}, + dictWord{5, 11, 194}, + dictWord{7, 11, 1662}, + dictWord{9, 11, 90}, + dictWord{140, 11, 180}, + dictWord{6, 10, 190}, + dictWord{ + 7, + 10, + 768, + }, + dictWord{135, 10, 1170}, + dictWord{134, 0, 1340}, + dictWord{4, 0, 283}, + dictWord{135, 0, 1194}, + dictWord{133, 11, 425}, + dictWord{133, 11, 971}, + dictWord{12, 0, 549}, + dictWord{14, 10, 67}, + dictWord{147, 10, 60}, + dictWord{135, 10, 1023}, + dictWord{134, 0, 1720}, + dictWord{138, 11, 587}, + dictWord{ + 5, + 11, + 72, + }, + dictWord{6, 11, 264}, + dictWord{7, 11, 21}, + dictWord{7, 11, 46}, + dictWord{7, 11, 2013}, + dictWord{8, 11, 215}, + dictWord{8, 11, 513}, + dictWord{10, 11, 266}, + dictWord{139, 11, 22}, + dictWord{5, 0, 319}, + dictWord{135, 0, 534}, + dictWord{6, 10, 137}, + dictWord{9, 10, 75}, + dictWord{9, 10, 253}, + dictWord{10, 10, 194}, + dictWord{138, 10, 444}, + dictWord{7, 0, 1180}, + dictWord{20, 0, 112}, + dictWord{6, 11, 239}, + dictWord{7, 11, 118}, + dictWord{10, 11, 95}, + dictWord{11, 11, 603}, + dictWord{13, 11, 443}, + dictWord{14, 11, 160}, + dictWord{143, 11, 4}, + dictWord{134, 11, 431}, + dictWord{5, 11, 874}, + dictWord{6, 11, 1677}, + dictWord{ + 11, + 10, + 643, + }, + dictWord{12, 10, 115}, + dictWord{143, 11, 0}, + dictWord{134, 0, 967}, + dictWord{6, 11, 65}, + dictWord{7, 11, 939}, + dictWord{7, 11, 1172}, + dictWord{ + 7, + 11, + 1671, + }, + dictWord{9, 11, 540}, + dictWord{10, 11, 696}, + dictWord{11, 11, 265}, + dictWord{11, 11, 732}, + 
dictWord{11, 11, 928}, + dictWord{11, 11, 937}, + dictWord{ + 12, + 11, + 399, + }, + dictWord{13, 11, 438}, + dictWord{149, 11, 19}, + dictWord{137, 11, 200}, + dictWord{135, 0, 1940}, + dictWord{5, 10, 760}, + dictWord{7, 10, 542}, + dictWord{8, 10, 135}, + dictWord{136, 10, 496}, + dictWord{140, 11, 44}, + dictWord{7, 11, 1655}, + dictWord{136, 11, 305}, + dictWord{7, 10, 319}, + dictWord{ + 7, + 10, + 355, + }, + dictWord{7, 10, 763}, + dictWord{10, 10, 389}, + dictWord{145, 10, 43}, + dictWord{136, 0, 735}, + dictWord{138, 10, 786}, + dictWord{137, 11, 19}, + dictWord{132, 11, 696}, + dictWord{5, 0, 132}, + dictWord{9, 0, 486}, + dictWord{9, 0, 715}, + dictWord{10, 0, 458}, + dictWord{11, 0, 373}, + dictWord{11, 0, 668}, + dictWord{ + 11, + 0, + 795, + }, + dictWord{11, 0, 897}, + dictWord{12, 0, 272}, + dictWord{12, 0, 424}, + dictWord{12, 0, 539}, + dictWord{12, 0, 558}, + dictWord{14, 0, 245}, + dictWord{ + 14, + 0, + 263, + }, + dictWord{14, 0, 264}, + dictWord{14, 0, 393}, + dictWord{142, 0, 403}, + dictWord{10, 0, 38}, + dictWord{139, 0, 784}, + dictWord{132, 0, 838}, + dictWord{ + 4, + 11, + 302, + }, + dictWord{135, 11, 1766}, + dictWord{133, 0, 379}, + dictWord{5, 0, 8}, + dictWord{6, 0, 89}, + dictWord{6, 0, 400}, + dictWord{7, 0, 1569}, + dictWord{7, 0, 1623}, + dictWord{7, 0, 1850}, + dictWord{8, 0, 218}, + dictWord{8, 0, 422}, + dictWord{9, 0, 570}, + dictWord{10, 0, 626}, + dictWord{4, 11, 726}, + dictWord{133, 11, 630}, + dictWord{ + 4, + 0, + 1017, + }, + dictWord{138, 0, 660}, + dictWord{6, 0, 387}, + dictWord{7, 0, 882}, + dictWord{141, 0, 111}, + dictWord{6, 0, 224}, + dictWord{7, 0, 877}, + dictWord{ + 137, + 0, + 647, + }, + dictWord{4, 10, 58}, + dictWord{5, 10, 286}, + dictWord{6, 10, 319}, + dictWord{7, 10, 402}, + dictWord{7, 10, 1254}, + dictWord{7, 10, 1903}, + dictWord{ + 8, + 10, + 356, + }, + dictWord{140, 10, 408}, + dictWord{135, 0, 790}, + dictWord{9, 0, 510}, + dictWord{10, 0, 53}, + dictWord{4, 10, 389}, + dictWord{9, 10, 181}, + dictWord{ + 10, + 10, + 29, + }, + dictWord{10, 10, 816}, + dictWord{11, 10, 311}, + dictWord{11, 10, 561}, + dictWord{12, 10, 67}, + dictWord{141, 10, 181}, + dictWord{142, 0, 458}, + dictWord{ + 6, + 11, + 118, + }, + dictWord{7, 11, 215}, + dictWord{7, 11, 1521}, + dictWord{140, 11, 11}, + dictWord{134, 0, 954}, + dictWord{135, 0, 394}, + dictWord{134, 0, 1367}, + dictWord{5, 11, 225}, + dictWord{133, 10, 373}, + dictWord{132, 0, 882}, + dictWord{7, 0, 1409}, + dictWord{135, 10, 1972}, + dictWord{135, 10, 1793}, + dictWord{ + 4, + 11, + 370, + }, + dictWord{5, 11, 756}, + dictWord{135, 11, 1326}, + dictWord{150, 11, 13}, + dictWord{7, 11, 354}, + dictWord{10, 11, 410}, + dictWord{139, 11, 815}, + dictWord{6, 11, 1662}, + dictWord{7, 11, 48}, + dictWord{8, 11, 771}, + dictWord{10, 11, 116}, + dictWord{13, 11, 104}, + dictWord{14, 11, 105}, + dictWord{14, 11, 184}, + dictWord{15, 11, 168}, + dictWord{19, 11, 92}, + dictWord{148, 11, 68}, + dictWord{7, 0, 124}, + dictWord{136, 0, 38}, + dictWord{5, 0, 261}, + dictWord{7, 0, 78}, + dictWord{ + 7, + 0, + 199, + }, + dictWord{8, 0, 815}, + dictWord{9, 0, 126}, + dictWord{10, 0, 342}, + dictWord{140, 0, 647}, + dictWord{4, 0, 628}, + dictWord{140, 0, 724}, + dictWord{7, 0, 266}, + dictWord{8, 0, 804}, + dictWord{7, 10, 1651}, + dictWord{145, 10, 89}, + dictWord{135, 0, 208}, + dictWord{134, 0, 1178}, + dictWord{6, 0, 79}, + dictWord{135, 0, 1519}, + dictWord{132, 10, 672}, + dictWord{133, 10, 737}, + dictWord{136, 0, 741}, + dictWord{132, 11, 120}, + dictWord{4, 0, 710}, + 
dictWord{6, 0, 376}, + dictWord{ + 134, + 0, + 606, + }, + dictWord{134, 0, 1347}, + dictWord{134, 0, 1494}, + dictWord{6, 0, 850}, + dictWord{6, 0, 1553}, + dictWord{137, 0, 821}, + dictWord{5, 10, 145}, + dictWord{ + 134, + 11, + 593, + }, + dictWord{7, 0, 1311}, + dictWord{140, 0, 135}, + dictWord{4, 0, 467}, + dictWord{5, 0, 405}, + dictWord{134, 0, 544}, + dictWord{5, 11, 820}, + dictWord{ + 135, + 11, + 931, + }, + dictWord{6, 0, 100}, + dictWord{7, 0, 244}, + dictWord{7, 0, 632}, + dictWord{7, 0, 1609}, + dictWord{8, 0, 178}, + dictWord{8, 0, 638}, + dictWord{141, 0, 58}, + dictWord{4, 10, 387}, + dictWord{135, 10, 1288}, + dictWord{6, 11, 151}, + dictWord{6, 11, 1675}, + dictWord{7, 11, 383}, + dictWord{151, 11, 10}, + dictWord{ + 132, + 0, + 481, + }, + dictWord{135, 10, 550}, + dictWord{134, 0, 1378}, + dictWord{6, 11, 1624}, + dictWord{11, 11, 11}, + dictWord{12, 11, 422}, + dictWord{13, 11, 262}, + dictWord{142, 11, 360}, + dictWord{133, 0, 791}, + dictWord{4, 11, 43}, + dictWord{5, 11, 344}, + dictWord{133, 11, 357}, + dictWord{7, 0, 1227}, + dictWord{140, 0, 978}, + dictWord{7, 0, 686}, + dictWord{8, 0, 33}, + dictWord{8, 0, 238}, + dictWord{10, 0, 616}, + dictWord{11, 0, 467}, + dictWord{11, 0, 881}, + dictWord{13, 0, 217}, + dictWord{ + 13, + 0, + 253, + }, + dictWord{142, 0, 268}, + dictWord{137, 0, 857}, + dictWord{8, 0, 467}, + dictWord{8, 0, 1006}, + dictWord{7, 11, 148}, + dictWord{8, 11, 284}, + dictWord{ + 141, + 11, + 63, + }, + dictWord{4, 10, 576}, + dictWord{135, 10, 1263}, + dictWord{133, 11, 888}, + dictWord{5, 10, 919}, + dictWord{134, 10, 1673}, + dictWord{20, 10, 37}, + dictWord{148, 11, 37}, + dictWord{132, 0, 447}, + dictWord{132, 11, 711}, + dictWord{4, 0, 128}, + dictWord{5, 0, 415}, + dictWord{6, 0, 462}, + dictWord{7, 0, 294}, + dictWord{ + 7, + 0, + 578, + }, + dictWord{10, 0, 710}, + dictWord{139, 0, 86}, + dictWord{4, 10, 82}, + dictWord{5, 10, 333}, + dictWord{5, 10, 904}, + dictWord{6, 10, 207}, + dictWord{7, 10, 325}, + dictWord{7, 10, 1726}, + dictWord{8, 10, 101}, + dictWord{10, 10, 778}, + dictWord{139, 10, 220}, + dictWord{136, 0, 587}, + dictWord{137, 11, 440}, + dictWord{ + 133, + 10, + 903, + }, + dictWord{6, 0, 427}, + dictWord{7, 0, 1018}, + dictWord{138, 0, 692}, + dictWord{4, 0, 195}, + dictWord{135, 0, 802}, + dictWord{140, 10, 147}, + dictWord{ + 134, + 0, + 1546, + }, + dictWord{134, 0, 684}, + dictWord{132, 10, 705}, + dictWord{136, 0, 345}, + dictWord{11, 11, 678}, + dictWord{140, 11, 307}, + dictWord{ + 133, + 0, + 365, + }, + dictWord{134, 0, 1683}, + dictWord{4, 11, 65}, + dictWord{5, 11, 479}, + dictWord{5, 11, 1004}, + dictWord{7, 11, 1913}, + dictWord{8, 11, 317}, + dictWord{ + 9, + 11, + 302, + }, + dictWord{10, 11, 612}, + dictWord{141, 11, 22}, + dictWord{138, 0, 472}, + dictWord{4, 11, 261}, + dictWord{135, 11, 510}, + dictWord{134, 10, 90}, + dictWord{142, 0, 433}, + dictWord{151, 0, 28}, + dictWord{4, 11, 291}, + dictWord{7, 11, 101}, + dictWord{9, 11, 515}, + dictWord{12, 11, 152}, + dictWord{12, 11, 443}, + dictWord{13, 11, 392}, + dictWord{142, 11, 357}, + dictWord{140, 0, 997}, + dictWord{5, 0, 3}, + dictWord{8, 0, 578}, + dictWord{9, 0, 118}, + dictWord{10, 0, 705}, + dictWord{ + 141, + 0, + 279, + }, + dictWord{135, 11, 1266}, + dictWord{7, 10, 813}, + dictWord{12, 10, 497}, + dictWord{141, 10, 56}, + dictWord{133, 0, 229}, + dictWord{6, 10, 125}, + dictWord{135, 10, 1277}, + dictWord{8, 0, 102}, + dictWord{10, 0, 578}, + dictWord{10, 0, 672}, + dictWord{12, 0, 496}, + dictWord{13, 0, 408}, + dictWord{14, 0, 
121}, + dictWord{17, 0, 106}, + dictWord{151, 10, 12}, + dictWord{6, 0, 866}, + dictWord{134, 0, 1080}, + dictWord{136, 0, 1022}, + dictWord{4, 11, 130}, + dictWord{135, 11, 843}, + dictWord{5, 11, 42}, + dictWord{5, 11, 879}, + dictWord{7, 11, 245}, + dictWord{7, 11, 324}, + dictWord{7, 11, 1532}, + dictWord{11, 11, 463}, + dictWord{11, 11, 472}, + dictWord{13, 11, 363}, + dictWord{144, 11, 52}, + dictWord{150, 0, 55}, + dictWord{8, 0, 115}, + dictWord{8, 0, 350}, + dictWord{9, 0, 489}, + dictWord{10, 0, 128}, + dictWord{ + 11, + 0, + 306, + }, + dictWord{12, 0, 373}, + dictWord{14, 0, 30}, + dictWord{17, 0, 79}, + dictWord{19, 0, 80}, + dictWord{4, 11, 134}, + dictWord{133, 11, 372}, + dictWord{ + 134, + 0, + 657, + }, + dictWord{134, 0, 933}, + dictWord{135, 11, 1147}, + dictWord{4, 0, 230}, + dictWord{133, 0, 702}, + dictWord{134, 0, 1728}, + dictWord{4, 0, 484}, + dictWord{ + 18, + 0, + 26, + }, + dictWord{19, 0, 42}, + dictWord{20, 0, 43}, + dictWord{21, 0, 0}, + dictWord{23, 0, 27}, + dictWord{152, 0, 14}, + dictWord{7, 0, 185}, + dictWord{135, 0, 703}, + dictWord{ + 6, + 0, + 417, + }, + dictWord{10, 0, 618}, + dictWord{7, 10, 1106}, + dictWord{9, 10, 770}, + dictWord{11, 10, 112}, + dictWord{140, 10, 413}, + dictWord{134, 0, 803}, + dictWord{132, 11, 644}, + dictWord{134, 0, 1262}, + dictWord{7, 11, 540}, + dictWord{12, 10, 271}, + dictWord{145, 10, 109}, + dictWord{135, 11, 123}, + dictWord{ + 132, + 0, + 633, + }, + dictWord{134, 11, 623}, + dictWord{4, 11, 908}, + dictWord{5, 11, 359}, + dictWord{5, 11, 508}, + dictWord{6, 11, 1723}, + dictWord{7, 11, 343}, + dictWord{ + 7, + 11, + 1996, + }, + dictWord{135, 11, 2026}, + dictWord{135, 0, 479}, + dictWord{10, 0, 262}, + dictWord{7, 10, 304}, + dictWord{9, 10, 646}, + dictWord{9, 10, 862}, + dictWord{ + 11, + 10, + 696, + }, + dictWord{12, 10, 208}, + dictWord{15, 10, 79}, + dictWord{147, 10, 108}, + dictWord{4, 11, 341}, + dictWord{135, 11, 480}, + dictWord{134, 0, 830}, + dictWord{5, 0, 70}, + dictWord{5, 0, 622}, + dictWord{6, 0, 334}, + dictWord{7, 0, 1032}, + dictWord{9, 0, 171}, + dictWord{11, 0, 26}, + dictWord{11, 0, 213}, + dictWord{ + 11, + 0, + 637, + }, + dictWord{11, 0, 707}, + dictWord{12, 0, 202}, + dictWord{12, 0, 380}, + dictWord{13, 0, 226}, + dictWord{13, 0, 355}, + dictWord{14, 0, 222}, + dictWord{145, 0, 42}, + dictWord{135, 10, 981}, + dictWord{143, 0, 217}, + dictWord{137, 11, 114}, + dictWord{4, 0, 23}, + dictWord{4, 0, 141}, + dictWord{5, 0, 313}, + dictWord{5, 0, 1014}, + dictWord{6, 0, 50}, + dictWord{6, 0, 51}, + dictWord{7, 0, 142}, + dictWord{7, 0, 384}, + dictWord{7, 0, 559}, + dictWord{8, 0, 640}, + dictWord{9, 0, 460}, + dictWord{9, 0, 783}, + dictWord{11, 0, 741}, + dictWord{12, 0, 183}, + dictWord{141, 0, 488}, + dictWord{141, 0, 360}, + dictWord{7, 0, 1586}, + dictWord{7, 11, 1995}, + dictWord{8, 11, 299}, + dictWord{11, 11, 890}, + dictWord{140, 11, 674}, + dictWord{132, 10, 434}, + dictWord{7, 0, 652}, + dictWord{134, 10, 550}, + dictWord{7, 0, 766}, + dictWord{5, 10, 553}, + dictWord{138, 10, 824}, + dictWord{7, 0, 737}, + dictWord{8, 0, 298}, + dictWord{136, 10, 452}, + dictWord{4, 11, 238}, + dictWord{5, 11, 503}, + dictWord{6, 11, 179}, + dictWord{7, 11, 2003}, + dictWord{8, 11, 381}, + dictWord{8, 11, 473}, + dictWord{9, 11, 149}, + dictWord{10, 11, 183}, + dictWord{15, 11, 45}, + dictWord{143, 11, 86}, + dictWord{133, 10, 292}, + dictWord{5, 0, 222}, + dictWord{9, 0, 655}, + dictWord{138, 0, 534}, + dictWord{138, 10, 135}, + dictWord{4, 11, 121}, + dictWord{5, 11, 156}, + dictWord{5, 
11, 349}, + dictWord{9, 11, 136}, + dictWord{10, 11, 605}, + dictWord{14, 11, 342}, + dictWord{147, 11, 107}, + dictWord{137, 0, 906}, + dictWord{6, 0, 1013}, + dictWord{134, 0, 1250}, + dictWord{6, 0, 1956}, + dictWord{6, 0, 2009}, + dictWord{8, 0, 991}, + dictWord{144, 0, 120}, + dictWord{135, 11, 1192}, + dictWord{ + 138, + 0, + 503, + }, + dictWord{5, 0, 154}, + dictWord{7, 0, 1491}, + dictWord{10, 0, 379}, + dictWord{138, 0, 485}, + dictWord{6, 0, 1867}, + dictWord{6, 0, 1914}, + dictWord{6, 0, 1925}, + dictWord{9, 0, 917}, + dictWord{9, 0, 925}, + dictWord{9, 0, 932}, + dictWord{9, 0, 951}, + dictWord{9, 0, 1007}, + dictWord{9, 0, 1013}, + dictWord{12, 0, 806}, + dictWord{ + 12, + 0, + 810, + }, + dictWord{12, 0, 814}, + dictWord{12, 0, 816}, + dictWord{12, 0, 824}, + dictWord{12, 0, 832}, + dictWord{12, 0, 837}, + dictWord{12, 0, 863}, + dictWord{ + 12, + 0, + 868, + }, + dictWord{12, 0, 870}, + dictWord{12, 0, 889}, + dictWord{12, 0, 892}, + dictWord{12, 0, 900}, + dictWord{12, 0, 902}, + dictWord{12, 0, 908}, + dictWord{12, 0, 933}, + dictWord{12, 0, 942}, + dictWord{12, 0, 949}, + dictWord{12, 0, 954}, + dictWord{15, 0, 175}, + dictWord{15, 0, 203}, + dictWord{15, 0, 213}, + dictWord{15, 0, 218}, + dictWord{15, 0, 225}, + dictWord{15, 0, 231}, + dictWord{15, 0, 239}, + dictWord{15, 0, 248}, + dictWord{15, 0, 252}, + dictWord{18, 0, 190}, + dictWord{18, 0, 204}, + dictWord{ + 18, + 0, + 215, + }, + dictWord{18, 0, 216}, + dictWord{18, 0, 222}, + dictWord{18, 0, 225}, + dictWord{18, 0, 230}, + dictWord{18, 0, 239}, + dictWord{18, 0, 241}, + dictWord{ + 21, + 0, + 42, + }, + dictWord{21, 0, 43}, + dictWord{21, 0, 44}, + dictWord{21, 0, 45}, + dictWord{21, 0, 46}, + dictWord{21, 0, 53}, + dictWord{24, 0, 27}, + dictWord{152, 0, 31}, + dictWord{ + 133, + 0, + 716, + }, + dictWord{135, 0, 844}, + dictWord{4, 0, 91}, + dictWord{5, 0, 388}, + dictWord{5, 0, 845}, + dictWord{6, 0, 206}, + dictWord{6, 0, 252}, + dictWord{6, 0, 365}, + dictWord{ + 7, + 0, + 136, + }, + dictWord{7, 0, 531}, + dictWord{136, 0, 621}, + dictWord{7, 10, 393}, + dictWord{10, 10, 603}, + dictWord{139, 10, 206}, + dictWord{6, 11, 80}, + dictWord{ + 6, + 11, + 1694, + }, + dictWord{7, 11, 173}, + dictWord{7, 11, 1974}, + dictWord{9, 11, 547}, + dictWord{10, 11, 730}, + dictWord{14, 11, 18}, + dictWord{150, 11, 39}, + dictWord{137, 0, 748}, + dictWord{4, 11, 923}, + dictWord{134, 11, 1711}, + dictWord{4, 10, 912}, + dictWord{137, 10, 232}, + dictWord{7, 10, 98}, + dictWord{7, 10, 1973}, + dictWord{136, 10, 716}, + dictWord{14, 0, 103}, + dictWord{133, 10, 733}, + dictWord{132, 11, 595}, + dictWord{12, 0, 158}, + dictWord{18, 0, 8}, + dictWord{19, 0, 62}, + dictWord{20, 0, 6}, + dictWord{22, 0, 4}, + dictWord{23, 0, 2}, + dictWord{23, 0, 9}, + dictWord{5, 11, 240}, + dictWord{6, 11, 459}, + dictWord{7, 11, 12}, + dictWord{7, 11, 114}, + dictWord{7, 11, 502}, + dictWord{7, 11, 1751}, + dictWord{7, 11, 1753}, + dictWord{7, 11, 1805}, + dictWord{8, 11, 658}, + dictWord{9, 11, 1}, + dictWord{11, 11, 959}, + dictWord{13, 11, 446}, + dictWord{142, 11, 211}, + dictWord{135, 0, 576}, + dictWord{5, 0, 771}, + dictWord{5, 0, 863}, + dictWord{5, 0, 898}, + dictWord{6, 0, 648}, + dictWord{ + 6, + 0, + 1632, + }, + dictWord{6, 0, 1644}, + dictWord{134, 0, 1780}, + dictWord{133, 0, 331}, + dictWord{7, 11, 633}, + dictWord{7, 11, 905}, + dictWord{7, 11, 909}, + dictWord{ + 7, + 11, + 1538, + }, + dictWord{9, 11, 767}, + dictWord{140, 11, 636}, + dictWord{140, 0, 632}, + dictWord{5, 0, 107}, + dictWord{7, 0, 201}, + dictWord{136, 0, 
518}, + dictWord{ + 6, + 0, + 446, + }, + dictWord{7, 0, 1817}, + dictWord{134, 11, 490}, + dictWord{9, 0, 851}, + dictWord{141, 0, 510}, + dictWord{7, 11, 250}, + dictWord{8, 11, 506}, + dictWord{ + 136, + 11, + 507, + }, + dictWord{4, 0, 504}, + dictWord{137, 10, 72}, + dictWord{132, 11, 158}, + dictWord{4, 11, 140}, + dictWord{7, 11, 362}, + dictWord{8, 11, 209}, + dictWord{ + 9, + 11, + 10, + }, + dictWord{9, 11, 160}, + dictWord{9, 11, 503}, + dictWord{10, 11, 689}, + dictWord{11, 11, 350}, + dictWord{11, 11, 553}, + dictWord{11, 11, 725}, + dictWord{ + 12, + 11, + 252, + }, + dictWord{12, 11, 583}, + dictWord{13, 11, 192}, + dictWord{13, 11, 352}, + dictWord{14, 11, 269}, + dictWord{14, 11, 356}, + dictWord{148, 11, 50}, + dictWord{6, 11, 597}, + dictWord{135, 11, 1318}, + dictWord{135, 10, 1454}, + dictWord{5, 0, 883}, + dictWord{5, 0, 975}, + dictWord{8, 0, 392}, + dictWord{148, 0, 7}, + dictWord{6, 11, 228}, + dictWord{7, 11, 1341}, + dictWord{9, 11, 408}, + dictWord{138, 11, 343}, + dictWord{11, 11, 348}, + dictWord{11, 10, 600}, + dictWord{12, 11, 99}, + dictWord{13, 10, 245}, + dictWord{18, 11, 1}, + dictWord{18, 11, 11}, + dictWord{147, 11, 4}, + dictWord{134, 11, 296}, + dictWord{5, 0, 922}, + dictWord{134, 0, 1707}, + dictWord{132, 11, 557}, + dictWord{4, 11, 548}, + dictWord{7, 10, 164}, + dictWord{7, 10, 1571}, + dictWord{9, 10, 107}, + dictWord{140, 10, 225}, + dictWord{ + 7, + 11, + 197, + }, + dictWord{8, 11, 142}, + dictWord{8, 11, 325}, + dictWord{9, 11, 150}, + dictWord{9, 11, 596}, + dictWord{10, 11, 350}, + dictWord{10, 11, 353}, + dictWord{ + 11, + 11, + 74, + }, + dictWord{11, 11, 315}, + dictWord{14, 11, 423}, + dictWord{143, 11, 141}, + dictWord{5, 0, 993}, + dictWord{7, 0, 515}, + dictWord{137, 0, 91}, + dictWord{4, 0, 131}, + dictWord{8, 0, 200}, + dictWord{5, 10, 484}, + dictWord{5, 10, 510}, + dictWord{6, 10, 434}, + dictWord{7, 10, 1000}, + dictWord{7, 10, 1098}, + dictWord{136, 10, 2}, + dictWord{152, 0, 10}, + dictWord{4, 11, 62}, + dictWord{5, 11, 83}, + dictWord{6, 11, 399}, + dictWord{6, 11, 579}, + dictWord{7, 11, 692}, + dictWord{7, 11, 846}, + dictWord{ + 7, + 11, + 1015, + }, + dictWord{7, 11, 1799}, + dictWord{8, 11, 403}, + dictWord{9, 11, 394}, + dictWord{10, 11, 133}, + dictWord{12, 11, 4}, + dictWord{12, 11, 297}, + dictWord{ + 12, + 11, + 452, + }, + dictWord{16, 11, 81}, + dictWord{18, 11, 19}, + dictWord{18, 11, 25}, + dictWord{21, 11, 14}, + dictWord{22, 11, 12}, + dictWord{151, 11, 18}, + dictWord{ + 140, + 11, + 459, + }, + dictWord{132, 11, 177}, + dictWord{7, 0, 1433}, + dictWord{9, 0, 365}, + dictWord{137, 11, 365}, + dictWord{132, 10, 460}, + dictWord{5, 0, 103}, + dictWord{ + 6, + 0, + 2004, + }, + dictWord{7, 0, 921}, + dictWord{8, 0, 580}, + dictWord{8, 0, 593}, + dictWord{8, 0, 630}, + dictWord{10, 0, 28}, + dictWord{5, 11, 411}, + dictWord{ + 135, + 11, + 653, + }, + dictWord{4, 10, 932}, + dictWord{133, 10, 891}, + dictWord{4, 0, 911}, + dictWord{5, 0, 867}, + dictWord{5, 0, 1013}, + dictWord{7, 0, 2034}, + dictWord{8, 0, 798}, + dictWord{136, 0, 813}, + dictWord{7, 11, 439}, + dictWord{10, 11, 727}, + dictWord{11, 11, 260}, + dictWord{139, 11, 684}, + dictWord{136, 10, 625}, + dictWord{ + 5, + 11, + 208, + }, + dictWord{7, 11, 753}, + dictWord{135, 11, 1528}, + dictWord{5, 0, 461}, + dictWord{7, 0, 1925}, + dictWord{12, 0, 39}, + dictWord{13, 0, 265}, + dictWord{ + 13, + 0, + 439, + }, + dictWord{134, 10, 76}, + dictWord{6, 0, 853}, + dictWord{8, 10, 92}, + dictWord{137, 10, 221}, + dictWord{5, 0, 135}, + dictWord{6, 0, 519}, 
+ dictWord{7, 0, 1722}, + dictWord{10, 0, 271}, + dictWord{11, 0, 261}, + dictWord{145, 0, 54}, + dictWord{139, 11, 814}, + dictWord{14, 0, 338}, + dictWord{148, 0, 81}, + dictWord{4, 0, 300}, + dictWord{133, 0, 436}, + dictWord{5, 0, 419}, + dictWord{5, 0, 687}, + dictWord{7, 0, 864}, + dictWord{9, 0, 470}, + dictWord{135, 11, 864}, + dictWord{9, 0, 836}, + dictWord{ + 133, + 11, + 242, + }, + dictWord{134, 0, 1937}, + dictWord{4, 10, 763}, + dictWord{133, 11, 953}, + dictWord{132, 10, 622}, + dictWord{132, 0, 393}, + dictWord{ + 133, + 10, + 253, + }, + dictWord{8, 0, 357}, + dictWord{10, 0, 745}, + dictWord{14, 0, 426}, + dictWord{17, 0, 94}, + dictWord{19, 0, 57}, + dictWord{135, 10, 546}, + dictWord{5, 11, 615}, + dictWord{146, 11, 37}, + dictWord{9, 10, 73}, + dictWord{10, 10, 110}, + dictWord{14, 10, 185}, + dictWord{145, 10, 119}, + dictWord{11, 0, 703}, + dictWord{7, 10, 624}, + dictWord{7, 10, 916}, + dictWord{10, 10, 256}, + dictWord{139, 10, 87}, + dictWord{133, 11, 290}, + dictWord{5, 10, 212}, + dictWord{12, 10, 35}, + dictWord{ + 141, + 10, + 382, + }, + dictWord{132, 11, 380}, + dictWord{5, 11, 52}, + dictWord{7, 11, 277}, + dictWord{9, 11, 368}, + dictWord{139, 11, 791}, + dictWord{133, 0, 387}, + dictWord{ + 10, + 11, + 138, + }, + dictWord{139, 11, 476}, + dictWord{4, 0, 6}, + dictWord{5, 0, 708}, + dictWord{136, 0, 75}, + dictWord{7, 0, 1351}, + dictWord{9, 0, 581}, + dictWord{10, 0, 639}, + dictWord{11, 0, 453}, + dictWord{140, 0, 584}, + dictWord{132, 0, 303}, + dictWord{138, 0, 772}, + dictWord{135, 10, 1175}, + dictWord{4, 0, 749}, + dictWord{ + 5, + 10, + 816, + }, + dictWord{6, 11, 256}, + dictWord{7, 11, 307}, + dictWord{7, 11, 999}, + dictWord{7, 11, 1481}, + dictWord{7, 11, 1732}, + dictWord{7, 11, 1738}, + dictWord{ + 8, + 11, + 265, + }, + dictWord{9, 11, 414}, + dictWord{11, 11, 316}, + dictWord{12, 11, 52}, + dictWord{13, 11, 420}, + dictWord{147, 11, 100}, + dictWord{135, 11, 1296}, + dictWord{ + 6, + 0, + 1065, + }, + dictWord{5, 10, 869}, + dictWord{5, 10, 968}, + dictWord{6, 10, 1626}, + dictWord{8, 10, 734}, + dictWord{136, 10, 784}, + dictWord{4, 10, 542}, + dictWord{ + 6, + 10, + 1716, + }, + dictWord{6, 10, 1727}, + dictWord{7, 10, 1082}, + dictWord{7, 10, 1545}, + dictWord{8, 10, 56}, + dictWord{8, 10, 118}, + dictWord{8, 10, 412}, + dictWord{ + 8, + 10, + 564, + }, + dictWord{9, 10, 888}, + dictWord{9, 10, 908}, + dictWord{10, 10, 50}, + dictWord{10, 10, 423}, + dictWord{11, 10, 685}, + dictWord{11, 10, 697}, + dictWord{11, 10, 933}, + dictWord{12, 10, 299}, + dictWord{13, 10, 126}, + dictWord{13, 10, 136}, + dictWord{13, 10, 170}, + dictWord{141, 10, 190}, + dictWord{ + 134, + 0, + 226, + }, + dictWord{4, 0, 106}, + dictWord{7, 0, 310}, + dictWord{11, 0, 717}, + dictWord{133, 11, 723}, + dictWord{5, 0, 890}, + dictWord{5, 0, 988}, + dictWord{4, 10, 232}, + dictWord{9, 10, 202}, + dictWord{10, 10, 474}, + dictWord{140, 10, 433}, + dictWord{6, 0, 626}, + dictWord{142, 0, 431}, + dictWord{10, 0, 706}, + dictWord{150, 0, 44}, + dictWord{13, 0, 51}, + dictWord{6, 10, 108}, + dictWord{7, 10, 1003}, + dictWord{7, 10, 1181}, + dictWord{8, 10, 111}, + dictWord{136, 10, 343}, + dictWord{132, 0, 698}, + dictWord{5, 11, 109}, + dictWord{6, 11, 1784}, + dictWord{7, 11, 1895}, + dictWord{12, 11, 296}, + dictWord{140, 11, 302}, + dictWord{134, 0, 828}, + dictWord{ + 134, + 10, + 1712, + }, + dictWord{138, 0, 17}, + dictWord{7, 0, 1929}, + dictWord{4, 10, 133}, + dictWord{5, 11, 216}, + dictWord{7, 10, 711}, + dictWord{7, 10, 1298}, + dictWord{ + 7, + 10, + 
1585, + }, + dictWord{7, 11, 1879}, + dictWord{9, 11, 141}, + dictWord{9, 11, 270}, + dictWord{9, 11, 679}, + dictWord{10, 11, 159}, + dictWord{10, 11, 553}, + dictWord{ + 11, + 11, + 197, + }, + dictWord{11, 11, 438}, + dictWord{12, 11, 538}, + dictWord{12, 11, 559}, + dictWord{13, 11, 193}, + dictWord{13, 11, 423}, + dictWord{14, 11, 144}, + dictWord{14, 11, 166}, + dictWord{14, 11, 167}, + dictWord{15, 11, 67}, + dictWord{147, 11, 84}, + dictWord{141, 11, 127}, + dictWord{7, 11, 1872}, + dictWord{ + 137, + 11, + 81, + }, + dictWord{6, 10, 99}, + dictWord{7, 10, 1808}, + dictWord{145, 10, 57}, + dictWord{134, 11, 391}, + dictWord{5, 0, 689}, + dictWord{6, 0, 84}, + dictWord{7, 0, 1250}, + dictWord{6, 10, 574}, + dictWord{7, 10, 428}, + dictWord{10, 10, 669}, + dictWord{11, 10, 485}, + dictWord{11, 10, 840}, + dictWord{12, 10, 300}, + dictWord{ + 142, + 10, + 250, + }, + dictWord{7, 11, 322}, + dictWord{136, 11, 249}, + dictWord{7, 11, 432}, + dictWord{135, 11, 1649}, + dictWord{135, 10, 1871}, + dictWord{137, 10, 252}, + dictWord{6, 11, 155}, + dictWord{140, 11, 234}, + dictWord{7, 0, 871}, + dictWord{19, 0, 27}, + dictWord{147, 11, 27}, + dictWord{140, 0, 498}, + dictWord{5, 0, 986}, + dictWord{6, 0, 130}, + dictWord{138, 0, 823}, + dictWord{6, 0, 1793}, + dictWord{7, 0, 1582}, + dictWord{8, 0, 458}, + dictWord{10, 0, 101}, + dictWord{10, 0, 318}, + dictWord{ + 10, + 0, + 945, + }, + dictWord{12, 0, 734}, + dictWord{16, 0, 104}, + dictWord{18, 0, 177}, + dictWord{6, 10, 323}, + dictWord{135, 10, 1564}, + dictWord{5, 11, 632}, + dictWord{ + 138, + 11, + 526, + }, + dictWord{10, 0, 435}, + dictWord{7, 10, 461}, + dictWord{136, 10, 775}, + dictWord{6, 11, 144}, + dictWord{7, 11, 948}, + dictWord{7, 11, 1042}, + dictWord{ + 7, + 11, + 1857, + }, + dictWord{8, 11, 235}, + dictWord{8, 11, 461}, + dictWord{9, 11, 453}, + dictWord{9, 11, 530}, + dictWord{10, 11, 354}, + dictWord{17, 11, 77}, + dictWord{ + 19, + 11, + 99, + }, + dictWord{148, 11, 79}, + dictWord{138, 0, 966}, + dictWord{7, 0, 1644}, + dictWord{137, 0, 129}, + dictWord{135, 0, 997}, + dictWord{136, 0, 502}, + dictWord{ + 5, + 11, + 196, + }, + dictWord{6, 11, 486}, + dictWord{7, 11, 212}, + dictWord{8, 11, 309}, + dictWord{136, 11, 346}, + dictWord{7, 10, 727}, + dictWord{146, 10, 73}, + dictWord{132, 0, 823}, + dictWord{132, 11, 686}, + dictWord{135, 0, 1927}, + dictWord{4, 0, 762}, + dictWord{7, 0, 1756}, + dictWord{137, 0, 98}, + dictWord{136, 10, 577}, + dictWord{24, 0, 8}, + dictWord{4, 11, 30}, + dictWord{5, 11, 43}, + dictWord{152, 11, 8}, + dictWord{7, 0, 1046}, + dictWord{139, 0, 160}, + dictWord{7, 0, 492}, + dictWord{ + 4, + 10, + 413, + }, + dictWord{5, 10, 677}, + dictWord{7, 11, 492}, + dictWord{8, 10, 432}, + dictWord{140, 10, 280}, + dictWord{6, 0, 45}, + dictWord{7, 0, 433}, + dictWord{8, 0, 129}, + dictWord{9, 0, 21}, + dictWord{10, 0, 392}, + dictWord{11, 0, 79}, + dictWord{12, 0, 499}, + dictWord{13, 0, 199}, + dictWord{141, 0, 451}, + dictWord{7, 0, 558}, + dictWord{ + 136, + 0, + 353, + }, + dictWord{4, 11, 220}, + dictWord{7, 11, 1535}, + dictWord{9, 11, 93}, + dictWord{139, 11, 474}, + dictWord{7, 10, 646}, + dictWord{7, 10, 1730}, + dictWord{ + 11, + 10, + 446, + }, + dictWord{141, 10, 178}, + dictWord{133, 0, 785}, + dictWord{134, 0, 1145}, + dictWord{8, 0, 81}, + dictWord{9, 0, 189}, + dictWord{9, 0, 201}, + dictWord{ + 11, + 0, + 478, + }, + dictWord{11, 0, 712}, + dictWord{141, 0, 338}, + dictWord{5, 0, 353}, + dictWord{151, 0, 26}, + dictWord{11, 0, 762}, + dictWord{132, 10, 395}, + dictWord{ + 134, 
+ 0, + 2024, + }, + dictWord{4, 0, 611}, + dictWord{133, 0, 606}, + dictWord{9, 10, 174}, + dictWord{10, 10, 164}, + dictWord{11, 10, 440}, + dictWord{11, 10, 841}, + dictWord{ + 143, + 10, + 98, + }, + dictWord{134, 10, 426}, + dictWord{10, 10, 608}, + dictWord{139, 10, 1002}, + dictWord{138, 10, 250}, + dictWord{6, 0, 25}, + dictWord{7, 0, 855}, + dictWord{7, 0, 1258}, + dictWord{144, 0, 32}, + dictWord{7, 11, 1725}, + dictWord{138, 11, 393}, + dictWord{5, 11, 263}, + dictWord{134, 11, 414}, + dictWord{6, 0, 2011}, + dictWord{133, 10, 476}, + dictWord{4, 0, 4}, + dictWord{7, 0, 1118}, + dictWord{7, 0, 1320}, + dictWord{7, 0, 1706}, + dictWord{8, 0, 277}, + dictWord{9, 0, 622}, + dictWord{ + 10, + 0, + 9, + }, + dictWord{11, 0, 724}, + dictWord{12, 0, 350}, + dictWord{12, 0, 397}, + dictWord{13, 0, 28}, + dictWord{13, 0, 159}, + dictWord{15, 0, 89}, + dictWord{18, 0, 5}, + dictWord{ + 19, + 0, + 9, + }, + dictWord{20, 0, 34}, + dictWord{22, 0, 47}, + dictWord{6, 11, 178}, + dictWord{6, 11, 1750}, + dictWord{8, 11, 251}, + dictWord{9, 11, 690}, + dictWord{ + 10, + 11, + 155, + }, + dictWord{10, 11, 196}, + dictWord{10, 11, 373}, + dictWord{11, 11, 698}, + dictWord{13, 11, 155}, + dictWord{148, 11, 93}, + dictWord{5, 11, 97}, + dictWord{ + 137, + 11, + 393, + }, + dictWord{7, 0, 764}, + dictWord{11, 0, 461}, + dictWord{12, 0, 172}, + dictWord{5, 10, 76}, + dictWord{6, 10, 458}, + dictWord{6, 10, 497}, + dictWord{ + 7, + 10, + 868, + }, + dictWord{9, 10, 658}, + dictWord{10, 10, 594}, + dictWord{11, 10, 566}, + dictWord{12, 10, 338}, + dictWord{141, 10, 200}, + dictWord{134, 0, 1449}, + dictWord{138, 11, 40}, + dictWord{134, 11, 1639}, + dictWord{134, 0, 1445}, + dictWord{6, 0, 1168}, + dictWord{4, 10, 526}, + dictWord{7, 10, 1029}, + dictWord{ + 135, + 10, + 1054, + }, + dictWord{4, 11, 191}, + dictWord{7, 11, 934}, + dictWord{8, 11, 647}, + dictWord{145, 11, 97}, + dictWord{132, 10, 636}, + dictWord{6, 0, 233}, + dictWord{ + 7, + 10, + 660, + }, + dictWord{7, 10, 1124}, + dictWord{17, 10, 31}, + dictWord{19, 10, 22}, + dictWord{151, 10, 14}, + dictWord{6, 10, 1699}, + dictWord{136, 11, 110}, + dictWord{ + 12, + 11, + 246, + }, + dictWord{15, 11, 162}, + dictWord{19, 11, 64}, + dictWord{20, 11, 8}, + dictWord{20, 11, 95}, + dictWord{22, 11, 24}, + dictWord{152, 11, 17}, + dictWord{ + 5, + 11, + 165, + }, + dictWord{9, 11, 346}, + dictWord{138, 11, 655}, + dictWord{5, 11, 319}, + dictWord{135, 11, 534}, + dictWord{134, 0, 255}, + dictWord{9, 0, 216}, + dictWord{ + 8, + 11, + 128, + }, + dictWord{139, 11, 179}, + dictWord{9, 0, 183}, + dictWord{139, 0, 286}, + dictWord{11, 0, 956}, + dictWord{151, 0, 3}, + dictWord{4, 0, 536}, + dictWord{ + 7, + 0, + 1141, + }, + dictWord{10, 0, 723}, + dictWord{139, 0, 371}, + dictWord{4, 10, 279}, + dictWord{7, 10, 301}, + dictWord{137, 10, 362}, + dictWord{7, 0, 285}, + dictWord{ + 5, + 11, + 57, + }, + dictWord{6, 11, 101}, + dictWord{6, 11, 1663}, + dictWord{7, 11, 132}, + dictWord{7, 11, 1048}, + dictWord{7, 11, 1154}, + dictWord{7, 11, 1415}, + dictWord{ + 7, + 11, + 1507, + }, + dictWord{12, 11, 493}, + dictWord{15, 11, 105}, + dictWord{151, 11, 15}, + dictWord{5, 11, 459}, + dictWord{7, 11, 1073}, + dictWord{7, 10, 1743}, + dictWord{ + 8, + 11, + 241, + }, + dictWord{136, 11, 334}, + dictWord{4, 10, 178}, + dictWord{133, 10, 399}, + dictWord{135, 0, 560}, + dictWord{132, 0, 690}, + dictWord{135, 0, 1246}, + dictWord{18, 0, 157}, + dictWord{147, 0, 63}, + dictWord{10, 0, 599}, + dictWord{11, 0, 33}, + dictWord{12, 0, 571}, + dictWord{149, 0, 1}, + 
dictWord{6, 11, 324}, + dictWord{ + 6, + 11, + 520, + }, + dictWord{7, 11, 338}, + dictWord{7, 11, 1616}, + dictWord{7, 11, 1729}, + dictWord{8, 11, 228}, + dictWord{9, 11, 69}, + dictWord{139, 11, 750}, + dictWord{ + 7, + 0, + 1862, + }, + dictWord{12, 0, 491}, + dictWord{12, 0, 520}, + dictWord{13, 0, 383}, + dictWord{142, 0, 244}, + dictWord{135, 11, 734}, + dictWord{134, 10, 1692}, + dictWord{10, 0, 448}, + dictWord{11, 0, 630}, + dictWord{17, 0, 117}, + dictWord{6, 10, 202}, + dictWord{7, 11, 705}, + dictWord{12, 10, 360}, + dictWord{17, 10, 118}, + dictWord{18, 10, 27}, + dictWord{148, 10, 67}, + dictWord{4, 11, 73}, + dictWord{6, 11, 612}, + dictWord{7, 11, 927}, + dictWord{7, 11, 1822}, + dictWord{8, 11, 217}, + dictWord{ + 9, + 11, + 472, + }, + dictWord{9, 11, 765}, + dictWord{9, 11, 766}, + dictWord{10, 11, 408}, + dictWord{11, 11, 51}, + dictWord{11, 11, 793}, + dictWord{12, 11, 266}, + dictWord{ + 15, + 11, + 158, + }, + dictWord{20, 11, 89}, + dictWord{150, 11, 32}, + dictWord{4, 0, 190}, + dictWord{133, 0, 554}, + dictWord{133, 0, 1001}, + dictWord{5, 11, 389}, + dictWord{ + 8, + 11, + 636, + }, + dictWord{137, 11, 229}, + dictWord{5, 0, 446}, + dictWord{7, 10, 872}, + dictWord{10, 10, 516}, + dictWord{139, 10, 167}, + dictWord{137, 10, 313}, + dictWord{132, 10, 224}, + dictWord{134, 0, 1313}, + dictWord{5, 10, 546}, + dictWord{7, 10, 35}, + dictWord{8, 10, 11}, + dictWord{8, 10, 12}, + dictWord{9, 10, 315}, + dictWord{9, 10, 533}, + dictWord{10, 10, 802}, + dictWord{11, 10, 166}, + dictWord{12, 10, 525}, + dictWord{142, 10, 243}, + dictWord{6, 0, 636}, + dictWord{137, 0, 837}, + dictWord{5, 10, 241}, + dictWord{8, 10, 242}, + dictWord{9, 10, 451}, + dictWord{10, 10, 667}, + dictWord{11, 10, 598}, + dictWord{140, 10, 429}, + dictWord{22, 10, 46}, + dictWord{150, 11, 46}, + dictWord{136, 11, 472}, + dictWord{11, 0, 278}, + dictWord{142, 0, 73}, + dictWord{141, 11, 185}, + dictWord{132, 0, 868}, + dictWord{ + 134, + 0, + 972, + }, + dictWord{4, 10, 366}, + dictWord{137, 10, 516}, + dictWord{138, 0, 1010}, + dictWord{5, 11, 189}, + dictWord{6, 10, 1736}, + dictWord{7, 11, 442}, + dictWord{ + 7, + 11, + 443, + }, + dictWord{8, 11, 281}, + dictWord{12, 11, 174}, + dictWord{13, 11, 83}, + dictWord{141, 11, 261}, + dictWord{139, 11, 384}, + dictWord{6, 11, 2}, + dictWord{ + 7, + 11, + 191, + }, + dictWord{7, 11, 446}, + dictWord{7, 11, 758}, + dictWord{7, 11, 1262}, + dictWord{7, 11, 1737}, + dictWord{8, 11, 22}, + dictWord{8, 11, 270}, + dictWord{ + 8, + 11, + 612, + }, + dictWord{9, 11, 4}, + dictWord{9, 11, 167}, + dictWord{9, 11, 312}, + dictWord{9, 11, 436}, + dictWord{10, 11, 156}, + dictWord{10, 11, 216}, + dictWord{ + 10, + 11, + 311, + }, + dictWord{10, 11, 623}, + dictWord{11, 11, 72}, + dictWord{11, 11, 330}, + dictWord{11, 11, 455}, + dictWord{12, 11, 101}, + dictWord{12, 11, 321}, + dictWord{ + 12, + 11, + 504, + }, + dictWord{12, 11, 530}, + dictWord{12, 11, 543}, + dictWord{13, 11, 17}, + dictWord{13, 11, 156}, + dictWord{13, 11, 334}, + dictWord{14, 11, 48}, + dictWord{15, 11, 70}, + dictWord{17, 11, 60}, + dictWord{148, 11, 64}, + dictWord{6, 10, 331}, + dictWord{136, 10, 623}, + dictWord{135, 0, 1231}, + dictWord{132, 0, 304}, + dictWord{6, 11, 60}, + dictWord{7, 11, 670}, + dictWord{7, 11, 1327}, + dictWord{8, 11, 411}, + dictWord{8, 11, 435}, + dictWord{9, 11, 653}, + dictWord{9, 11, 740}, + dictWord{10, 11, 385}, + dictWord{11, 11, 222}, + dictWord{11, 11, 324}, + dictWord{11, 11, 829}, + dictWord{140, 11, 611}, + dictWord{7, 0, 506}, + dictWord{6, 11, 166}, + 
dictWord{7, 11, 374}, + dictWord{135, 11, 1174}, + dictWord{14, 11, 43}, + dictWord{146, 11, 21}, + dictWord{135, 11, 1694}, + dictWord{135, 10, 1888}, + dictWord{ + 5, + 11, + 206, + }, + dictWord{134, 11, 398}, + dictWord{135, 11, 50}, + dictWord{150, 0, 26}, + dictWord{6, 0, 53}, + dictWord{6, 0, 199}, + dictWord{7, 0, 1408}, + dictWord{ + 8, + 0, + 32, + }, + dictWord{8, 0, 93}, + dictWord{10, 0, 397}, + dictWord{10, 0, 629}, + dictWord{11, 0, 593}, + dictWord{11, 0, 763}, + dictWord{13, 0, 326}, + dictWord{145, 0, 35}, + dictWord{134, 0, 105}, + dictWord{132, 10, 394}, + dictWord{4, 0, 843}, + dictWord{138, 0, 794}, + dictWord{11, 0, 704}, + dictWord{141, 0, 396}, + dictWord{5, 0, 114}, + dictWord{5, 0, 255}, + dictWord{141, 0, 285}, + dictWord{6, 0, 619}, + dictWord{7, 0, 898}, + dictWord{7, 0, 1092}, + dictWord{8, 0, 485}, + dictWord{18, 0, 28}, + dictWord{ + 19, + 0, + 116, + }, + dictWord{135, 10, 1931}, + dictWord{9, 0, 145}, + dictWord{7, 10, 574}, + dictWord{135, 10, 1719}, + dictWord{7, 0, 2035}, + dictWord{8, 0, 19}, + dictWord{ + 9, + 0, + 89, + }, + dictWord{138, 0, 831}, + dictWord{132, 10, 658}, + dictWord{6, 11, 517}, + dictWord{7, 11, 1159}, + dictWord{10, 11, 621}, + dictWord{139, 11, 192}, + dictWord{ + 7, + 0, + 1933, + }, + dictWord{7, 11, 1933}, + dictWord{9, 10, 781}, + dictWord{10, 10, 144}, + dictWord{11, 10, 385}, + dictWord{13, 10, 161}, + dictWord{13, 10, 228}, + dictWord{13, 10, 268}, + dictWord{148, 10, 107}, + dictWord{136, 10, 374}, + dictWord{10, 11, 223}, + dictWord{139, 11, 645}, + dictWord{135, 0, 1728}, + dictWord{ + 7, + 11, + 64, + }, + dictWord{7, 11, 289}, + dictWord{136, 11, 245}, + dictWord{4, 10, 344}, + dictWord{6, 10, 498}, + dictWord{139, 10, 323}, + dictWord{136, 0, 746}, + dictWord{ + 135, + 10, + 1063, + }, + dictWord{137, 10, 155}, + dictWord{4, 0, 987}, + dictWord{6, 0, 1964}, + dictWord{6, 0, 1974}, + dictWord{6, 0, 1990}, + dictWord{136, 0, 995}, + dictWord{133, 11, 609}, + dictWord{133, 10, 906}, + dictWord{134, 0, 1550}, + dictWord{134, 0, 874}, + dictWord{5, 11, 129}, + dictWord{6, 11, 61}, + dictWord{ + 135, + 11, + 947, + }, + dictWord{4, 0, 1018}, + dictWord{6, 0, 1938}, + dictWord{6, 0, 2021}, + dictWord{134, 0, 2039}, + dictWord{132, 0, 814}, + dictWord{11, 0, 126}, + dictWord{ + 139, + 0, + 287, + }, + dictWord{134, 0, 1264}, + dictWord{5, 0, 955}, + dictWord{136, 0, 814}, + dictWord{141, 11, 506}, + dictWord{132, 11, 314}, + dictWord{6, 0, 981}, + dictWord{139, 11, 1000}, + dictWord{5, 0, 56}, + dictWord{8, 0, 892}, + dictWord{8, 0, 915}, + dictWord{140, 0, 776}, + dictWord{148, 0, 100}, + dictWord{10, 0, 4}, + dictWord{ + 10, + 0, + 13, + }, + dictWord{11, 0, 638}, + dictWord{148, 0, 57}, + dictWord{148, 11, 74}, + dictWord{5, 0, 738}, + dictWord{132, 10, 616}, + dictWord{133, 11, 637}, + dictWord{ + 136, + 10, + 692, + }, + dictWord{133, 0, 758}, + dictWord{132, 10, 305}, + dictWord{137, 11, 590}, + dictWord{5, 11, 280}, + dictWord{135, 11, 1226}, + dictWord{ + 134, + 11, + 494, + }, + dictWord{135, 0, 1112}, + dictWord{133, 11, 281}, + dictWord{13, 0, 44}, + dictWord{14, 0, 214}, + dictWord{5, 10, 214}, + dictWord{7, 10, 603}, + dictWord{ + 8, + 10, + 611, + }, + dictWord{9, 10, 686}, + dictWord{10, 10, 88}, + dictWord{11, 10, 459}, + dictWord{11, 10, 496}, + dictWord{12, 10, 463}, + dictWord{140, 10, 590}, + dictWord{ + 139, + 0, + 328, + }, + dictWord{135, 11, 1064}, + dictWord{137, 0, 133}, + dictWord{7, 0, 168}, + dictWord{13, 0, 196}, + dictWord{141, 0, 237}, + dictWord{134, 10, 1703}, + dictWord{134, 0, 1152}, + 
dictWord{135, 0, 1245}, + dictWord{5, 0, 110}, + dictWord{6, 0, 169}, + dictWord{6, 0, 1702}, + dictWord{7, 0, 400}, + dictWord{8, 0, 538}, + dictWord{ + 9, + 0, + 184, + }, + dictWord{9, 0, 524}, + dictWord{140, 0, 218}, + dictWord{6, 0, 1816}, + dictWord{10, 0, 871}, + dictWord{12, 0, 769}, + dictWord{140, 0, 785}, + dictWord{ + 132, + 11, + 630, + }, + dictWord{7, 11, 33}, + dictWord{7, 11, 120}, + dictWord{8, 11, 489}, + dictWord{9, 11, 319}, + dictWord{10, 11, 820}, + dictWord{11, 11, 1004}, + dictWord{ + 12, + 11, + 379, + }, + dictWord{13, 11, 117}, + dictWord{13, 11, 412}, + dictWord{14, 11, 25}, + dictWord{15, 11, 52}, + dictWord{15, 11, 161}, + dictWord{16, 11, 47}, + dictWord{149, 11, 2}, + dictWord{6, 0, 133}, + dictWord{8, 0, 413}, + dictWord{9, 0, 353}, + dictWord{139, 0, 993}, + dictWord{145, 10, 19}, + dictWord{4, 11, 937}, + dictWord{ + 133, + 11, + 801, + }, + dictWord{134, 0, 978}, + dictWord{6, 0, 93}, + dictWord{6, 0, 1508}, + dictWord{7, 0, 1422}, + dictWord{7, 0, 1851}, + dictWord{8, 0, 673}, + dictWord{9, 0, 529}, + dictWord{140, 0, 43}, + dictWord{6, 0, 317}, + dictWord{10, 0, 512}, + dictWord{4, 10, 737}, + dictWord{11, 10, 294}, + dictWord{12, 10, 60}, + dictWord{12, 10, 437}, + dictWord{13, 10, 64}, + dictWord{13, 10, 380}, + dictWord{142, 10, 430}, + dictWord{9, 0, 371}, + dictWord{7, 11, 1591}, + dictWord{144, 11, 43}, + dictWord{6, 10, 1758}, + dictWord{8, 10, 520}, + dictWord{9, 10, 345}, + dictWord{9, 10, 403}, + dictWord{142, 10, 350}, + dictWord{5, 0, 526}, + dictWord{10, 10, 242}, + dictWord{ + 138, + 10, + 579, + }, + dictWord{9, 0, 25}, + dictWord{10, 0, 467}, + dictWord{138, 0, 559}, + dictWord{5, 10, 139}, + dictWord{7, 10, 1168}, + dictWord{138, 10, 539}, + dictWord{ + 4, + 0, + 335, + }, + dictWord{135, 0, 942}, + dictWord{140, 0, 754}, + dictWord{132, 11, 365}, + dictWord{11, 0, 182}, + dictWord{142, 0, 195}, + dictWord{142, 11, 29}, + dictWord{ + 5, + 11, + 7, + }, + dictWord{139, 11, 774}, + dictWord{4, 11, 746}, + dictWord{135, 11, 1090}, + dictWord{8, 0, 39}, + dictWord{10, 0, 773}, + dictWord{11, 0, 84}, + dictWord{ + 12, + 0, + 205, + }, + dictWord{142, 0, 1}, + dictWord{5, 0, 601}, + dictWord{5, 0, 870}, + dictWord{5, 11, 360}, + dictWord{136, 11, 237}, + dictWord{132, 0, 181}, + dictWord{ + 136, + 0, + 370, + }, + dictWord{134, 0, 1652}, + dictWord{8, 0, 358}, + dictWord{4, 10, 107}, + dictWord{7, 10, 613}, + dictWord{8, 10, 439}, + dictWord{8, 10, 504}, + dictWord{ + 9, + 10, + 501, + }, + dictWord{10, 10, 383}, + dictWord{139, 10, 477}, + dictWord{132, 10, 229}, + dictWord{137, 11, 785}, + dictWord{4, 0, 97}, + dictWord{5, 0, 147}, + dictWord{ + 6, + 0, + 286, + }, + dictWord{7, 0, 1362}, + dictWord{141, 0, 176}, + dictWord{6, 0, 537}, + dictWord{7, 0, 788}, + dictWord{7, 0, 1816}, + dictWord{132, 10, 903}, + dictWord{ + 140, + 10, + 71, + }, + dictWord{6, 0, 743}, + dictWord{134, 0, 1223}, + dictWord{6, 0, 375}, + dictWord{7, 0, 169}, + dictWord{7, 0, 254}, + dictWord{8, 0, 780}, + dictWord{135, 11, 1493}, + dictWord{7, 0, 1714}, + dictWord{4, 10, 47}, + dictWord{6, 10, 373}, + dictWord{7, 10, 452}, + dictWord{7, 10, 543}, + dictWord{7, 10, 1856}, + dictWord{9, 10, 6}, + dictWord{ + 11, + 10, + 257, + }, + dictWord{139, 10, 391}, + dictWord{6, 0, 896}, + dictWord{136, 0, 1003}, + dictWord{135, 0, 1447}, + dictWord{137, 11, 341}, + dictWord{5, 10, 980}, + dictWord{134, 10, 1754}, + dictWord{145, 11, 22}, + dictWord{4, 11, 277}, + dictWord{5, 11, 608}, + dictWord{6, 11, 493}, + dictWord{7, 11, 457}, + dictWord{ + 140, + 11, + 384, + }, + 
dictWord{7, 10, 536}, + dictWord{7, 10, 1331}, + dictWord{136, 10, 143}, + dictWord{140, 0, 744}, + dictWord{7, 11, 27}, + dictWord{135, 11, 316}, + dictWord{ + 18, + 0, + 126, + }, + dictWord{5, 10, 19}, + dictWord{134, 10, 533}, + dictWord{4, 0, 788}, + dictWord{11, 0, 41}, + dictWord{5, 11, 552}, + dictWord{5, 11, 586}, + dictWord{ + 5, + 11, + 676, + }, + dictWord{6, 11, 448}, + dictWord{8, 11, 244}, + dictWord{11, 11, 1}, + dictWord{11, 11, 41}, + dictWord{13, 11, 3}, + dictWord{16, 11, 54}, + dictWord{17, 11, 4}, + dictWord{146, 11, 13}, + dictWord{4, 0, 985}, + dictWord{6, 0, 1801}, + dictWord{4, 11, 401}, + dictWord{137, 11, 264}, + dictWord{5, 10, 395}, + dictWord{5, 10, 951}, + dictWord{134, 10, 1776}, + dictWord{5, 0, 629}, + dictWord{135, 0, 1549}, + dictWord{11, 10, 663}, + dictWord{12, 10, 210}, + dictWord{13, 10, 166}, + dictWord{ + 13, + 10, + 310, + }, + dictWord{14, 10, 373}, + dictWord{147, 10, 43}, + dictWord{9, 11, 543}, + dictWord{10, 11, 524}, + dictWord{11, 11, 30}, + dictWord{12, 11, 524}, + dictWord{ + 14, + 11, + 315, + }, + dictWord{16, 11, 18}, + dictWord{20, 11, 26}, + dictWord{148, 11, 65}, + dictWord{4, 11, 205}, + dictWord{5, 11, 623}, + dictWord{7, 11, 104}, + dictWord{ + 136, + 11, + 519, + }, + dictWord{5, 0, 293}, + dictWord{134, 0, 601}, + dictWord{7, 11, 579}, + dictWord{9, 11, 41}, + dictWord{9, 11, 244}, + dictWord{9, 11, 669}, + dictWord{ + 10, + 11, + 5, + }, + dictWord{11, 11, 861}, + dictWord{11, 11, 951}, + dictWord{139, 11, 980}, + dictWord{132, 11, 717}, + dictWord{132, 10, 695}, + dictWord{7, 10, 497}, + dictWord{ + 9, + 10, + 387, + }, + dictWord{147, 10, 81}, + dictWord{132, 0, 420}, + dictWord{142, 0, 37}, + dictWord{6, 0, 1134}, + dictWord{6, 0, 1900}, + dictWord{12, 0, 830}, + dictWord{ + 12, + 0, + 878, + }, + dictWord{12, 0, 894}, + dictWord{15, 0, 221}, + dictWord{143, 0, 245}, + dictWord{132, 11, 489}, + dictWord{7, 0, 1570}, + dictWord{140, 0, 542}, + dictWord{ + 8, + 0, + 933, + }, + dictWord{136, 0, 957}, + dictWord{6, 0, 1371}, + dictWord{7, 0, 31}, + dictWord{8, 0, 373}, + dictWord{5, 10, 284}, + dictWord{6, 10, 49}, + dictWord{6, 10, 350}, + dictWord{7, 10, 377}, + dictWord{7, 10, 1693}, + dictWord{8, 10, 678}, + dictWord{9, 10, 161}, + dictWord{9, 10, 585}, + dictWord{9, 10, 671}, + dictWord{9, 10, 839}, + dictWord{11, 10, 912}, + dictWord{141, 10, 427}, + dictWord{135, 11, 892}, + dictWord{4, 0, 325}, + dictWord{138, 0, 125}, + dictWord{139, 11, 47}, + dictWord{ + 132, + 10, + 597, + }, + dictWord{138, 0, 323}, + dictWord{6, 0, 1547}, + dictWord{7, 11, 1605}, + dictWord{9, 11, 473}, + dictWord{11, 11, 962}, + dictWord{146, 11, 139}, + dictWord{ + 139, + 10, + 908, + }, + dictWord{7, 11, 819}, + dictWord{9, 11, 26}, + dictWord{9, 11, 392}, + dictWord{10, 11, 152}, + dictWord{10, 11, 226}, + dictWord{11, 11, 19}, + dictWord{ + 12, + 11, + 276, + }, + dictWord{12, 11, 426}, + dictWord{12, 11, 589}, + dictWord{13, 11, 460}, + dictWord{15, 11, 97}, + dictWord{19, 11, 48}, + dictWord{148, 11, 104}, + dictWord{135, 11, 51}, + dictWord{4, 0, 718}, + dictWord{135, 0, 1216}, + dictWord{6, 0, 1896}, + dictWord{6, 0, 1905}, + dictWord{6, 0, 1912}, + dictWord{9, 0, 947}, + dictWord{ + 9, + 0, + 974, + }, + dictWord{12, 0, 809}, + dictWord{12, 0, 850}, + dictWord{12, 0, 858}, + dictWord{12, 0, 874}, + dictWord{12, 0, 887}, + dictWord{12, 0, 904}, + dictWord{ + 12, + 0, + 929, + }, + dictWord{12, 0, 948}, + dictWord{12, 0, 952}, + dictWord{15, 0, 198}, + dictWord{15, 0, 206}, + dictWord{15, 0, 220}, + dictWord{15, 0, 227}, + dictWord{15, 
0, 247}, + dictWord{18, 0, 188}, + dictWord{21, 0, 48}, + dictWord{21, 0, 50}, + dictWord{24, 0, 25}, + dictWord{24, 0, 29}, + dictWord{7, 11, 761}, + dictWord{7, 11, 1051}, + dictWord{ + 137, + 11, + 545, + }, + dictWord{5, 0, 124}, + dictWord{5, 0, 144}, + dictWord{6, 0, 548}, + dictWord{7, 0, 15}, + dictWord{7, 0, 153}, + dictWord{137, 0, 629}, + dictWord{ + 135, + 11, + 606, + }, + dictWord{135, 10, 2014}, + dictWord{7, 10, 2007}, + dictWord{9, 11, 46}, + dictWord{9, 10, 101}, + dictWord{9, 10, 450}, + dictWord{10, 10, 66}, + dictWord{ + 10, + 10, + 842, + }, + dictWord{11, 10, 536}, + dictWord{140, 10, 587}, + dictWord{6, 0, 75}, + dictWord{7, 0, 1531}, + dictWord{8, 0, 416}, + dictWord{9, 0, 240}, + dictWord{9, 0, 275}, + dictWord{10, 0, 100}, + dictWord{11, 0, 658}, + dictWord{11, 0, 979}, + dictWord{12, 0, 86}, + dictWord{14, 0, 207}, + dictWord{15, 0, 20}, + dictWord{143, 0, 25}, + dictWord{ + 5, + 0, + 141, + }, + dictWord{5, 0, 915}, + dictWord{6, 0, 1783}, + dictWord{7, 0, 211}, + dictWord{7, 0, 698}, + dictWord{7, 0, 1353}, + dictWord{9, 0, 83}, + dictWord{9, 0, 281}, + dictWord{ + 10, + 0, + 376, + }, + dictWord{10, 0, 431}, + dictWord{11, 0, 543}, + dictWord{12, 0, 664}, + dictWord{13, 0, 280}, + dictWord{13, 0, 428}, + dictWord{14, 0, 61}, + dictWord{ + 14, + 0, + 128, + }, + dictWord{17, 0, 52}, + dictWord{145, 0, 81}, + dictWord{132, 11, 674}, + dictWord{135, 0, 533}, + dictWord{149, 0, 6}, + dictWord{132, 11, 770}, + dictWord{ + 133, + 0, + 538, + }, + dictWord{5, 11, 79}, + dictWord{7, 11, 1027}, + dictWord{7, 11, 1477}, + dictWord{139, 11, 52}, + dictWord{139, 10, 62}, + dictWord{4, 0, 338}, + dictWord{ + 133, + 0, + 400, + }, + dictWord{5, 11, 789}, + dictWord{134, 11, 195}, + dictWord{4, 11, 251}, + dictWord{4, 11, 688}, + dictWord{7, 11, 513}, + dictWord{7, 11, 1284}, + dictWord{ + 9, + 11, + 87, + }, + dictWord{138, 11, 365}, + dictWord{134, 10, 1766}, + dictWord{6, 0, 0}, + dictWord{7, 0, 84}, + dictWord{11, 0, 895}, + dictWord{145, 0, 11}, + dictWord{ + 139, + 0, + 892, + }, + dictWord{4, 0, 221}, + dictWord{5, 0, 659}, + dictWord{7, 0, 697}, + dictWord{7, 0, 1211}, + dictWord{138, 0, 284}, + dictWord{133, 0, 989}, + dictWord{ + 133, + 11, + 889, + }, + dictWord{4, 11, 160}, + dictWord{5, 11, 330}, + dictWord{7, 11, 1434}, + dictWord{136, 11, 174}, + dictWord{6, 10, 1665}, + dictWord{7, 10, 256}, + dictWord{ + 7, + 10, + 1388, + }, + dictWord{10, 10, 499}, + dictWord{139, 10, 670}, + dictWord{7, 0, 848}, + dictWord{4, 10, 22}, + dictWord{5, 10, 10}, + dictWord{136, 10, 97}, + dictWord{ + 138, + 0, + 507, + }, + dictWord{133, 10, 481}, + dictWord{4, 0, 188}, + dictWord{135, 0, 805}, + dictWord{5, 0, 884}, + dictWord{6, 0, 732}, + dictWord{139, 0, 991}, + dictWord{ + 135, + 11, + 968, + }, + dictWord{11, 11, 636}, + dictWord{15, 11, 145}, + dictWord{17, 11, 34}, + dictWord{19, 11, 50}, + dictWord{151, 11, 20}, + dictWord{7, 0, 959}, + dictWord{ + 16, + 0, + 60, + }, + dictWord{6, 10, 134}, + dictWord{7, 10, 437}, + dictWord{9, 10, 37}, + dictWord{14, 10, 285}, + dictWord{142, 10, 371}, + dictWord{7, 10, 486}, + dictWord{ + 8, + 10, + 155, + }, + dictWord{11, 10, 93}, + dictWord{140, 10, 164}, + dictWord{134, 0, 1653}, + dictWord{7, 0, 337}, + dictWord{133, 10, 591}, + dictWord{6, 0, 1989}, + dictWord{ + 8, + 0, + 922, + }, + dictWord{8, 0, 978}, + dictWord{133, 11, 374}, + dictWord{132, 0, 638}, + dictWord{138, 0, 500}, + dictWord{133, 11, 731}, + dictWord{5, 10, 380}, + dictWord{ + 5, + 10, + 650, + }, + dictWord{136, 10, 310}, + dictWord{138, 11, 381}, + 
dictWord{4, 10, 364}, + dictWord{7, 10, 1156}, + dictWord{7, 10, 1187}, + dictWord{137, 10, 409}, + dictWord{137, 11, 224}, + dictWord{140, 0, 166}, + dictWord{134, 10, 482}, + dictWord{4, 11, 626}, + dictWord{5, 11, 642}, + dictWord{6, 11, 425}, + dictWord{ + 10, + 11, + 202, + }, + dictWord{139, 11, 141}, + dictWord{4, 10, 781}, + dictWord{6, 10, 487}, + dictWord{7, 10, 926}, + dictWord{8, 10, 263}, + dictWord{139, 10, 500}, + dictWord{ + 135, + 0, + 418, + }, + dictWord{4, 10, 94}, + dictWord{135, 10, 1265}, + dictWord{136, 0, 760}, + dictWord{132, 10, 417}, + dictWord{136, 11, 835}, + dictWord{5, 10, 348}, + dictWord{134, 10, 522}, + dictWord{6, 0, 1277}, + dictWord{134, 0, 1538}, + dictWord{139, 11, 541}, + dictWord{135, 11, 1597}, + dictWord{5, 11, 384}, + dictWord{ + 8, + 11, + 455, + }, + dictWord{140, 11, 48}, + dictWord{136, 0, 770}, + dictWord{5, 11, 264}, + dictWord{134, 11, 184}, + dictWord{4, 0, 89}, + dictWord{5, 0, 489}, + dictWord{ + 6, + 0, + 315, + }, + dictWord{7, 0, 553}, + dictWord{7, 0, 1745}, + dictWord{138, 0, 243}, + dictWord{4, 10, 408}, + dictWord{4, 10, 741}, + dictWord{135, 10, 500}, + dictWord{ + 134, + 0, + 1396, + }, + dictWord{133, 0, 560}, + dictWord{6, 0, 1658}, + dictWord{9, 0, 3}, + dictWord{10, 0, 154}, + dictWord{11, 0, 641}, + dictWord{13, 0, 85}, + dictWord{13, 0, 201}, + dictWord{141, 0, 346}, + dictWord{135, 11, 1595}, + dictWord{5, 11, 633}, + dictWord{6, 11, 28}, + dictWord{7, 11, 219}, + dictWord{135, 11, 1323}, + dictWord{ + 9, + 11, + 769, + }, + dictWord{140, 11, 185}, + dictWord{135, 11, 785}, + dictWord{7, 11, 359}, + dictWord{8, 11, 243}, + dictWord{140, 11, 175}, + dictWord{138, 0, 586}, + dictWord{ + 7, + 0, + 1271, + }, + dictWord{134, 10, 73}, + dictWord{132, 11, 105}, + dictWord{4, 0, 166}, + dictWord{5, 0, 505}, + dictWord{134, 0, 1670}, + dictWord{133, 10, 576}, + dictWord{4, 11, 324}, + dictWord{138, 11, 104}, + dictWord{142, 10, 231}, + dictWord{6, 0, 637}, + dictWord{7, 10, 1264}, + dictWord{7, 10, 1678}, + dictWord{ + 11, + 10, + 945, + }, + dictWord{12, 10, 341}, + dictWord{12, 10, 471}, + dictWord{12, 10, 569}, + dictWord{23, 11, 21}, + dictWord{151, 11, 23}, + dictWord{8, 11, 559}, + dictWord{ + 141, + 11, + 109, + }, + dictWord{134, 0, 1947}, + dictWord{7, 0, 445}, + dictWord{8, 0, 307}, + dictWord{8, 0, 704}, + dictWord{10, 0, 41}, + dictWord{10, 0, 439}, + dictWord{ + 11, + 0, + 237, + }, + dictWord{11, 0, 622}, + dictWord{140, 0, 201}, + dictWord{135, 11, 963}, + dictWord{135, 0, 1977}, + dictWord{4, 0, 189}, + dictWord{5, 0, 713}, + dictWord{ + 136, + 0, + 57, + }, + dictWord{138, 0, 371}, + dictWord{135, 10, 538}, + dictWord{132, 0, 552}, + dictWord{6, 0, 883}, + dictWord{133, 10, 413}, + dictWord{6, 0, 923}, + dictWord{ + 132, + 11, + 758, + }, + dictWord{138, 11, 215}, + dictWord{136, 10, 495}, + dictWord{7, 10, 54}, + dictWord{8, 10, 312}, + dictWord{10, 10, 191}, + dictWord{10, 10, 614}, + dictWord{140, 10, 567}, + dictWord{7, 11, 351}, + dictWord{139, 11, 128}, + dictWord{7, 0, 875}, + dictWord{6, 10, 468}, + dictWord{7, 10, 1478}, + dictWord{8, 10, 530}, + dictWord{142, 10, 290}, + dictWord{135, 0, 1788}, + dictWord{17, 0, 49}, + dictWord{133, 11, 918}, + dictWord{12, 11, 398}, + dictWord{20, 11, 39}, + dictWord{ + 21, + 11, + 11, + }, + dictWord{150, 11, 41}, + dictWord{10, 0, 661}, + dictWord{6, 10, 484}, + dictWord{135, 10, 822}, + dictWord{135, 0, 1945}, + dictWord{134, 0, 794}, + dictWord{ + 137, + 10, + 900, + }, + dictWord{135, 10, 1335}, + dictWord{6, 10, 1724}, + dictWord{135, 10, 2022}, + dictWord{132, 
11, 340}, + dictWord{134, 0, 1135}, + dictWord{ + 4, + 0, + 784, + }, + dictWord{133, 0, 745}, + dictWord{5, 0, 84}, + dictWord{134, 0, 163}, + dictWord{133, 0, 410}, + dictWord{4, 0, 976}, + dictWord{5, 11, 985}, + dictWord{7, 11, 509}, + dictWord{7, 11, 529}, + dictWord{145, 11, 96}, + dictWord{132, 10, 474}, + dictWord{134, 0, 703}, + dictWord{135, 11, 1919}, + dictWord{5, 0, 322}, + dictWord{ + 8, + 0, + 186, + }, + dictWord{9, 0, 262}, + dictWord{10, 0, 187}, + dictWord{142, 0, 208}, + dictWord{135, 10, 1504}, + dictWord{133, 0, 227}, + dictWord{9, 0, 560}, + dictWord{ + 13, + 0, + 208, + }, + dictWord{133, 10, 305}, + dictWord{132, 11, 247}, + dictWord{7, 0, 1395}, + dictWord{8, 0, 486}, + dictWord{9, 0, 236}, + dictWord{9, 0, 878}, + dictWord{ + 10, + 0, + 218, + }, + dictWord{11, 0, 95}, + dictWord{19, 0, 17}, + dictWord{147, 0, 31}, + dictWord{7, 0, 2043}, + dictWord{8, 0, 672}, + dictWord{141, 0, 448}, + dictWord{4, 11, 184}, + dictWord{5, 11, 390}, + dictWord{6, 11, 337}, + dictWord{7, 11, 23}, + dictWord{7, 11, 494}, + dictWord{7, 11, 618}, + dictWord{7, 11, 1456}, + dictWord{8, 11, 27}, + dictWord{ + 8, + 11, + 599, + }, + dictWord{10, 11, 153}, + dictWord{139, 11, 710}, + dictWord{135, 0, 466}, + dictWord{135, 10, 1236}, + dictWord{6, 0, 167}, + dictWord{7, 0, 186}, + dictWord{7, 0, 656}, + dictWord{10, 0, 643}, + dictWord{4, 10, 480}, + dictWord{6, 10, 302}, + dictWord{6, 10, 1642}, + dictWord{7, 10, 837}, + dictWord{7, 10, 1547}, + dictWord{ + 7, + 10, + 1657, + }, + dictWord{8, 10, 429}, + dictWord{9, 10, 228}, + dictWord{13, 10, 289}, + dictWord{13, 10, 343}, + dictWord{147, 10, 101}, + dictWord{134, 0, 1428}, + dictWord{134, 0, 1440}, + dictWord{5, 0, 412}, + dictWord{7, 10, 278}, + dictWord{10, 10, 739}, + dictWord{11, 10, 708}, + dictWord{141, 10, 348}, + dictWord{ + 134, + 0, + 1118, + }, + dictWord{136, 0, 562}, + dictWord{148, 11, 46}, + dictWord{9, 0, 316}, + dictWord{139, 0, 256}, + dictWord{134, 0, 1771}, + dictWord{135, 0, 1190}, + dictWord{137, 0, 132}, + dictWord{10, 11, 227}, + dictWord{11, 11, 497}, + dictWord{11, 11, 709}, + dictWord{140, 11, 415}, + dictWord{143, 0, 66}, + dictWord{6, 11, 360}, + dictWord{7, 11, 1664}, + dictWord{136, 11, 478}, + dictWord{144, 10, 28}, + dictWord{4, 0, 317}, + dictWord{135, 0, 1279}, + dictWord{5, 0, 63}, + dictWord{ + 133, + 0, + 509, + }, + dictWord{136, 11, 699}, + dictWord{145, 10, 36}, + dictWord{134, 0, 1475}, + dictWord{11, 11, 343}, + dictWord{142, 11, 127}, + dictWord{132, 11, 739}, + dictWord{132, 0, 288}, + dictWord{135, 11, 1757}, + dictWord{8, 0, 89}, + dictWord{8, 0, 620}, + dictWord{9, 0, 608}, + dictWord{11, 0, 628}, + dictWord{12, 0, 322}, + dictWord{143, 0, 124}, + dictWord{134, 0, 1225}, + dictWord{7, 0, 1189}, + dictWord{4, 11, 67}, + dictWord{5, 11, 422}, + dictWord{6, 10, 363}, + dictWord{7, 11, 1037}, + dictWord{7, 11, 1289}, + dictWord{7, 11, 1555}, + dictWord{7, 10, 1955}, + dictWord{8, 10, 725}, + dictWord{9, 11, 741}, + dictWord{145, 11, 108}, + dictWord{ + 134, + 0, + 1468, + }, + dictWord{6, 0, 689}, + dictWord{134, 0, 1451}, + dictWord{138, 0, 120}, + dictWord{151, 0, 1}, + dictWord{137, 10, 805}, + dictWord{142, 0, 329}, + dictWord{ + 5, + 10, + 813, + }, + dictWord{135, 10, 2046}, + dictWord{135, 0, 226}, + dictWord{138, 11, 96}, + dictWord{7, 0, 1855}, + dictWord{5, 10, 712}, + dictWord{11, 10, 17}, + dictWord{13, 10, 321}, + dictWord{144, 10, 67}, + dictWord{9, 0, 461}, + dictWord{6, 10, 320}, + dictWord{7, 10, 781}, + dictWord{7, 10, 1921}, + dictWord{9, 10, 55}, + dictWord{ + 10, + 10, + 
186, + }, + dictWord{10, 10, 273}, + dictWord{10, 10, 664}, + dictWord{10, 10, 801}, + dictWord{11, 10, 996}, + dictWord{11, 10, 997}, + dictWord{13, 10, 157}, + dictWord{142, 10, 170}, + dictWord{8, 11, 203}, + dictWord{8, 10, 271}, + dictWord{11, 11, 823}, + dictWord{11, 11, 846}, + dictWord{12, 11, 482}, + dictWord{ + 13, + 11, + 133, + }, + dictWord{13, 11, 277}, + dictWord{13, 11, 302}, + dictWord{13, 11, 464}, + dictWord{14, 11, 205}, + dictWord{142, 11, 221}, + dictWord{135, 0, 1346}, + dictWord{4, 11, 449}, + dictWord{133, 11, 718}, + dictWord{134, 0, 85}, + dictWord{14, 0, 299}, + dictWord{7, 10, 103}, + dictWord{7, 10, 863}, + dictWord{11, 10, 184}, + dictWord{145, 10, 62}, + dictWord{4, 11, 355}, + dictWord{6, 11, 311}, + dictWord{9, 11, 256}, + dictWord{138, 11, 404}, + dictWord{137, 10, 659}, + dictWord{ + 138, + 11, + 758, + }, + dictWord{133, 11, 827}, + dictWord{5, 11, 64}, + dictWord{140, 11, 581}, + dictWord{134, 0, 1171}, + dictWord{4, 11, 442}, + dictWord{7, 11, 1047}, + dictWord{ + 7, + 11, + 1352, + }, + dictWord{135, 11, 1643}, + dictWord{132, 0, 980}, + dictWord{5, 11, 977}, + dictWord{6, 11, 288}, + dictWord{7, 11, 528}, + dictWord{135, 11, 1065}, + dictWord{5, 0, 279}, + dictWord{6, 0, 235}, + dictWord{7, 0, 468}, + dictWord{8, 0, 446}, + dictWord{9, 0, 637}, + dictWord{10, 0, 717}, + dictWord{11, 0, 738}, + dictWord{ + 140, + 0, + 514, + }, + dictWord{132, 0, 293}, + dictWord{11, 10, 337}, + dictWord{142, 10, 303}, + dictWord{136, 11, 285}, + dictWord{5, 0, 17}, + dictWord{6, 0, 371}, + dictWord{ + 9, + 0, + 528, + }, + dictWord{12, 0, 364}, + dictWord{132, 11, 254}, + dictWord{5, 10, 77}, + dictWord{7, 10, 1455}, + dictWord{10, 10, 843}, + dictWord{147, 10, 73}, + dictWord{ + 150, + 0, + 5, + }, + dictWord{132, 10, 458}, + dictWord{6, 11, 12}, + dictWord{7, 11, 1219}, + dictWord{145, 11, 73}, + dictWord{135, 10, 1420}, + dictWord{6, 10, 109}, + dictWord{138, 10, 382}, + dictWord{135, 11, 125}, + dictWord{6, 10, 330}, + dictWord{7, 10, 1084}, + dictWord{139, 10, 142}, + dictWord{6, 11, 369}, + dictWord{ + 6, + 11, + 502, + }, + dictWord{7, 11, 1036}, + dictWord{8, 11, 348}, + dictWord{9, 11, 452}, + dictWord{10, 11, 26}, + dictWord{11, 11, 224}, + dictWord{11, 11, 387}, + dictWord{ + 11, + 11, + 772, + }, + dictWord{12, 11, 95}, + dictWord{12, 11, 629}, + dictWord{13, 11, 195}, + dictWord{13, 11, 207}, + dictWord{13, 11, 241}, + dictWord{14, 11, 260}, + dictWord{ + 14, + 11, + 270, + }, + dictWord{143, 11, 140}, + dictWord{132, 11, 269}, + dictWord{5, 11, 480}, + dictWord{7, 11, 532}, + dictWord{7, 11, 1197}, + dictWord{7, 11, 1358}, + dictWord{8, 11, 291}, + dictWord{11, 11, 349}, + dictWord{142, 11, 396}, + dictWord{150, 0, 48}, + dictWord{10, 0, 601}, + dictWord{13, 0, 353}, + dictWord{141, 0, 376}, + dictWord{5, 0, 779}, + dictWord{5, 0, 807}, + dictWord{6, 0, 1655}, + dictWord{134, 0, 1676}, + dictWord{142, 11, 223}, + dictWord{4, 0, 196}, + dictWord{5, 0, 558}, + dictWord{133, 0, 949}, + dictWord{148, 11, 15}, + dictWord{135, 11, 1764}, + dictWord{134, 0, 1322}, + dictWord{132, 0, 752}, + dictWord{139, 0, 737}, + dictWord{ + 135, + 11, + 657, + }, + dictWord{136, 11, 533}, + dictWord{135, 0, 412}, + dictWord{4, 0, 227}, + dictWord{5, 0, 159}, + dictWord{5, 0, 409}, + dictWord{7, 0, 80}, + dictWord{8, 0, 556}, + dictWord{10, 0, 479}, + dictWord{12, 0, 418}, + dictWord{14, 0, 50}, + dictWord{14, 0, 123}, + dictWord{14, 0, 192}, + dictWord{14, 0, 249}, + dictWord{14, 0, 295}, + dictWord{143, 0, 27}, + dictWord{7, 0, 1470}, + dictWord{8, 0, 66}, + dictWord{8, 
0, 137}, + dictWord{8, 0, 761}, + dictWord{9, 0, 638}, + dictWord{11, 0, 80}, + dictWord{11, 0, 212}, + dictWord{11, 0, 368}, + dictWord{11, 0, 418}, + dictWord{12, 0, 8}, + dictWord{13, 0, 15}, + dictWord{16, 0, 61}, + dictWord{17, 0, 59}, + dictWord{19, 0, 28}, + dictWord{ + 148, + 0, + 84, + }, + dictWord{135, 10, 1985}, + dictWord{4, 11, 211}, + dictWord{4, 11, 332}, + dictWord{5, 11, 335}, + dictWord{6, 11, 238}, + dictWord{7, 11, 269}, + dictWord{ + 7, + 11, + 811, + }, + dictWord{7, 11, 1797}, + dictWord{8, 10, 122}, + dictWord{8, 11, 836}, + dictWord{9, 11, 507}, + dictWord{141, 11, 242}, + dictWord{6, 0, 683}, + dictWord{ + 134, + 0, + 1252, + }, + dictWord{4, 0, 873}, + dictWord{132, 10, 234}, + dictWord{134, 0, 835}, + dictWord{6, 0, 38}, + dictWord{7, 0, 1220}, + dictWord{8, 0, 185}, + dictWord{8, 0, 256}, + dictWord{9, 0, 22}, + dictWord{9, 0, 331}, + dictWord{10, 0, 738}, + dictWord{11, 0, 205}, + dictWord{11, 0, 540}, + dictWord{11, 0, 746}, + dictWord{13, 0, 465}, + dictWord{ + 14, + 0, + 88, + }, + dictWord{142, 0, 194}, + dictWord{138, 0, 986}, + dictWord{5, 11, 1009}, + dictWord{12, 11, 582}, + dictWord{146, 11, 131}, + dictWord{4, 0, 159}, + dictWord{ + 6, + 0, + 115, + }, + dictWord{7, 0, 252}, + dictWord{7, 0, 257}, + dictWord{7, 0, 1928}, + dictWord{8, 0, 69}, + dictWord{9, 0, 384}, + dictWord{10, 0, 91}, + dictWord{10, 0, 615}, + dictWord{ + 12, + 0, + 375, + }, + dictWord{14, 0, 235}, + dictWord{18, 0, 117}, + dictWord{147, 0, 123}, + dictWord{133, 0, 911}, + dictWord{136, 0, 278}, + dictWord{5, 10, 430}, + dictWord{ + 5, + 10, + 932, + }, + dictWord{6, 10, 131}, + dictWord{7, 10, 417}, + dictWord{9, 10, 522}, + dictWord{11, 10, 314}, + dictWord{141, 10, 390}, + dictWord{14, 10, 149}, + dictWord{14, 10, 399}, + dictWord{143, 10, 57}, + dictWord{4, 0, 151}, + dictWord{7, 0, 1567}, + dictWord{136, 0, 749}, + dictWord{5, 11, 228}, + dictWord{6, 11, 203}, + dictWord{ + 7, + 11, + 156, + }, + dictWord{8, 11, 347}, + dictWord{137, 11, 265}, + dictWord{132, 10, 507}, + dictWord{10, 0, 989}, + dictWord{140, 0, 956}, + dictWord{133, 0, 990}, + dictWord{5, 0, 194}, + dictWord{6, 0, 927}, + dictWord{7, 0, 1662}, + dictWord{9, 0, 90}, + dictWord{140, 0, 564}, + dictWord{4, 10, 343}, + dictWord{133, 10, 511}, + dictWord{133, 0, 425}, + dictWord{7, 10, 455}, + dictWord{138, 10, 591}, + dictWord{4, 0, 774}, + dictWord{7, 11, 476}, + dictWord{7, 11, 1592}, + dictWord{138, 11, 87}, + dictWord{5, 0, 971}, + dictWord{135, 10, 1381}, + dictWord{5, 11, 318}, + dictWord{147, 11, 121}, + dictWord{5, 11, 291}, + dictWord{7, 11, 765}, + dictWord{9, 11, 389}, + dictWord{140, 11, 548}, + dictWord{134, 10, 575}, + dictWord{4, 0, 827}, + dictWord{12, 0, 646}, + dictWord{12, 0, 705}, + dictWord{12, 0, 712}, + dictWord{140, 0, 714}, + dictWord{139, 0, 752}, + dictWord{137, 0, 662}, + dictWord{5, 0, 72}, + dictWord{6, 0, 264}, + dictWord{7, 0, 21}, + dictWord{7, 0, 46}, + dictWord{7, 0, 2013}, + dictWord{ + 8, + 0, + 215, + }, + dictWord{8, 0, 513}, + dictWord{10, 0, 266}, + dictWord{139, 0, 22}, + dictWord{139, 11, 522}, + dictWord{6, 0, 239}, + dictWord{7, 0, 118}, + dictWord{10, 0, 95}, + dictWord{11, 0, 603}, + dictWord{13, 0, 443}, + dictWord{14, 0, 160}, + dictWord{143, 0, 4}, + dictWord{6, 0, 431}, + dictWord{134, 0, 669}, + dictWord{7, 10, 1127}, + dictWord{ + 7, + 10, + 1572, + }, + dictWord{10, 10, 297}, + dictWord{10, 10, 422}, + dictWord{11, 10, 764}, + dictWord{11, 10, 810}, + dictWord{12, 10, 264}, + dictWord{13, 10, 102}, + dictWord{13, 10, 300}, + dictWord{13, 10, 484}, + 
dictWord{14, 10, 147}, + dictWord{14, 10, 229}, + dictWord{17, 10, 71}, + dictWord{18, 10, 118}, + dictWord{ + 147, + 10, + 120, + }, + dictWord{5, 0, 874}, + dictWord{6, 0, 1677}, + dictWord{15, 0, 0}, + dictWord{10, 11, 525}, + dictWord{139, 11, 82}, + dictWord{6, 0, 65}, + dictWord{7, 0, 939}, + dictWord{ + 7, + 0, + 1172, + }, + dictWord{7, 0, 1671}, + dictWord{9, 0, 540}, + dictWord{10, 0, 696}, + dictWord{11, 0, 265}, + dictWord{11, 0, 732}, + dictWord{11, 0, 928}, + dictWord{ + 11, + 0, + 937, + }, + dictWord{141, 0, 438}, + dictWord{134, 0, 1350}, + dictWord{136, 11, 547}, + dictWord{132, 11, 422}, + dictWord{5, 11, 355}, + dictWord{145, 11, 0}, + dictWord{137, 11, 905}, + dictWord{5, 0, 682}, + dictWord{135, 0, 1887}, + dictWord{132, 0, 809}, + dictWord{4, 0, 696}, + dictWord{133, 11, 865}, + dictWord{6, 0, 1074}, + dictWord{6, 0, 1472}, + dictWord{14, 10, 35}, + dictWord{142, 10, 191}, + dictWord{5, 11, 914}, + dictWord{134, 11, 1625}, + dictWord{133, 11, 234}, + dictWord{ + 135, + 11, + 1383, + }, + dictWord{137, 11, 780}, + dictWord{132, 10, 125}, + dictWord{4, 0, 726}, + dictWord{133, 0, 630}, + dictWord{8, 0, 802}, + dictWord{136, 0, 838}, + dictWord{132, 10, 721}, + dictWord{6, 0, 1337}, + dictWord{7, 0, 776}, + dictWord{19, 0, 56}, + dictWord{136, 10, 145}, + dictWord{132, 0, 970}, + dictWord{7, 10, 792}, + dictWord{8, 10, 147}, + dictWord{10, 10, 821}, + dictWord{139, 10, 1021}, + dictWord{139, 10, 970}, + dictWord{8, 0, 940}, + dictWord{137, 0, 797}, + dictWord{ + 135, + 11, + 1312, + }, + dictWord{9, 0, 248}, + dictWord{10, 0, 400}, + dictWord{7, 11, 816}, + dictWord{7, 11, 1241}, + dictWord{7, 10, 1999}, + dictWord{9, 11, 283}, + dictWord{ + 9, + 11, + 520, + }, + dictWord{10, 11, 213}, + dictWord{10, 11, 307}, + dictWord{10, 11, 463}, + dictWord{10, 11, 671}, + dictWord{10, 11, 746}, + dictWord{11, 11, 401}, + dictWord{ + 11, + 11, + 794, + }, + dictWord{12, 11, 517}, + dictWord{18, 11, 107}, + dictWord{147, 11, 115}, + dictWord{6, 0, 1951}, + dictWord{134, 0, 2040}, + dictWord{ + 135, + 11, + 339, + }, + dictWord{13, 0, 41}, + dictWord{15, 0, 93}, + dictWord{5, 10, 168}, + dictWord{5, 10, 930}, + dictWord{8, 10, 74}, + dictWord{9, 10, 623}, + dictWord{12, 10, 500}, + dictWord{140, 10, 579}, + dictWord{6, 0, 118}, + dictWord{7, 0, 215}, + dictWord{7, 0, 1521}, + dictWord{140, 0, 11}, + dictWord{6, 10, 220}, + dictWord{7, 10, 1101}, + dictWord{141, 10, 105}, + dictWord{6, 11, 421}, + dictWord{7, 11, 61}, + dictWord{7, 11, 1540}, + dictWord{10, 11, 11}, + dictWord{138, 11, 501}, + dictWord{7, 0, 615}, + dictWord{138, 0, 251}, + dictWord{140, 11, 631}, + dictWord{135, 0, 1044}, + dictWord{6, 10, 19}, + dictWord{7, 10, 1413}, + dictWord{139, 10, 428}, + dictWord{ + 133, + 0, + 225, + }, + dictWord{7, 10, 96}, + dictWord{8, 10, 401}, + dictWord{8, 10, 703}, + dictWord{137, 10, 896}, + dictWord{145, 10, 116}, + dictWord{6, 11, 102}, + dictWord{ + 7, + 11, + 72, + }, + dictWord{15, 11, 142}, + dictWord{147, 11, 67}, + dictWord{7, 10, 1961}, + dictWord{7, 10, 1965}, + dictWord{8, 10, 702}, + dictWord{136, 10, 750}, + dictWord{ + 7, + 10, + 2030, + }, + dictWord{8, 10, 150}, + dictWord{8, 10, 737}, + dictWord{12, 10, 366}, + dictWord{151, 11, 30}, + dictWord{4, 0, 370}, + dictWord{5, 0, 756}, + dictWord{ + 7, + 0, + 1326, + }, + dictWord{135, 11, 823}, + dictWord{8, 10, 800}, + dictWord{9, 10, 148}, + dictWord{9, 10, 872}, + dictWord{9, 10, 890}, + dictWord{11, 10, 309}, + dictWord{ + 11, + 10, + 1001, + }, + dictWord{13, 10, 267}, + dictWord{141, 10, 323}, + dictWord{6, 0, 
[vendored generated dictionary table: thousands of dictWord{...} entries]
dictWord{ + 7, + 11, + 592, + }, + dictWord{7, 11, 637}, + dictWord{7, 11, 770}, + dictWord{7, 11, 1701}, + dictWord{8, 11, 436}, + dictWord{8, 11, 463}, + dictWord{9, 11, 60}, + dictWord{9, 11, 335}, + dictWord{9, 11, 904}, + dictWord{10, 11, 73}, + dictWord{11, 11, 434}, + dictWord{12, 11, 585}, + dictWord{13, 11, 331}, + dictWord{18, 11, 110}, + dictWord{ + 148, + 11, + 60, + }, + dictWord{134, 0, 1559}, + dictWord{132, 11, 502}, + dictWord{6, 11, 347}, + dictWord{138, 11, 161}, + dictWord{4, 11, 33}, + dictWord{5, 11, 102}, + dictWord{ + 5, + 11, + 500, + }, + dictWord{6, 11, 284}, + dictWord{7, 11, 1079}, + dictWord{7, 11, 1423}, + dictWord{7, 11, 1702}, + dictWord{8, 11, 470}, + dictWord{9, 11, 554}, + dictWord{ + 9, + 11, + 723, + }, + dictWord{139, 11, 333}, + dictWord{7, 11, 246}, + dictWord{135, 11, 840}, + dictWord{6, 11, 10}, + dictWord{8, 11, 571}, + dictWord{9, 11, 739}, + dictWord{ + 143, + 11, + 91, + }, + dictWord{8, 0, 861}, + dictWord{10, 0, 905}, + dictWord{12, 0, 730}, + dictWord{12, 0, 789}, + dictWord{133, 11, 626}, + dictWord{134, 0, 946}, + dictWord{ + 5, + 0, + 746, + }, + dictWord{12, 0, 333}, + dictWord{14, 0, 332}, + dictWord{12, 11, 333}, + dictWord{142, 11, 332}, + dictWord{5, 11, 18}, + dictWord{6, 11, 526}, + dictWord{ + 13, + 11, + 24, + }, + dictWord{13, 11, 110}, + dictWord{19, 11, 5}, + dictWord{147, 11, 44}, + dictWord{4, 0, 910}, + dictWord{5, 0, 832}, + dictWord{135, 10, 2002}, + dictWord{ + 10, + 11, + 768, + }, + dictWord{139, 11, 787}, + dictWord{4, 11, 309}, + dictWord{5, 11, 462}, + dictWord{7, 11, 970}, + dictWord{135, 11, 1097}, + dictWord{4, 10, 28}, + dictWord{ + 5, + 10, + 440, + }, + dictWord{7, 10, 248}, + dictWord{11, 10, 833}, + dictWord{140, 10, 344}, + dictWord{134, 10, 1654}, + dictWord{6, 0, 632}, + dictWord{6, 0, 652}, + dictWord{ + 6, + 0, + 1272, + }, + dictWord{6, 0, 1384}, + dictWord{134, 0, 1560}, + dictWord{134, 11, 1704}, + dictWord{6, 0, 1393}, + dictWord{133, 10, 853}, + dictWord{6, 10, 249}, + dictWord{7, 10, 1234}, + dictWord{139, 10, 573}, + dictWord{5, 11, 86}, + dictWord{7, 11, 743}, + dictWord{9, 11, 85}, + dictWord{10, 11, 281}, + dictWord{10, 11, 432}, + dictWord{11, 11, 490}, + dictWord{12, 11, 251}, + dictWord{13, 11, 118}, + dictWord{14, 11, 378}, + dictWord{146, 11, 143}, + dictWord{5, 11, 524}, + dictWord{ + 133, + 11, + 744, + }, + dictWord{134, 0, 1514}, + dictWord{10, 0, 201}, + dictWord{142, 0, 319}, + dictWord{7, 0, 717}, + dictWord{10, 0, 510}, + dictWord{7, 10, 392}, + dictWord{ + 8, + 10, + 20, + }, + dictWord{8, 10, 172}, + dictWord{8, 10, 690}, + dictWord{9, 10, 383}, + dictWord{9, 10, 845}, + dictWord{11, 10, 293}, + dictWord{11, 10, 832}, + dictWord{ + 11, + 10, + 920, + }, + dictWord{11, 10, 984}, + dictWord{141, 10, 221}, + dictWord{134, 0, 1381}, + dictWord{5, 10, 858}, + dictWord{133, 10, 992}, + dictWord{8, 0, 528}, + dictWord{137, 0, 348}, + dictWord{10, 11, 107}, + dictWord{140, 11, 436}, + dictWord{4, 0, 20}, + dictWord{133, 0, 616}, + dictWord{134, 0, 1251}, + dictWord{ + 132, + 11, + 927, + }, + dictWord{10, 11, 123}, + dictWord{12, 11, 670}, + dictWord{13, 11, 371}, + dictWord{14, 11, 142}, + dictWord{146, 11, 94}, + dictWord{134, 0, 1163}, + dictWord{ + 7, + 11, + 1149, + }, + dictWord{137, 11, 156}, + dictWord{134, 0, 307}, + dictWord{133, 11, 778}, + dictWord{7, 0, 1091}, + dictWord{135, 0, 1765}, + dictWord{ + 5, + 11, + 502, + }, + dictWord{6, 10, 268}, + dictWord{137, 10, 62}, + dictWord{8, 11, 196}, + dictWord{10, 11, 283}, + dictWord{139, 11, 406}, + dictWord{4, 0, 26}, + 
dictWord{ + 5, + 0, + 429, + }, + dictWord{6, 0, 245}, + dictWord{7, 0, 704}, + dictWord{7, 0, 1379}, + dictWord{135, 0, 1474}, + dictWord{133, 11, 855}, + dictWord{132, 0, 881}, + dictWord{ + 4, + 0, + 621, + }, + dictWord{135, 11, 1596}, + dictWord{7, 11, 1400}, + dictWord{9, 11, 446}, + dictWord{138, 11, 45}, + dictWord{6, 0, 736}, + dictWord{138, 10, 106}, + dictWord{133, 0, 542}, + dictWord{134, 0, 348}, + dictWord{133, 0, 868}, + dictWord{136, 0, 433}, + dictWord{135, 0, 1495}, + dictWord{138, 0, 771}, + dictWord{ + 6, + 10, + 613, + }, + dictWord{136, 10, 223}, + dictWord{138, 0, 215}, + dictWord{141, 0, 124}, + dictWord{136, 11, 391}, + dictWord{135, 11, 172}, + dictWord{132, 10, 670}, + dictWord{140, 0, 55}, + dictWord{9, 10, 40}, + dictWord{139, 10, 136}, + dictWord{7, 0, 62}, + dictWord{147, 0, 112}, + dictWord{132, 0, 856}, + dictWord{132, 11, 568}, + dictWord{12, 0, 270}, + dictWord{139, 10, 259}, + dictWord{8, 0, 572}, + dictWord{137, 0, 698}, + dictWord{4, 11, 732}, + dictWord{9, 10, 310}, + dictWord{137, 10, 682}, + dictWord{142, 10, 296}, + dictWord{134, 0, 939}, + dictWord{136, 11, 733}, + dictWord{135, 11, 1435}, + dictWord{7, 10, 1401}, + dictWord{135, 10, 1476}, + dictWord{6, 0, 352}, + dictWord{4, 10, 296}, + dictWord{7, 10, 401}, + dictWord{7, 10, 1410}, + dictWord{7, 10, 1594}, + dictWord{7, 10, 1674}, + dictWord{8, 10, 63}, + dictWord{ + 8, + 10, + 660, + }, + dictWord{137, 10, 74}, + dictWord{4, 11, 428}, + dictWord{133, 11, 668}, + dictWord{4, 10, 139}, + dictWord{4, 10, 388}, + dictWord{140, 10, 188}, + dictWord{7, 11, 2015}, + dictWord{140, 11, 665}, + dictWord{132, 0, 647}, + dictWord{146, 0, 10}, + dictWord{138, 0, 220}, + dictWord{142, 0, 464}, + dictWord{ + 132, + 0, + 109, + }, + dictWord{134, 0, 1746}, + dictWord{6, 0, 515}, + dictWord{4, 10, 747}, + dictWord{6, 11, 1623}, + dictWord{6, 11, 1681}, + dictWord{7, 10, 649}, + dictWord{ + 7, + 10, + 1479, + }, + dictWord{135, 10, 1583}, + dictWord{133, 10, 232}, + dictWord{135, 0, 566}, + dictWord{137, 10, 887}, + dictWord{4, 0, 40}, + dictWord{10, 0, 67}, + dictWord{ + 11, + 0, + 117, + }, + dictWord{11, 0, 768}, + dictWord{139, 0, 935}, + dictWord{132, 0, 801}, + dictWord{7, 0, 992}, + dictWord{8, 0, 301}, + dictWord{9, 0, 722}, + dictWord{ + 12, + 0, + 63, + }, + dictWord{13, 0, 29}, + dictWord{14, 0, 161}, + dictWord{143, 0, 18}, + dictWord{139, 0, 923}, + dictWord{6, 11, 1748}, + dictWord{8, 11, 715}, + dictWord{9, 11, 802}, + dictWord{10, 11, 46}, + dictWord{10, 11, 819}, + dictWord{13, 11, 308}, + dictWord{14, 11, 351}, + dictWord{14, 11, 363}, + dictWord{146, 11, 67}, + dictWord{ + 137, + 11, + 745, + }, + dictWord{7, 0, 1145}, + dictWord{4, 10, 14}, + dictWord{7, 10, 1801}, + dictWord{10, 10, 748}, + dictWord{141, 10, 458}, + dictWord{4, 11, 63}, + dictWord{ + 5, + 11, + 347, + }, + dictWord{134, 11, 474}, + dictWord{135, 0, 568}, + dictWord{4, 10, 425}, + dictWord{7, 11, 577}, + dictWord{7, 11, 1432}, + dictWord{9, 11, 475}, + dictWord{ + 9, + 11, + 505, + }, + dictWord{9, 11, 526}, + dictWord{9, 11, 609}, + dictWord{9, 11, 689}, + dictWord{9, 11, 726}, + dictWord{9, 11, 735}, + dictWord{9, 11, 738}, + dictWord{ + 10, + 11, + 556, + }, + dictWord{10, 11, 674}, + dictWord{10, 11, 684}, + dictWord{11, 11, 89}, + dictWord{11, 11, 202}, + dictWord{11, 11, 272}, + dictWord{11, 11, 380}, + dictWord{ + 11, + 11, + 415, + }, + dictWord{11, 11, 505}, + dictWord{11, 11, 537}, + dictWord{11, 11, 550}, + dictWord{11, 11, 562}, + dictWord{11, 11, 640}, + dictWord{11, 11, 667}, + dictWord{11, 11, 688}, + 
dictWord{11, 11, 847}, + dictWord{11, 11, 927}, + dictWord{11, 11, 930}, + dictWord{11, 11, 940}, + dictWord{12, 11, 144}, + dictWord{ + 12, + 11, + 325, + }, + dictWord{12, 11, 329}, + dictWord{12, 11, 389}, + dictWord{12, 11, 403}, + dictWord{12, 11, 451}, + dictWord{12, 11, 515}, + dictWord{12, 11, 604}, + dictWord{ + 12, + 11, + 616, + }, + dictWord{12, 11, 626}, + dictWord{13, 11, 66}, + dictWord{13, 11, 131}, + dictWord{13, 11, 167}, + dictWord{13, 11, 236}, + dictWord{13, 11, 368}, + dictWord{13, 11, 411}, + dictWord{13, 11, 434}, + dictWord{13, 11, 453}, + dictWord{13, 11, 461}, + dictWord{13, 11, 474}, + dictWord{14, 11, 59}, + dictWord{14, 11, 60}, + dictWord{14, 11, 139}, + dictWord{14, 11, 152}, + dictWord{14, 11, 276}, + dictWord{14, 11, 353}, + dictWord{14, 11, 402}, + dictWord{15, 11, 28}, + dictWord{ + 15, + 11, + 81, + }, + dictWord{15, 11, 123}, + dictWord{15, 11, 152}, + dictWord{18, 11, 136}, + dictWord{148, 11, 88}, + dictWord{137, 0, 247}, + dictWord{135, 11, 1622}, + dictWord{ + 9, + 11, + 544, + }, + dictWord{11, 11, 413}, + dictWord{144, 11, 25}, + dictWord{4, 0, 645}, + dictWord{7, 0, 825}, + dictWord{6, 10, 1768}, + dictWord{135, 11, 89}, + dictWord{140, 0, 328}, + dictWord{5, 10, 943}, + dictWord{134, 10, 1779}, + dictWord{134, 0, 1363}, + dictWord{5, 10, 245}, + dictWord{6, 10, 576}, + dictWord{7, 10, 582}, + dictWord{136, 10, 225}, + dictWord{134, 0, 1280}, + dictWord{5, 11, 824}, + dictWord{133, 11, 941}, + dictWord{7, 11, 440}, + dictWord{8, 11, 230}, + dictWord{ + 139, + 11, + 106, + }, + dictWord{5, 0, 28}, + dictWord{6, 0, 204}, + dictWord{10, 0, 320}, + dictWord{10, 0, 583}, + dictWord{13, 0, 502}, + dictWord{14, 0, 72}, + dictWord{14, 0, 274}, + dictWord{14, 0, 312}, + dictWord{14, 0, 344}, + dictWord{15, 0, 159}, + dictWord{16, 0, 62}, + dictWord{16, 0, 69}, + dictWord{17, 0, 30}, + dictWord{18, 0, 42}, + dictWord{ + 18, + 0, + 53, + }, + dictWord{18, 0, 84}, + dictWord{18, 0, 140}, + dictWord{19, 0, 68}, + dictWord{19, 0, 85}, + dictWord{20, 0, 5}, + dictWord{20, 0, 45}, + dictWord{20, 0, 101}, + dictWord{ + 22, + 0, + 7, + }, + dictWord{150, 0, 20}, + dictWord{4, 0, 558}, + dictWord{6, 0, 390}, + dictWord{7, 0, 162}, + dictWord{7, 0, 689}, + dictWord{9, 0, 360}, + dictWord{138, 0, 653}, + dictWord{134, 0, 764}, + dictWord{6, 0, 862}, + dictWord{137, 0, 833}, + dictWord{5, 0, 856}, + dictWord{6, 0, 1672}, + dictWord{6, 0, 1757}, + dictWord{134, 0, 1781}, + dictWord{ + 5, + 0, + 92, + }, + dictWord{10, 0, 736}, + dictWord{140, 0, 102}, + dictWord{6, 0, 1927}, + dictWord{6, 0, 1944}, + dictWord{8, 0, 924}, + dictWord{8, 0, 948}, + dictWord{ + 10, + 0, + 967, + }, + dictWord{138, 0, 978}, + dictWord{134, 0, 1479}, + dictWord{5, 0, 590}, + dictWord{8, 0, 360}, + dictWord{9, 0, 213}, + dictWord{138, 0, 63}, + dictWord{ + 134, + 0, + 1521, + }, + dictWord{6, 0, 709}, + dictWord{134, 0, 891}, + dictWord{132, 10, 443}, + dictWord{13, 0, 477}, + dictWord{14, 0, 120}, + dictWord{148, 0, 61}, + dictWord{ + 4, + 11, + 914, + }, + dictWord{5, 11, 800}, + dictWord{133, 11, 852}, + dictWord{10, 11, 54}, + dictWord{141, 11, 115}, + dictWord{4, 11, 918}, + dictWord{133, 11, 876}, + dictWord{139, 11, 152}, + dictWord{4, 11, 92}, + dictWord{133, 11, 274}, + dictWord{135, 11, 1901}, + dictWord{9, 11, 800}, + dictWord{10, 11, 693}, + dictWord{ + 11, + 11, + 482, + }, + dictWord{11, 11, 734}, + dictWord{139, 11, 789}, + dictWord{9, 0, 483}, + dictWord{132, 10, 298}, + dictWord{6, 0, 1213}, + dictWord{141, 11, 498}, + dictWord{135, 11, 1451}, + dictWord{133, 11, 743}, + 
dictWord{4, 0, 1022}, + dictWord{10, 0, 1000}, + dictWord{12, 0, 957}, + dictWord{12, 0, 980}, + dictWord{ + 12, + 0, + 1013, + }, + dictWord{14, 0, 481}, + dictWord{144, 0, 116}, + dictWord{8, 0, 503}, + dictWord{17, 0, 29}, + dictWord{4, 11, 49}, + dictWord{7, 11, 280}, + dictWord{ + 135, + 11, + 1633, + }, + dictWord{135, 0, 1712}, + dictWord{134, 0, 466}, + dictWord{136, 11, 47}, + dictWord{5, 10, 164}, + dictWord{7, 10, 121}, + dictWord{142, 10, 189}, + dictWord{ + 7, + 10, + 812, + }, + dictWord{7, 10, 1261}, + dictWord{7, 10, 1360}, + dictWord{9, 10, 632}, + dictWord{140, 10, 352}, + dictWord{139, 10, 556}, + dictWord{132, 0, 731}, + dictWord{5, 11, 272}, + dictWord{5, 11, 908}, + dictWord{5, 11, 942}, + dictWord{7, 11, 1008}, + dictWord{7, 11, 1560}, + dictWord{8, 11, 197}, + dictWord{9, 11, 47}, + dictWord{11, 11, 538}, + dictWord{139, 11, 742}, + dictWord{4, 10, 172}, + dictWord{9, 10, 611}, + dictWord{10, 10, 436}, + dictWord{12, 10, 673}, + dictWord{ + 141, + 10, + 255, + }, + dictWord{133, 10, 844}, + dictWord{10, 0, 484}, + dictWord{11, 0, 754}, + dictWord{12, 0, 457}, + dictWord{14, 0, 171}, + dictWord{14, 0, 389}, + dictWord{ + 146, + 0, + 153, + }, + dictWord{9, 10, 263}, + dictWord{10, 10, 147}, + dictWord{138, 10, 492}, + dictWord{137, 11, 891}, + dictWord{138, 0, 241}, + dictWord{133, 10, 537}, + dictWord{6, 0, 2005}, + dictWord{136, 0, 964}, + dictWord{137, 10, 842}, + dictWord{151, 11, 8}, + dictWord{4, 11, 407}, + dictWord{132, 11, 560}, + dictWord{ + 135, + 11, + 1884, + }, + dictWord{6, 0, 1100}, + dictWord{134, 0, 1242}, + dictWord{135, 0, 954}, + dictWord{5, 10, 230}, + dictWord{5, 10, 392}, + dictWord{6, 10, 420}, + dictWord{ + 9, + 10, + 568, + }, + dictWord{140, 10, 612}, + dictWord{4, 11, 475}, + dictWord{11, 11, 35}, + dictWord{11, 11, 90}, + dictWord{13, 11, 7}, + dictWord{13, 11, 71}, + dictWord{ + 13, + 11, + 177, + }, + dictWord{142, 11, 422}, + dictWord{136, 11, 332}, + dictWord{135, 0, 1958}, + dictWord{6, 0, 549}, + dictWord{8, 0, 34}, + dictWord{8, 0, 283}, + dictWord{ + 9, + 0, + 165, + }, + dictWord{138, 0, 475}, + dictWord{10, 0, 952}, + dictWord{12, 0, 966}, + dictWord{140, 0, 994}, + dictWord{5, 0, 652}, + dictWord{5, 0, 701}, + dictWord{ + 135, + 0, + 449, + }, + dictWord{4, 0, 655}, + dictWord{7, 0, 850}, + dictWord{17, 0, 75}, + dictWord{146, 0, 137}, + dictWord{4, 0, 146}, + dictWord{7, 0, 1618}, + dictWord{8, 0, 670}, + dictWord{ + 5, + 10, + 41, + }, + dictWord{7, 10, 1459}, + dictWord{7, 10, 1469}, + dictWord{7, 10, 1859}, + dictWord{9, 10, 549}, + dictWord{139, 10, 905}, + dictWord{133, 10, 696}, + dictWord{6, 0, 159}, + dictWord{6, 0, 364}, + dictWord{7, 0, 516}, + dictWord{137, 0, 518}, + dictWord{135, 0, 1439}, + dictWord{6, 11, 222}, + dictWord{7, 11, 636}, + dictWord{ + 7, + 11, + 1620, + }, + dictWord{8, 11, 409}, + dictWord{9, 11, 693}, + dictWord{139, 11, 77}, + dictWord{13, 0, 151}, + dictWord{141, 11, 45}, + dictWord{6, 0, 1027}, + dictWord{ + 4, + 11, + 336, + }, + dictWord{132, 10, 771}, + dictWord{139, 11, 392}, + dictWord{10, 11, 121}, + dictWord{11, 11, 175}, + dictWord{149, 11, 16}, + dictWord{8, 0, 950}, + dictWord{138, 0, 983}, + dictWord{133, 10, 921}, + dictWord{135, 0, 993}, + dictWord{6, 10, 180}, + dictWord{7, 10, 1137}, + dictWord{8, 10, 751}, + dictWord{ + 139, + 10, + 805, + }, + dictWord{7, 0, 501}, + dictWord{9, 0, 111}, + dictWord{10, 0, 141}, + dictWord{11, 0, 332}, + dictWord{13, 0, 43}, + dictWord{13, 0, 429}, + dictWord{14, 0, 130}, + dictWord{14, 0, 415}, + dictWord{145, 0, 102}, + dictWord{4, 10, 183}, 
+ dictWord{5, 11, 882}, + dictWord{7, 10, 271}, + dictWord{11, 10, 824}, + dictWord{11, 10, 952}, + dictWord{13, 10, 278}, + dictWord{13, 10, 339}, + dictWord{13, 10, 482}, + dictWord{14, 10, 424}, + dictWord{148, 10, 99}, + dictWord{4, 10, 19}, + dictWord{5, 10, 477}, + dictWord{5, 10, 596}, + dictWord{6, 10, 505}, + dictWord{7, 10, 1221}, + dictWord{11, 10, 907}, + dictWord{12, 10, 209}, + dictWord{141, 10, 214}, + dictWord{ + 135, + 10, + 1215, + }, + dictWord{133, 0, 452}, + dictWord{132, 11, 426}, + dictWord{5, 0, 149}, + dictWord{136, 0, 233}, + dictWord{133, 0, 935}, + dictWord{6, 11, 58}, + dictWord{ + 7, + 11, + 654, + }, + dictWord{7, 11, 745}, + dictWord{7, 11, 1969}, + dictWord{8, 11, 240}, + dictWord{8, 11, 675}, + dictWord{9, 11, 479}, + dictWord{9, 11, 731}, + dictWord{ + 10, + 11, + 330, + }, + dictWord{10, 11, 593}, + dictWord{10, 11, 817}, + dictWord{11, 11, 32}, + dictWord{11, 11, 133}, + dictWord{11, 11, 221}, + dictWord{145, 11, 68}, + dictWord{ + 12, + 0, + 582, + }, + dictWord{18, 0, 131}, + dictWord{7, 11, 102}, + dictWord{137, 11, 538}, + dictWord{136, 0, 801}, + dictWord{134, 10, 1645}, + dictWord{132, 0, 70}, + dictWord{6, 10, 92}, + dictWord{6, 10, 188}, + dictWord{7, 10, 1269}, + dictWord{7, 10, 1524}, + dictWord{7, 10, 1876}, + dictWord{10, 10, 228}, + dictWord{139, 10, 1020}, + dictWord{4, 10, 459}, + dictWord{133, 10, 966}, + dictWord{138, 0, 369}, + dictWord{16, 0, 36}, + dictWord{140, 10, 330}, + dictWord{141, 11, 366}, + dictWord{ + 7, + 0, + 721, + }, + dictWord{10, 0, 236}, + dictWord{12, 0, 204}, + dictWord{6, 10, 18}, + dictWord{7, 10, 932}, + dictWord{8, 10, 757}, + dictWord{9, 10, 54}, + dictWord{9, 10, 65}, + dictWord{9, 10, 844}, + dictWord{10, 10, 113}, + dictWord{10, 10, 315}, + dictWord{10, 10, 798}, + dictWord{11, 10, 153}, + dictWord{12, 10, 151}, + dictWord{12, 10, 392}, + dictWord{12, 10, 666}, + dictWord{142, 10, 248}, + dictWord{7, 0, 241}, + dictWord{10, 0, 430}, + dictWord{8, 10, 548}, + dictWord{9, 10, 532}, + dictWord{10, 10, 117}, + dictWord{11, 10, 351}, + dictWord{11, 10, 375}, + dictWord{143, 10, 23}, + dictWord{134, 10, 1742}, + dictWord{133, 10, 965}, + dictWord{133, 11, 566}, + dictWord{ + 6, + 11, + 48, + }, + dictWord{135, 11, 63}, + dictWord{134, 10, 182}, + dictWord{10, 10, 65}, + dictWord{10, 10, 488}, + dictWord{138, 10, 497}, + dictWord{6, 11, 114}, + dictWord{7, 11, 1224}, + dictWord{7, 11, 1556}, + dictWord{136, 11, 3}, + dictWord{134, 0, 1817}, + dictWord{8, 11, 576}, + dictWord{137, 11, 267}, + dictWord{ + 6, + 0, + 1078, + }, + dictWord{144, 0, 16}, + dictWord{9, 10, 588}, + dictWord{138, 10, 260}, + dictWord{138, 0, 1021}, + dictWord{5, 0, 406}, + dictWord{134, 0, 2022}, + dictWord{133, 11, 933}, + dictWord{6, 0, 69}, + dictWord{135, 0, 117}, + dictWord{7, 0, 1830}, + dictWord{136, 11, 427}, + dictWord{4, 0, 432}, + dictWord{135, 0, 824}, + dictWord{134, 10, 1786}, + dictWord{133, 0, 826}, + dictWord{139, 11, 67}, + dictWord{133, 11, 759}, + dictWord{135, 10, 308}, + dictWord{137, 0, 816}, + dictWord{ + 133, + 0, + 1000, + }, + dictWord{4, 0, 297}, + dictWord{6, 0, 529}, + dictWord{7, 0, 152}, + dictWord{7, 0, 713}, + dictWord{7, 0, 1845}, + dictWord{8, 0, 710}, + dictWord{8, 0, 717}, + dictWord{12, 0, 639}, + dictWord{140, 0, 685}, + dictWord{7, 0, 423}, + dictWord{136, 10, 588}, + dictWord{136, 10, 287}, + dictWord{136, 0, 510}, + dictWord{ + 134, + 0, + 1048, + }, + dictWord{6, 0, 618}, + dictWord{7, 11, 56}, + dictWord{7, 11, 1989}, + dictWord{8, 11, 337}, + dictWord{8, 11, 738}, + dictWord{9, 11, 600}, + 
dictWord{ + 10, + 11, + 483, + }, + dictWord{12, 11, 37}, + dictWord{13, 11, 447}, + dictWord{142, 11, 92}, + dictWord{4, 0, 520}, + dictWord{135, 0, 575}, + dictWord{8, 0, 990}, + dictWord{ + 138, + 0, + 977, + }, + dictWord{135, 11, 774}, + dictWord{9, 11, 347}, + dictWord{11, 11, 24}, + dictWord{140, 11, 170}, + dictWord{136, 11, 379}, + dictWord{140, 10, 290}, + dictWord{132, 11, 328}, + dictWord{4, 0, 321}, + dictWord{134, 0, 569}, + dictWord{4, 11, 101}, + dictWord{135, 11, 1171}, + dictWord{7, 0, 723}, + dictWord{7, 0, 1135}, + dictWord{5, 11, 833}, + dictWord{136, 11, 744}, + dictWord{7, 10, 719}, + dictWord{8, 10, 809}, + dictWord{136, 10, 834}, + dictWord{8, 0, 921}, + dictWord{136, 10, 796}, + dictWord{5, 10, 210}, + dictWord{6, 10, 213}, + dictWord{7, 10, 60}, + dictWord{10, 10, 364}, + dictWord{139, 10, 135}, + dictWord{5, 0, 397}, + dictWord{6, 0, 154}, + dictWord{7, 0, 676}, + dictWord{8, 0, 443}, + dictWord{8, 0, 609}, + dictWord{9, 0, 24}, + dictWord{9, 0, 325}, + dictWord{10, 0, 35}, + dictWord{11, 0, 535}, + dictWord{11, 0, 672}, + dictWord{11, 0, 1018}, + dictWord{12, 0, 637}, + dictWord{16, 0, 30}, + dictWord{5, 10, 607}, + dictWord{8, 10, 326}, + dictWord{136, 10, 490}, + dictWord{4, 10, 701}, + dictWord{5, 10, 472}, + dictWord{6, 11, 9}, + dictWord{6, 11, 397}, + dictWord{7, 11, 53}, + dictWord{7, 11, 1742}, + dictWord{9, 10, 758}, + dictWord{10, 11, 632}, + dictWord{ + 11, + 11, + 828, + }, + dictWord{140, 11, 146}, + dictWord{135, 10, 380}, + dictWord{135, 10, 1947}, + dictWord{148, 11, 109}, + dictWord{10, 10, 278}, + dictWord{ + 138, + 11, + 278, + }, + dictWord{134, 0, 856}, + dictWord{7, 0, 139}, + dictWord{4, 10, 386}, + dictWord{8, 10, 405}, + dictWord{8, 10, 728}, + dictWord{9, 10, 497}, + dictWord{ + 11, + 10, + 110, + }, + dictWord{11, 10, 360}, + dictWord{15, 10, 37}, + dictWord{144, 10, 84}, + dictWord{141, 0, 282}, + dictWord{133, 0, 981}, + dictWord{5, 0, 288}, + dictWord{ + 7, + 10, + 1452, + }, + dictWord{7, 10, 1480}, + dictWord{8, 10, 634}, + dictWord{140, 10, 472}, + dictWord{7, 0, 1890}, + dictWord{8, 11, 367}, + dictWord{10, 11, 760}, + dictWord{ + 14, + 11, + 79, + }, + dictWord{20, 11, 17}, + dictWord{152, 11, 0}, + dictWord{4, 10, 524}, + dictWord{136, 10, 810}, + dictWord{4, 0, 56}, + dictWord{7, 0, 1791}, + dictWord{ + 8, + 0, + 607, + }, + dictWord{8, 0, 651}, + dictWord{11, 0, 465}, + dictWord{11, 0, 835}, + dictWord{12, 0, 337}, + dictWord{141, 0, 480}, + dictWord{10, 10, 238}, + dictWord{ + 141, + 10, + 33, + }, + dictWord{11, 11, 417}, + dictWord{12, 11, 223}, + dictWord{140, 11, 265}, + dictWord{9, 0, 158}, + dictWord{10, 0, 411}, + dictWord{140, 0, 261}, + dictWord{ + 133, + 10, + 532, + }, + dictWord{133, 10, 997}, + dictWord{12, 11, 186}, + dictWord{12, 11, 292}, + dictWord{14, 11, 100}, + dictWord{146, 11, 70}, + dictWord{6, 0, 1403}, + dictWord{136, 0, 617}, + dictWord{134, 0, 1205}, + dictWord{139, 0, 563}, + dictWord{4, 0, 242}, + dictWord{134, 0, 333}, + dictWord{4, 11, 186}, + dictWord{5, 11, 157}, + dictWord{8, 11, 168}, + dictWord{138, 11, 6}, + dictWord{132, 0, 369}, + dictWord{133, 11, 875}, + dictWord{5, 10, 782}, + dictWord{5, 10, 829}, + dictWord{ + 134, + 10, + 1738, + }, + dictWord{134, 0, 622}, + dictWord{135, 11, 1272}, + dictWord{6, 0, 1407}, + dictWord{7, 11, 111}, + dictWord{136, 11, 581}, + dictWord{7, 10, 1823}, + dictWord{139, 10, 693}, + dictWord{7, 0, 160}, + dictWord{10, 0, 624}, + dictWord{142, 0, 279}, + dictWord{132, 0, 363}, + dictWord{10, 11, 589}, + dictWord{12, 11, 111}, + dictWord{13, 11, 260}, + 
dictWord{14, 11, 82}, + dictWord{18, 11, 63}, + dictWord{147, 11, 45}, + dictWord{7, 11, 1364}, + dictWord{7, 11, 1907}, + dictWord{ + 141, + 11, + 158, + }, + dictWord{4, 11, 404}, + dictWord{4, 11, 659}, + dictWord{135, 11, 675}, + dictWord{13, 11, 211}, + dictWord{14, 11, 133}, + dictWord{14, 11, 204}, + dictWord{ + 15, + 11, + 64, + }, + dictWord{15, 11, 69}, + dictWord{15, 11, 114}, + dictWord{16, 11, 10}, + dictWord{19, 11, 23}, + dictWord{19, 11, 35}, + dictWord{19, 11, 39}, + dictWord{ + 19, + 11, + 51, + }, + dictWord{19, 11, 71}, + dictWord{19, 11, 75}, + dictWord{152, 11, 15}, + dictWord{4, 10, 78}, + dictWord{5, 10, 96}, + dictWord{5, 10, 182}, + dictWord{7, 10, 1724}, + dictWord{7, 10, 1825}, + dictWord{10, 10, 394}, + dictWord{10, 10, 471}, + dictWord{11, 10, 532}, + dictWord{14, 10, 340}, + dictWord{145, 10, 88}, + dictWord{ + 135, + 10, + 1964, + }, + dictWord{133, 11, 391}, + dictWord{11, 11, 887}, + dictWord{14, 11, 365}, + dictWord{142, 11, 375}, + dictWord{5, 11, 540}, + dictWord{6, 11, 1697}, + dictWord{7, 11, 222}, + dictWord{136, 11, 341}, + dictWord{134, 11, 78}, + dictWord{9, 0, 601}, + dictWord{9, 0, 619}, + dictWord{10, 0, 505}, + dictWord{10, 0, 732}, + dictWord{11, 0, 355}, + dictWord{140, 0, 139}, + dictWord{134, 0, 292}, + dictWord{139, 0, 174}, + dictWord{5, 0, 177}, + dictWord{6, 0, 616}, + dictWord{7, 0, 827}, + dictWord{ + 9, + 0, + 525, + }, + dictWord{138, 0, 656}, + dictWord{10, 0, 31}, + dictWord{6, 10, 215}, + dictWord{7, 10, 1028}, + dictWord{7, 10, 1473}, + dictWord{7, 10, 1721}, + dictWord{ + 9, + 10, + 424, + }, + dictWord{138, 10, 779}, + dictWord{135, 10, 584}, + dictWord{136, 11, 293}, + dictWord{134, 0, 685}, + dictWord{135, 11, 1868}, + dictWord{ + 133, + 11, + 460, + }, + dictWord{7, 0, 647}, + dictWord{6, 10, 67}, + dictWord{7, 10, 1630}, + dictWord{9, 10, 354}, + dictWord{9, 10, 675}, + dictWord{10, 10, 830}, + dictWord{ + 14, + 10, + 80, + }, + dictWord{145, 10, 80}, + dictWord{4, 0, 161}, + dictWord{133, 0, 631}, + dictWord{6, 10, 141}, + dictWord{7, 10, 225}, + dictWord{9, 10, 59}, + dictWord{9, 10, 607}, + dictWord{10, 10, 312}, + dictWord{11, 10, 687}, + dictWord{12, 10, 555}, + dictWord{13, 10, 373}, + dictWord{13, 10, 494}, + dictWord{148, 10, 58}, + dictWord{ + 7, + 11, + 965, + }, + dictWord{7, 11, 1460}, + dictWord{135, 11, 1604}, + dictWord{136, 10, 783}, + dictWord{134, 11, 388}, + dictWord{6, 0, 722}, + dictWord{6, 0, 1267}, + dictWord{ + 4, + 11, + 511, + }, + dictWord{9, 11, 333}, + dictWord{9, 11, 379}, + dictWord{10, 11, 602}, + dictWord{11, 11, 441}, + dictWord{11, 11, 723}, + dictWord{11, 11, 976}, + dictWord{140, 11, 357}, + dictWord{134, 0, 1797}, + dictWord{135, 0, 1684}, + dictWord{9, 0, 469}, + dictWord{9, 0, 709}, + dictWord{12, 0, 512}, + dictWord{14, 0, 65}, + dictWord{17, 0, 12}, + dictWord{5, 11, 938}, + dictWord{136, 11, 707}, + dictWord{7, 0, 1230}, + dictWord{136, 0, 531}, + dictWord{10, 0, 229}, + dictWord{11, 0, 73}, + dictWord{ + 11, + 0, + 376, + }, + dictWord{139, 0, 433}, + dictWord{12, 0, 268}, + dictWord{12, 0, 640}, + dictWord{142, 0, 119}, + dictWord{7, 10, 430}, + dictWord{139, 10, 46}, + dictWord{ + 6, + 0, + 558, + }, + dictWord{7, 0, 651}, + dictWord{8, 0, 421}, + dictWord{9, 0, 0}, + dictWord{10, 0, 34}, + dictWord{139, 0, 1008}, + dictWord{6, 0, 106}, + dictWord{7, 0, 1786}, + dictWord{7, 0, 1821}, + dictWord{9, 0, 102}, + dictWord{9, 0, 763}, + dictWord{5, 10, 602}, + dictWord{7, 10, 2018}, + dictWord{137, 10, 418}, + dictWord{5, 0, 65}, + dictWord{ + 6, + 0, + 416, + }, + dictWord{7, 0, 
1720}, + dictWord{7, 0, 1924}, + dictWord{10, 0, 109}, + dictWord{11, 0, 14}, + dictWord{11, 0, 70}, + dictWord{11, 0, 569}, + dictWord{11, 0, 735}, + dictWord{15, 0, 153}, + dictWord{20, 0, 80}, + dictWord{136, 10, 677}, + dictWord{135, 11, 1625}, + dictWord{137, 11, 772}, + dictWord{136, 0, 595}, + dictWord{ + 6, + 11, + 469, + }, + dictWord{7, 11, 1709}, + dictWord{138, 11, 515}, + dictWord{7, 0, 1832}, + dictWord{138, 0, 374}, + dictWord{9, 0, 106}, + dictWord{9, 0, 163}, + dictWord{ + 9, + 0, + 296, + }, + dictWord{10, 0, 167}, + dictWord{10, 0, 172}, + dictWord{10, 0, 777}, + dictWord{139, 0, 16}, + dictWord{6, 0, 6}, + dictWord{7, 0, 81}, + dictWord{7, 0, 771}, + dictWord{ + 7, + 0, + 1731, + }, + dictWord{9, 0, 405}, + dictWord{138, 0, 421}, + dictWord{4, 11, 500}, + dictWord{135, 11, 938}, + dictWord{5, 11, 68}, + dictWord{134, 11, 383}, + dictWord{ + 5, + 0, + 881, + }, + dictWord{133, 0, 885}, + dictWord{6, 0, 854}, + dictWord{6, 0, 1132}, + dictWord{6, 0, 1495}, + dictWord{6, 0, 1526}, + dictWord{6, 0, 1533}, + dictWord{ + 134, + 0, + 1577, + }, + dictWord{4, 11, 337}, + dictWord{6, 11, 353}, + dictWord{7, 11, 1934}, + dictWord{8, 11, 488}, + dictWord{137, 11, 429}, + dictWord{7, 11, 236}, + dictWord{ + 7, + 11, + 1795, + }, + dictWord{8, 11, 259}, + dictWord{9, 11, 135}, + dictWord{9, 11, 177}, + dictWord{10, 11, 825}, + dictWord{11, 11, 115}, + dictWord{11, 11, 370}, + dictWord{ + 11, + 11, + 405, + }, + dictWord{11, 11, 604}, + dictWord{12, 11, 10}, + dictWord{12, 11, 667}, + dictWord{12, 11, 669}, + dictWord{13, 11, 76}, + dictWord{14, 11, 310}, + dictWord{15, 11, 76}, + dictWord{15, 11, 147}, + dictWord{148, 11, 23}, + dictWord{5, 0, 142}, + dictWord{134, 0, 546}, + dictWord{4, 11, 15}, + dictWord{5, 11, 22}, + dictWord{ + 6, + 11, + 244, + }, + dictWord{7, 11, 40}, + dictWord{7, 11, 200}, + dictWord{7, 11, 906}, + dictWord{7, 11, 1199}, + dictWord{9, 11, 616}, + dictWord{10, 11, 716}, + dictWord{ + 11, + 11, + 635, + }, + dictWord{11, 11, 801}, + dictWord{140, 11, 458}, + dictWord{5, 0, 466}, + dictWord{11, 0, 571}, + dictWord{12, 0, 198}, + dictWord{13, 0, 283}, + dictWord{ + 14, + 0, + 186, + }, + dictWord{15, 0, 21}, + dictWord{15, 0, 103}, + dictWord{135, 10, 329}, + dictWord{4, 0, 185}, + dictWord{5, 0, 257}, + dictWord{5, 0, 839}, + dictWord{5, 0, 936}, + dictWord{9, 0, 399}, + dictWord{10, 0, 258}, + dictWord{10, 0, 395}, + dictWord{10, 0, 734}, + dictWord{11, 0, 1014}, + dictWord{12, 0, 23}, + dictWord{13, 0, 350}, + dictWord{ + 14, + 0, + 150, + }, + dictWord{19, 0, 6}, + dictWord{135, 11, 1735}, + dictWord{12, 11, 36}, + dictWord{141, 11, 337}, + dictWord{5, 11, 598}, + dictWord{7, 11, 791}, + dictWord{ + 8, + 11, + 108, + }, + dictWord{137, 11, 123}, + dictWord{132, 10, 469}, + dictWord{7, 0, 404}, + dictWord{7, 0, 1377}, + dictWord{7, 0, 1430}, + dictWord{7, 0, 2017}, + dictWord{ + 8, + 0, + 149, + }, + dictWord{8, 0, 239}, + dictWord{8, 0, 512}, + dictWord{8, 0, 793}, + dictWord{8, 0, 818}, + dictWord{9, 0, 474}, + dictWord{9, 0, 595}, + dictWord{10, 0, 122}, + dictWord{10, 0, 565}, + dictWord{10, 0, 649}, + dictWord{10, 0, 783}, + dictWord{11, 0, 239}, + dictWord{11, 0, 295}, + dictWord{11, 0, 447}, + dictWord{11, 0, 528}, + dictWord{ + 11, + 0, + 639, + }, + dictWord{11, 0, 800}, + dictWord{12, 0, 25}, + dictWord{12, 0, 77}, + dictWord{12, 0, 157}, + dictWord{12, 0, 256}, + dictWord{12, 0, 316}, + dictWord{12, 0, 390}, + dictWord{12, 0, 391}, + dictWord{12, 0, 395}, + dictWord{12, 0, 478}, + dictWord{12, 0, 503}, + dictWord{12, 0, 592}, + dictWord{12, 0, 
680}, + dictWord{13, 0, 50}, + dictWord{13, 0, 53}, + dictWord{13, 0, 132}, + dictWord{13, 0, 198}, + dictWord{13, 0, 322}, + dictWord{13, 0, 415}, + dictWord{13, 0, 511}, + dictWord{14, 0, 71}, + dictWord{ + 14, + 0, + 395, + }, + dictWord{15, 0, 71}, + dictWord{15, 0, 136}, + dictWord{17, 0, 123}, + dictWord{18, 0, 93}, + dictWord{147, 0, 58}, + dictWord{136, 0, 712}, + dictWord{ + 134, + 10, + 1743, + }, + dictWord{5, 10, 929}, + dictWord{6, 10, 340}, + dictWord{8, 10, 376}, + dictWord{136, 10, 807}, + dictWord{6, 0, 1848}, + dictWord{8, 0, 860}, + dictWord{ + 10, + 0, + 856, + }, + dictWord{10, 0, 859}, + dictWord{10, 0, 925}, + dictWord{10, 0, 941}, + dictWord{140, 0, 762}, + dictWord{6, 0, 629}, + dictWord{6, 0, 906}, + dictWord{9, 0, 810}, + dictWord{140, 0, 652}, + dictWord{5, 10, 218}, + dictWord{7, 10, 1610}, + dictWord{138, 10, 83}, + dictWord{7, 10, 1512}, + dictWord{135, 10, 1794}, + dictWord{ + 4, + 0, + 377, + }, + dictWord{24, 0, 13}, + dictWord{4, 11, 155}, + dictWord{7, 11, 1689}, + dictWord{11, 10, 0}, + dictWord{144, 10, 78}, + dictWord{4, 11, 164}, + dictWord{5, 11, 151}, + dictWord{5, 11, 730}, + dictWord{5, 11, 741}, + dictWord{7, 11, 498}, + dictWord{7, 11, 870}, + dictWord{7, 11, 1542}, + dictWord{12, 11, 213}, + dictWord{14, 11, 36}, + dictWord{14, 11, 391}, + dictWord{17, 11, 111}, + dictWord{18, 11, 6}, + dictWord{18, 11, 46}, + dictWord{18, 11, 151}, + dictWord{19, 11, 36}, + dictWord{20, 11, 32}, + dictWord{20, 11, 56}, + dictWord{20, 11, 69}, + dictWord{20, 11, 102}, + dictWord{21, 11, 4}, + dictWord{22, 11, 8}, + dictWord{22, 11, 10}, + dictWord{22, 11, 14}, + dictWord{ + 150, + 11, + 31, + }, + dictWord{7, 0, 1842}, + dictWord{133, 10, 571}, + dictWord{4, 10, 455}, + dictWord{4, 11, 624}, + dictWord{135, 11, 1752}, + dictWord{134, 0, 1501}, + dictWord{4, 11, 492}, + dictWord{5, 11, 451}, + dictWord{6, 10, 161}, + dictWord{7, 10, 372}, + dictWord{137, 10, 597}, + dictWord{132, 10, 349}, + dictWord{4, 0, 180}, + dictWord{135, 0, 1906}, + dictWord{135, 11, 835}, + dictWord{141, 11, 70}, + dictWord{132, 0, 491}, + dictWord{137, 10, 751}, + dictWord{6, 10, 432}, + dictWord{ + 139, + 10, + 322, + }, + dictWord{4, 0, 171}, + dictWord{138, 0, 234}, + dictWord{6, 11, 113}, + dictWord{135, 11, 436}, + dictWord{4, 0, 586}, + dictWord{7, 0, 1186}, + dictWord{ + 138, + 0, + 631, + }, + dictWord{5, 10, 468}, + dictWord{10, 10, 325}, + dictWord{11, 10, 856}, + dictWord{12, 10, 345}, + dictWord{143, 10, 104}, + dictWord{5, 10, 223}, + dictWord{10, 11, 592}, + dictWord{10, 11, 753}, + dictWord{12, 11, 317}, + dictWord{12, 11, 355}, + dictWord{12, 11, 465}, + dictWord{12, 11, 469}, + dictWord{ + 12, + 11, + 560, + }, + dictWord{12, 11, 578}, + dictWord{141, 11, 243}, + dictWord{132, 10, 566}, + dictWord{135, 11, 520}, + dictWord{4, 10, 59}, + dictWord{135, 10, 1394}, + dictWord{6, 10, 436}, + dictWord{139, 10, 481}, + dictWord{9, 0, 931}, + dictWord{10, 0, 334}, + dictWord{20, 0, 71}, + dictWord{4, 10, 48}, + dictWord{5, 10, 271}, + dictWord{ + 7, + 10, + 953, + }, + dictWord{135, 11, 1878}, + dictWord{11, 0, 170}, + dictWord{5, 10, 610}, + dictWord{136, 10, 457}, + dictWord{133, 10, 755}, + dictWord{6, 0, 1587}, + dictWord{135, 10, 1217}, + dictWord{4, 10, 197}, + dictWord{149, 11, 26}, + dictWord{133, 11, 585}, + dictWord{137, 11, 521}, + dictWord{133, 0, 765}, + dictWord{ + 133, + 10, + 217, + }, + dictWord{139, 11, 586}, + dictWord{133, 0, 424}, + dictWord{9, 11, 752}, + dictWord{12, 11, 610}, + dictWord{13, 11, 431}, + dictWord{16, 11, 59}, + dictWord{146, 11, 109}, + 
dictWord{136, 0, 714}, + dictWord{7, 0, 685}, + dictWord{132, 11, 307}, + dictWord{9, 0, 420}, + dictWord{10, 0, 269}, + dictWord{10, 0, 285}, + dictWord{10, 0, 576}, + dictWord{11, 0, 397}, + dictWord{13, 0, 175}, + dictWord{145, 0, 90}, + dictWord{132, 0, 429}, + dictWord{133, 11, 964}, + dictWord{9, 11, 463}, + dictWord{138, 11, 595}, + dictWord{7, 0, 18}, + dictWord{7, 0, 699}, + dictWord{7, 0, 1966}, + dictWord{8, 0, 752}, + dictWord{9, 0, 273}, + dictWord{9, 0, 412}, + dictWord{ + 9, + 0, + 703, + }, + dictWord{10, 0, 71}, + dictWord{10, 0, 427}, + dictWord{138, 0, 508}, + dictWord{4, 10, 165}, + dictWord{7, 10, 1398}, + dictWord{135, 10, 1829}, + dictWord{ + 4, + 0, + 53, + }, + dictWord{5, 0, 186}, + dictWord{7, 0, 752}, + dictWord{7, 0, 828}, + dictWord{142, 0, 116}, + dictWord{8, 0, 575}, + dictWord{10, 0, 289}, + dictWord{139, 0, 319}, + dictWord{132, 0, 675}, + dictWord{134, 0, 1424}, + dictWord{4, 11, 75}, + dictWord{5, 11, 180}, + dictWord{6, 11, 500}, + dictWord{7, 11, 58}, + dictWord{7, 11, 710}, + dictWord{138, 11, 645}, + dictWord{133, 11, 649}, + dictWord{6, 11, 276}, + dictWord{7, 11, 282}, + dictWord{7, 11, 879}, + dictWord{7, 11, 924}, + dictWord{8, 11, 459}, + dictWord{9, 11, 599}, + dictWord{9, 11, 754}, + dictWord{11, 11, 574}, + dictWord{12, 11, 128}, + dictWord{12, 11, 494}, + dictWord{13, 11, 52}, + dictWord{13, 11, 301}, + dictWord{15, 11, 30}, + dictWord{143, 11, 132}, + dictWord{6, 0, 647}, + dictWord{134, 0, 1095}, + dictWord{5, 10, 9}, + dictWord{7, 10, 297}, + dictWord{7, 10, 966}, + dictWord{140, 10, 306}, + dictWord{132, 11, 200}, + dictWord{134, 0, 1334}, + dictWord{5, 10, 146}, + dictWord{6, 10, 411}, + dictWord{138, 10, 721}, + dictWord{ + 6, + 0, + 209, + }, + dictWord{6, 0, 1141}, + dictWord{6, 0, 1288}, + dictWord{8, 0, 468}, + dictWord{9, 0, 210}, + dictWord{11, 0, 36}, + dictWord{12, 0, 28}, + dictWord{12, 0, 630}, + dictWord{13, 0, 21}, + dictWord{13, 0, 349}, + dictWord{14, 0, 7}, + dictWord{145, 0, 13}, + dictWord{6, 10, 177}, + dictWord{135, 10, 467}, + dictWord{4, 0, 342}, + dictWord{ + 135, + 0, + 1179, + }, + dictWord{10, 11, 454}, + dictWord{140, 11, 324}, + dictWord{4, 0, 928}, + dictWord{133, 0, 910}, + dictWord{7, 0, 1838}, + dictWord{6, 11, 225}, + dictWord{ + 137, + 11, + 211, + }, + dictWord{16, 0, 101}, + dictWord{20, 0, 115}, + dictWord{20, 0, 118}, + dictWord{148, 0, 122}, + dictWord{4, 0, 496}, + dictWord{135, 0, 856}, + dictWord{ + 4, + 0, + 318, + }, + dictWord{11, 0, 654}, + dictWord{7, 11, 718}, + dictWord{139, 11, 102}, + dictWord{8, 11, 58}, + dictWord{9, 11, 724}, + dictWord{11, 11, 809}, + dictWord{ + 13, + 11, + 113, + }, + dictWord{145, 11, 72}, + dictWord{5, 10, 200}, + dictWord{6, 11, 345}, + dictWord{135, 11, 1247}, + dictWord{8, 11, 767}, + dictWord{8, 11, 803}, + dictWord{ + 9, + 11, + 301, + }, + dictWord{137, 11, 903}, + dictWord{7, 0, 915}, + dictWord{8, 0, 247}, + dictWord{19, 0, 0}, + dictWord{7, 11, 1949}, + dictWord{136, 11, 674}, + dictWord{ + 4, + 0, + 202, + }, + dictWord{5, 0, 382}, + dictWord{6, 0, 454}, + dictWord{7, 0, 936}, + dictWord{7, 0, 1803}, + dictWord{8, 0, 758}, + dictWord{9, 0, 375}, + dictWord{9, 0, 895}, + dictWord{ + 10, + 0, + 743, + }, + dictWord{10, 0, 792}, + dictWord{11, 0, 978}, + dictWord{11, 0, 1012}, + dictWord{142, 0, 109}, + dictWord{7, 0, 1150}, + dictWord{7, 0, 1425}, + dictWord{ + 7, + 0, + 1453, + }, + dictWord{140, 0, 513}, + dictWord{134, 11, 259}, + dictWord{138, 0, 791}, + dictWord{11, 0, 821}, + dictWord{12, 0, 110}, + dictWord{12, 0, 153}, + dictWord{ + 18, + 0, + 
41, + }, + dictWord{150, 0, 19}, + dictWord{134, 10, 481}, + dictWord{132, 0, 796}, + dictWord{6, 0, 445}, + dictWord{9, 0, 909}, + dictWord{136, 11, 254}, + dictWord{ + 10, + 0, + 776, + }, + dictWord{13, 0, 345}, + dictWord{142, 0, 425}, + dictWord{4, 10, 84}, + dictWord{7, 10, 1482}, + dictWord{10, 10, 76}, + dictWord{138, 10, 142}, + dictWord{ + 135, + 11, + 742, + }, + dictWord{6, 0, 578}, + dictWord{133, 10, 1015}, + dictWord{6, 0, 1387}, + dictWord{4, 10, 315}, + dictWord{5, 10, 507}, + dictWord{135, 10, 1370}, + dictWord{4, 0, 438}, + dictWord{133, 0, 555}, + dictWord{136, 0, 766}, + dictWord{133, 11, 248}, + dictWord{134, 10, 1722}, + dictWord{4, 11, 116}, + dictWord{5, 11, 95}, + dictWord{5, 11, 445}, + dictWord{7, 11, 1688}, + dictWord{8, 11, 29}, + dictWord{9, 11, 272}, + dictWord{11, 11, 509}, + dictWord{139, 11, 915}, + dictWord{135, 0, 541}, + dictWord{133, 11, 543}, + dictWord{8, 10, 222}, + dictWord{8, 10, 476}, + dictWord{9, 10, 238}, + dictWord{11, 10, 516}, + dictWord{11, 10, 575}, + dictWord{ + 15, + 10, + 109, + }, + dictWord{146, 10, 100}, + dictWord{6, 0, 880}, + dictWord{134, 0, 1191}, + dictWord{5, 11, 181}, + dictWord{136, 11, 41}, + dictWord{134, 0, 1506}, + dictWord{132, 11, 681}, + dictWord{7, 11, 25}, + dictWord{8, 11, 202}, + dictWord{138, 11, 536}, + dictWord{139, 0, 983}, + dictWord{137, 0, 768}, + dictWord{132, 0, 584}, + dictWord{9, 11, 423}, + dictWord{140, 11, 89}, + dictWord{8, 11, 113}, + dictWord{9, 11, 877}, + dictWord{10, 11, 554}, + dictWord{11, 11, 83}, + dictWord{12, 11, 136}, + dictWord{147, 11, 109}, + dictWord{7, 10, 706}, + dictWord{7, 10, 1058}, + dictWord{138, 10, 538}, + dictWord{133, 11, 976}, + dictWord{4, 11, 206}, + dictWord{ + 135, + 11, + 746, + }, + dictWord{136, 11, 526}, + dictWord{140, 0, 737}, + dictWord{11, 10, 92}, + dictWord{11, 10, 196}, + dictWord{11, 10, 409}, + dictWord{11, 10, 450}, + dictWord{11, 10, 666}, + dictWord{11, 10, 777}, + dictWord{12, 10, 262}, + dictWord{13, 10, 385}, + dictWord{13, 10, 393}, + dictWord{15, 10, 115}, + dictWord{ + 16, + 10, + 45, + }, + dictWord{145, 10, 82}, + dictWord{4, 0, 226}, + dictWord{4, 0, 326}, + dictWord{7, 0, 1770}, + dictWord{4, 11, 319}, + dictWord{5, 11, 699}, + dictWord{138, 11, 673}, + dictWord{6, 10, 40}, + dictWord{135, 10, 1781}, + dictWord{5, 0, 426}, + dictWord{8, 0, 30}, + dictWord{9, 0, 2}, + dictWord{11, 0, 549}, + dictWord{147, 0, 122}, + dictWord{ + 6, + 0, + 1161, + }, + dictWord{134, 0, 1329}, + dictWord{138, 10, 97}, + dictWord{6, 10, 423}, + dictWord{7, 10, 665}, + dictWord{135, 10, 1210}, + dictWord{7, 11, 13}, + dictWord{ + 8, + 11, + 226, + }, + dictWord{10, 11, 537}, + dictWord{11, 11, 570}, + dictWord{11, 11, 605}, + dictWord{11, 11, 799}, + dictWord{11, 11, 804}, + dictWord{12, 11, 85}, + dictWord{12, 11, 516}, + dictWord{12, 11, 623}, + dictWord{13, 11, 112}, + dictWord{13, 11, 361}, + dictWord{14, 11, 77}, + dictWord{14, 11, 78}, + dictWord{17, 11, 28}, + dictWord{147, 11, 110}, + dictWord{132, 11, 769}, + dictWord{132, 11, 551}, + dictWord{132, 11, 728}, + dictWord{147, 0, 117}, + dictWord{9, 11, 57}, + dictWord{ + 9, + 11, + 459, + }, + dictWord{10, 11, 425}, + dictWord{11, 11, 119}, + dictWord{12, 11, 184}, + dictWord{12, 11, 371}, + dictWord{13, 11, 358}, + dictWord{145, 11, 51}, + dictWord{ + 5, + 11, + 188, + }, + dictWord{5, 11, 814}, + dictWord{8, 11, 10}, + dictWord{9, 11, 421}, + dictWord{9, 11, 729}, + dictWord{10, 11, 609}, + dictWord{139, 11, 689}, + dictWord{134, 11, 624}, + dictWord{135, 11, 298}, + dictWord{135, 0, 462}, + dictWord{4, 
0, 345}, + dictWord{139, 10, 624}, + dictWord{136, 10, 574}, + dictWord{ + 4, + 0, + 385, + }, + dictWord{7, 0, 265}, + dictWord{135, 0, 587}, + dictWord{6, 0, 808}, + dictWord{132, 11, 528}, + dictWord{133, 0, 398}, + dictWord{132, 10, 354}, + dictWord{ + 4, + 0, + 347, + }, + dictWord{5, 0, 423}, + dictWord{5, 0, 996}, + dictWord{135, 0, 1329}, + dictWord{135, 10, 1558}, + dictWord{7, 0, 1259}, + dictWord{9, 0, 125}, + dictWord{ + 139, + 0, + 65, + }, + dictWord{5, 0, 136}, + dictWord{6, 0, 136}, + dictWord{136, 0, 644}, + dictWord{5, 11, 104}, + dictWord{6, 11, 173}, + dictWord{135, 11, 1631}, + dictWord{ + 135, + 0, + 469, + }, + dictWord{133, 10, 830}, + dictWord{4, 0, 278}, + dictWord{5, 0, 465}, + dictWord{135, 0, 1367}, + dictWord{7, 11, 810}, + dictWord{8, 11, 138}, + dictWord{ + 8, + 11, + 342, + }, + dictWord{9, 11, 84}, + dictWord{10, 11, 193}, + dictWord{11, 11, 883}, + dictWord{140, 11, 359}, + dictWord{5, 10, 496}, + dictWord{135, 10, 203}, + dictWord{ + 4, + 0, + 433, + }, + dictWord{133, 0, 719}, + dictWord{6, 11, 95}, + dictWord{134, 10, 547}, + dictWord{5, 10, 88}, + dictWord{137, 10, 239}, + dictWord{6, 11, 406}, + dictWord{ + 10, + 11, + 409, + }, + dictWord{10, 11, 447}, + dictWord{11, 11, 44}, + dictWord{140, 11, 100}, + dictWord{134, 0, 1423}, + dictWord{7, 10, 650}, + dictWord{135, 10, 1310}, + dictWord{134, 0, 749}, + dictWord{135, 11, 1243}, + dictWord{135, 0, 1363}, + dictWord{6, 0, 381}, + dictWord{7, 0, 645}, + dictWord{7, 0, 694}, + dictWord{8, 0, 546}, + dictWord{7, 10, 1076}, + dictWord{9, 10, 80}, + dictWord{11, 10, 78}, + dictWord{11, 10, 421}, + dictWord{11, 10, 534}, + dictWord{140, 10, 545}, + dictWord{ + 134, + 11, + 1636, + }, + dictWord{135, 11, 1344}, + dictWord{12, 0, 277}, + dictWord{7, 10, 274}, + dictWord{11, 10, 479}, + dictWord{139, 10, 507}, + dictWord{6, 0, 705}, + dictWord{ + 6, + 0, + 783, + }, + dictWord{6, 0, 1275}, + dictWord{6, 0, 1481}, + dictWord{4, 11, 282}, + dictWord{7, 11, 1034}, + dictWord{11, 11, 398}, + dictWord{11, 11, 634}, + dictWord{ + 12, + 11, + 1, + }, + dictWord{12, 11, 79}, + dictWord{12, 11, 544}, + dictWord{14, 11, 237}, + dictWord{17, 11, 10}, + dictWord{146, 11, 20}, + dictWord{134, 0, 453}, + dictWord{ + 4, + 0, + 555, + }, + dictWord{8, 0, 536}, + dictWord{10, 0, 288}, + dictWord{11, 0, 1005}, + dictWord{4, 10, 497}, + dictWord{135, 10, 1584}, + dictWord{5, 11, 118}, + dictWord{ + 5, + 11, + 499, + }, + dictWord{6, 11, 476}, + dictWord{7, 11, 600}, + dictWord{7, 11, 888}, + dictWord{135, 11, 1096}, + dictWord{138, 0, 987}, + dictWord{7, 0, 1107}, + dictWord{ + 7, + 10, + 261, + }, + dictWord{7, 10, 1115}, + dictWord{7, 10, 1354}, + dictWord{7, 10, 1588}, + dictWord{7, 10, 1705}, + dictWord{7, 10, 1902}, + dictWord{9, 10, 465}, + dictWord{10, 10, 248}, + dictWord{10, 10, 349}, + dictWord{10, 10, 647}, + dictWord{11, 10, 527}, + dictWord{11, 10, 660}, + dictWord{11, 10, 669}, + dictWord{ + 12, + 10, + 529, + }, + dictWord{141, 10, 305}, + dictWord{7, 11, 296}, + dictWord{7, 11, 596}, + dictWord{8, 11, 560}, + dictWord{8, 11, 586}, + dictWord{9, 11, 612}, + dictWord{ + 11, + 11, + 100, + }, + dictWord{11, 11, 304}, + dictWord{12, 11, 46}, + dictWord{13, 11, 89}, + dictWord{14, 11, 112}, + dictWord{145, 11, 122}, + dictWord{9, 0, 370}, + dictWord{ + 138, + 0, + 90, + }, + dictWord{136, 10, 13}, + dictWord{132, 0, 860}, + dictWord{7, 10, 642}, + dictWord{8, 10, 250}, + dictWord{11, 10, 123}, + dictWord{11, 10, 137}, + dictWord{ + 13, + 10, + 48, + }, + dictWord{142, 10, 95}, + dictWord{135, 10, 1429}, + 
dictWord{137, 11, 321}, + dictWord{132, 0, 257}, + dictWord{135, 0, 2031}, + dictWord{7, 0, 1768}, + dictWord{7, 11, 1599}, + dictWord{7, 11, 1723}, + dictWord{8, 11, 79}, + dictWord{8, 11, 106}, + dictWord{8, 11, 190}, + dictWord{8, 11, 302}, + dictWord{8, 11, 383}, + dictWord{9, 11, 119}, + dictWord{9, 11, 233}, + dictWord{9, 11, 298}, + dictWord{9, 11, 419}, + dictWord{9, 11, 471}, + dictWord{10, 11, 181}, + dictWord{10, 11, 406}, + dictWord{11, 11, 57}, + dictWord{11, 11, 85}, + dictWord{11, 11, 120}, + dictWord{11, 11, 177}, + dictWord{11, 11, 296}, + dictWord{11, 11, 382}, + dictWord{11, 11, 454}, + dictWord{11, 11, 758}, + dictWord{11, 11, 999}, + dictWord{12, 11, 27}, + dictWord{12, 11, 98}, + dictWord{12, 11, 131}, + dictWord{12, 11, 245}, + dictWord{ + 12, + 11, + 312, + }, + dictWord{12, 11, 446}, + dictWord{12, 11, 454}, + dictWord{13, 11, 25}, + dictWord{13, 11, 98}, + dictWord{13, 11, 426}, + dictWord{13, 11, 508}, + dictWord{ + 14, + 11, + 6, + }, + dictWord{14, 11, 163}, + dictWord{14, 11, 272}, + dictWord{14, 11, 277}, + dictWord{14, 11, 370}, + dictWord{15, 11, 95}, + dictWord{15, 11, 138}, + dictWord{ + 15, + 11, + 167, + }, + dictWord{17, 11, 18}, + dictWord{17, 11, 38}, + dictWord{20, 11, 96}, + dictWord{149, 11, 32}, + dictWord{5, 11, 722}, + dictWord{134, 11, 1759}, + dictWord{145, 11, 16}, + dictWord{6, 0, 1071}, + dictWord{134, 0, 1561}, + dictWord{10, 10, 545}, + dictWord{140, 10, 301}, + dictWord{6, 0, 83}, + dictWord{6, 0, 1733}, + dictWord{135, 0, 1389}, + dictWord{4, 0, 835}, + dictWord{135, 0, 1818}, + dictWord{133, 11, 258}, + dictWord{4, 10, 904}, + dictWord{133, 10, 794}, + dictWord{ + 134, + 0, + 2006, + }, + dictWord{5, 11, 30}, + dictWord{7, 11, 495}, + dictWord{8, 11, 134}, + dictWord{9, 11, 788}, + dictWord{140, 11, 438}, + dictWord{135, 11, 2004}, + dictWord{ + 137, + 0, + 696, + }, + dictWord{5, 11, 50}, + dictWord{6, 11, 439}, + dictWord{7, 11, 780}, + dictWord{135, 11, 1040}, + dictWord{7, 11, 772}, + dictWord{7, 11, 1104}, + dictWord{ + 7, + 11, + 1647, + }, + dictWord{11, 11, 269}, + dictWord{11, 11, 539}, + dictWord{11, 11, 607}, + dictWord{11, 11, 627}, + dictWord{11, 11, 706}, + dictWord{11, 11, 975}, + dictWord{12, 11, 248}, + dictWord{12, 11, 311}, + dictWord{12, 11, 434}, + dictWord{12, 11, 600}, + dictWord{12, 11, 622}, + dictWord{13, 11, 297}, + dictWord{ + 13, + 11, + 367, + }, + dictWord{13, 11, 485}, + dictWord{14, 11, 69}, + dictWord{14, 11, 409}, + dictWord{143, 11, 108}, + dictWord{5, 11, 1}, + dictWord{6, 11, 81}, + dictWord{ + 138, + 11, + 520, + }, + dictWord{7, 0, 1718}, + dictWord{9, 0, 95}, + dictWord{9, 0, 274}, + dictWord{10, 0, 279}, + dictWord{10, 0, 317}, + dictWord{10, 0, 420}, + dictWord{11, 0, 303}, + dictWord{11, 0, 808}, + dictWord{12, 0, 134}, + dictWord{12, 0, 367}, + dictWord{13, 0, 149}, + dictWord{13, 0, 347}, + dictWord{14, 0, 349}, + dictWord{14, 0, 406}, + dictWord{ + 18, + 0, + 22, + }, + dictWord{18, 0, 89}, + dictWord{18, 0, 122}, + dictWord{147, 0, 47}, + dictWord{5, 11, 482}, + dictWord{8, 11, 98}, + dictWord{9, 11, 172}, + dictWord{10, 11, 222}, + dictWord{10, 11, 700}, + dictWord{10, 11, 822}, + dictWord{11, 11, 302}, + dictWord{11, 11, 778}, + dictWord{12, 11, 50}, + dictWord{12, 11, 127}, + dictWord{ + 12, + 11, + 396, + }, + dictWord{13, 11, 62}, + dictWord{13, 11, 328}, + dictWord{14, 11, 122}, + dictWord{147, 11, 72}, + dictWord{7, 10, 386}, + dictWord{138, 10, 713}, + dictWord{ + 6, + 10, + 7, + }, + dictWord{6, 10, 35}, + dictWord{7, 10, 147}, + dictWord{7, 10, 1069}, + dictWord{7, 10, 1568}, + 
dictWord{7, 10, 1575}, + dictWord{7, 10, 1917}, + dictWord{ + 8, + 10, + 43, + }, + dictWord{8, 10, 208}, + dictWord{9, 10, 128}, + dictWord{9, 10, 866}, + dictWord{10, 10, 20}, + dictWord{11, 10, 981}, + dictWord{147, 10, 33}, + dictWord{ + 133, + 0, + 26, + }, + dictWord{132, 0, 550}, + dictWord{5, 11, 2}, + dictWord{7, 11, 1494}, + dictWord{136, 11, 589}, + dictWord{6, 11, 512}, + dictWord{7, 11, 797}, + dictWord{ + 8, + 11, + 253, + }, + dictWord{9, 11, 77}, + dictWord{10, 11, 1}, + dictWord{10, 11, 129}, + dictWord{10, 11, 225}, + dictWord{11, 11, 118}, + dictWord{11, 11, 226}, + dictWord{ + 11, + 11, + 251, + }, + dictWord{11, 11, 430}, + dictWord{11, 11, 701}, + dictWord{11, 11, 974}, + dictWord{11, 11, 982}, + dictWord{12, 11, 64}, + dictWord{12, 11, 260}, + dictWord{ + 12, + 11, + 488, + }, + dictWord{140, 11, 690}, + dictWord{7, 10, 893}, + dictWord{141, 10, 424}, + dictWord{134, 0, 901}, + dictWord{136, 0, 822}, + dictWord{4, 0, 902}, + dictWord{5, 0, 809}, + dictWord{134, 0, 122}, + dictWord{6, 0, 807}, + dictWord{134, 0, 1366}, + dictWord{7, 0, 262}, + dictWord{5, 11, 748}, + dictWord{134, 11, 553}, + dictWord{133, 0, 620}, + dictWord{4, 0, 34}, + dictWord{5, 0, 574}, + dictWord{7, 0, 279}, + dictWord{7, 0, 1624}, + dictWord{136, 0, 601}, + dictWord{9, 0, 170}, + dictWord{ + 6, + 10, + 322, + }, + dictWord{9, 10, 552}, + dictWord{11, 10, 274}, + dictWord{13, 10, 209}, + dictWord{13, 10, 499}, + dictWord{14, 10, 85}, + dictWord{15, 10, 126}, + dictWord{ + 145, + 10, + 70, + }, + dictWord{132, 0, 537}, + dictWord{4, 11, 12}, + dictWord{7, 11, 420}, + dictWord{7, 11, 522}, + dictWord{7, 11, 809}, + dictWord{8, 11, 797}, + dictWord{ + 141, + 11, + 88, + }, + dictWord{133, 0, 332}, + dictWord{8, 10, 83}, + dictWord{8, 10, 742}, + dictWord{8, 10, 817}, + dictWord{9, 10, 28}, + dictWord{9, 10, 29}, + dictWord{9, 10, 885}, + dictWord{10, 10, 387}, + dictWord{11, 10, 633}, + dictWord{11, 10, 740}, + dictWord{13, 10, 235}, + dictWord{13, 10, 254}, + dictWord{15, 10, 143}, + dictWord{ + 143, + 10, + 146, + }, + dictWord{6, 0, 1909}, + dictWord{9, 0, 964}, + dictWord{12, 0, 822}, + dictWord{12, 0, 854}, + dictWord{12, 0, 865}, + dictWord{12, 0, 910}, + dictWord{12, 0, 938}, + dictWord{15, 0, 169}, + dictWord{15, 0, 208}, + dictWord{15, 0, 211}, + dictWord{18, 0, 205}, + dictWord{18, 0, 206}, + dictWord{18, 0, 220}, + dictWord{18, 0, 223}, + dictWord{152, 0, 24}, + dictWord{140, 10, 49}, + dictWord{5, 11, 528}, + dictWord{135, 11, 1580}, + dictWord{6, 0, 261}, + dictWord{8, 0, 182}, + dictWord{139, 0, 943}, + dictWord{134, 0, 1721}, + dictWord{4, 0, 933}, + dictWord{133, 0, 880}, + dictWord{136, 11, 321}, + dictWord{5, 11, 266}, + dictWord{9, 11, 290}, + dictWord{9, 11, 364}, + dictWord{10, 11, 293}, + dictWord{11, 11, 606}, + dictWord{142, 11, 45}, + dictWord{6, 0, 1609}, + dictWord{4, 11, 50}, + dictWord{6, 11, 510}, + dictWord{6, 11, 594}, + dictWord{9, 11, 121}, + dictWord{10, 11, 49}, + dictWord{10, 11, 412}, + dictWord{139, 11, 834}, + dictWord{7, 0, 895}, + dictWord{136, 11, 748}, + dictWord{132, 11, 466}, + dictWord{4, 10, 110}, + dictWord{10, 10, 415}, + dictWord{10, 10, 597}, + dictWord{142, 10, 206}, + dictWord{133, 0, 812}, + dictWord{135, 11, 281}, + dictWord{ + 6, + 0, + 1890, + }, + dictWord{6, 0, 1902}, + dictWord{6, 0, 1916}, + dictWord{9, 0, 929}, + dictWord{9, 0, 942}, + dictWord{9, 0, 975}, + dictWord{9, 0, 984}, + dictWord{9, 0, 986}, + dictWord{ + 9, + 0, + 1011, + }, + dictWord{9, 0, 1019}, + dictWord{12, 0, 804}, + dictWord{12, 0, 851}, + dictWord{12, 0, 867}, + 
dictWord{12, 0, 916}, + dictWord{12, 0, 923}, + dictWord{ + 15, + 0, + 194, + }, + dictWord{15, 0, 204}, + dictWord{15, 0, 210}, + dictWord{15, 0, 222}, + dictWord{15, 0, 223}, + dictWord{15, 0, 229}, + dictWord{15, 0, 250}, + dictWord{ + 18, + 0, + 179, + }, + dictWord{18, 0, 186}, + dictWord{18, 0, 192}, + dictWord{7, 10, 205}, + dictWord{135, 10, 2000}, + dictWord{132, 11, 667}, + dictWord{135, 0, 778}, + dictWord{ + 4, + 0, + 137, + }, + dictWord{7, 0, 1178}, + dictWord{135, 0, 1520}, + dictWord{134, 0, 1314}, + dictWord{4, 11, 242}, + dictWord{134, 11, 333}, + dictWord{6, 0, 1661}, + dictWord{7, 0, 1975}, + dictWord{7, 0, 2009}, + dictWord{135, 0, 2011}, + dictWord{134, 0, 1591}, + dictWord{4, 10, 283}, + dictWord{135, 10, 1194}, + dictWord{ + 11, + 0, + 820, + }, + dictWord{150, 0, 51}, + dictWord{4, 11, 39}, + dictWord{5, 11, 36}, + dictWord{7, 11, 1843}, + dictWord{8, 11, 407}, + dictWord{11, 11, 144}, + dictWord{ + 140, + 11, + 523, + }, + dictWord{134, 10, 1720}, + dictWord{4, 11, 510}, + dictWord{7, 11, 29}, + dictWord{7, 11, 66}, + dictWord{7, 11, 1980}, + dictWord{10, 11, 487}, + dictWord{ + 10, + 11, + 809, + }, + dictWord{146, 11, 9}, + dictWord{5, 0, 89}, + dictWord{7, 0, 1915}, + dictWord{9, 0, 185}, + dictWord{9, 0, 235}, + dictWord{10, 0, 64}, + dictWord{10, 0, 270}, + dictWord{10, 0, 403}, + dictWord{10, 0, 469}, + dictWord{10, 0, 529}, + dictWord{10, 0, 590}, + dictWord{11, 0, 140}, + dictWord{11, 0, 860}, + dictWord{13, 0, 1}, + dictWord{ + 13, + 0, + 422, + }, + dictWord{14, 0, 341}, + dictWord{14, 0, 364}, + dictWord{17, 0, 93}, + dictWord{18, 0, 113}, + dictWord{19, 0, 97}, + dictWord{147, 0, 113}, + dictWord{133, 0, 695}, + dictWord{6, 0, 987}, + dictWord{134, 0, 1160}, + dictWord{5, 0, 6}, + dictWord{6, 0, 183}, + dictWord{7, 0, 680}, + dictWord{7, 0, 978}, + dictWord{7, 0, 1013}, + dictWord{ + 7, + 0, + 1055, + }, + dictWord{12, 0, 230}, + dictWord{13, 0, 172}, + dictWord{146, 0, 29}, + dictWord{134, 11, 570}, + dictWord{132, 11, 787}, + dictWord{134, 11, 518}, + dictWord{ + 6, + 0, + 29, + }, + dictWord{139, 0, 63}, + dictWord{132, 11, 516}, + dictWord{136, 11, 821}, + dictWord{132, 0, 311}, + dictWord{134, 0, 1740}, + dictWord{7, 0, 170}, + dictWord{8, 0, 90}, + dictWord{8, 0, 177}, + dictWord{8, 0, 415}, + dictWord{11, 0, 714}, + dictWord{14, 0, 281}, + dictWord{136, 10, 735}, + dictWord{134, 0, 1961}, + dictWord{ + 135, + 11, + 1405, + }, + dictWord{4, 11, 10}, + dictWord{7, 11, 917}, + dictWord{139, 11, 786}, + dictWord{5, 10, 132}, + dictWord{9, 10, 486}, + dictWord{9, 10, 715}, + dictWord{ + 10, + 10, + 458, + }, + dictWord{11, 10, 373}, + dictWord{11, 10, 668}, + dictWord{11, 10, 795}, + dictWord{11, 10, 897}, + dictWord{12, 10, 272}, + dictWord{12, 10, 424}, + dictWord{12, 10, 539}, + dictWord{12, 10, 558}, + dictWord{14, 10, 245}, + dictWord{14, 10, 263}, + dictWord{14, 10, 264}, + dictWord{14, 10, 393}, + dictWord{ + 142, + 10, + 403, + }, + dictWord{11, 0, 91}, + dictWord{13, 0, 129}, + dictWord{15, 0, 101}, + dictWord{145, 0, 125}, + dictWord{135, 0, 1132}, + dictWord{4, 0, 494}, + dictWord{6, 0, 74}, + dictWord{7, 0, 44}, + dictWord{7, 0, 407}, + dictWord{12, 0, 17}, + dictWord{15, 0, 5}, + dictWord{148, 0, 11}, + dictWord{133, 10, 379}, + dictWord{5, 0, 270}, + dictWord{ + 5, + 11, + 684, + }, + dictWord{6, 10, 89}, + dictWord{6, 10, 400}, + dictWord{7, 10, 1569}, + dictWord{7, 10, 1623}, + dictWord{7, 10, 1850}, + dictWord{8, 10, 218}, + dictWord{ + 8, + 10, + 422, + }, + dictWord{9, 10, 570}, + dictWord{138, 10, 626}, + dictWord{4, 0, 276}, + 
dictWord{133, 0, 296}, + dictWord{6, 0, 1523}, + dictWord{134, 11, 27}, + dictWord{ + 6, + 10, + 387, + }, + dictWord{7, 10, 882}, + dictWord{141, 10, 111}, + dictWord{6, 10, 224}, + dictWord{7, 10, 877}, + dictWord{137, 10, 647}, + dictWord{135, 10, 790}, + dictWord{ + 4, + 0, + 7, + }, + dictWord{5, 0, 90}, + dictWord{5, 0, 158}, + dictWord{6, 0, 542}, + dictWord{7, 0, 221}, + dictWord{7, 0, 1574}, + dictWord{9, 0, 490}, + dictWord{10, 0, 540}, + dictWord{ + 11, + 0, + 443, + }, + dictWord{139, 0, 757}, + dictWord{7, 0, 588}, + dictWord{9, 0, 175}, + dictWord{138, 0, 530}, + dictWord{135, 10, 394}, + dictWord{142, 11, 23}, + dictWord{ + 134, + 0, + 786, + }, + dictWord{135, 0, 580}, + dictWord{7, 0, 88}, + dictWord{136, 0, 627}, + dictWord{5, 0, 872}, + dictWord{6, 0, 57}, + dictWord{7, 0, 471}, + dictWord{9, 0, 447}, + dictWord{137, 0, 454}, + dictWord{6, 11, 342}, + dictWord{6, 11, 496}, + dictWord{8, 11, 275}, + dictWord{137, 11, 206}, + dictWord{4, 11, 909}, + dictWord{133, 11, 940}, + dictWord{6, 0, 735}, + dictWord{132, 11, 891}, + dictWord{8, 0, 845}, + dictWord{8, 0, 916}, + dictWord{135, 10, 1409}, + dictWord{5, 0, 31}, + dictWord{134, 0, 614}, + dictWord{11, 0, 458}, + dictWord{12, 0, 15}, + dictWord{140, 0, 432}, + dictWord{8, 0, 330}, + dictWord{140, 0, 477}, + dictWord{4, 0, 530}, + dictWord{5, 0, 521}, + dictWord{ + 7, + 0, + 1200, + }, + dictWord{10, 0, 460}, + dictWord{132, 11, 687}, + dictWord{6, 0, 424}, + dictWord{135, 0, 1866}, + dictWord{9, 0, 569}, + dictWord{12, 0, 12}, + dictWord{ + 12, + 0, + 81, + }, + dictWord{12, 0, 319}, + dictWord{13, 0, 69}, + dictWord{14, 0, 259}, + dictWord{16, 0, 87}, + dictWord{17, 0, 1}, + dictWord{17, 0, 21}, + dictWord{17, 0, 24}, + dictWord{ + 18, + 0, + 15, + }, + dictWord{18, 0, 56}, + dictWord{18, 0, 59}, + dictWord{18, 0, 127}, + dictWord{18, 0, 154}, + dictWord{19, 0, 19}, + dictWord{148, 0, 31}, + dictWord{7, 0, 1302}, + dictWord{136, 10, 38}, + dictWord{134, 11, 253}, + dictWord{5, 10, 261}, + dictWord{7, 10, 78}, + dictWord{7, 10, 199}, + dictWord{8, 10, 815}, + dictWord{9, 10, 126}, + dictWord{138, 10, 342}, + dictWord{5, 0, 595}, + dictWord{135, 0, 1863}, + dictWord{6, 11, 41}, + dictWord{141, 11, 160}, + dictWord{5, 0, 13}, + dictWord{134, 0, 142}, + dictWord{6, 0, 97}, + dictWord{7, 0, 116}, + dictWord{8, 0, 322}, + dictWord{8, 0, 755}, + dictWord{9, 0, 548}, + dictWord{10, 0, 714}, + dictWord{11, 0, 884}, + dictWord{13, 0, 324}, + dictWord{7, 11, 1304}, + dictWord{138, 11, 477}, + dictWord{132, 10, 628}, + dictWord{134, 11, 1718}, + dictWord{7, 10, 266}, + dictWord{136, 10, 804}, + dictWord{135, 10, 208}, + dictWord{7, 0, 1021}, + dictWord{6, 10, 79}, + dictWord{135, 10, 1519}, + dictWord{7, 0, 1472}, + dictWord{135, 0, 1554}, + dictWord{6, 11, 362}, + dictWord{146, 11, 51}, + dictWord{7, 0, 1071}, + dictWord{7, 0, 1541}, + dictWord{7, 0, 1767}, + dictWord{7, 0, 1806}, + dictWord{11, 0, 162}, + dictWord{11, 0, 242}, + dictWord{11, 0, 452}, + dictWord{12, 0, 605}, + dictWord{15, 0, 26}, + dictWord{144, 0, 44}, + dictWord{136, 10, 741}, + dictWord{133, 11, 115}, + dictWord{145, 0, 115}, + dictWord{134, 10, 376}, + dictWord{6, 0, 1406}, + dictWord{134, 0, 1543}, + dictWord{5, 11, 193}, + dictWord{12, 11, 178}, + dictWord{13, 11, 130}, + dictWord{ + 145, + 11, + 84, + }, + dictWord{135, 0, 1111}, + dictWord{8, 0, 1}, + dictWord{9, 0, 650}, + dictWord{10, 0, 326}, + dictWord{5, 11, 705}, + dictWord{137, 11, 606}, + dictWord{5, 0, 488}, + dictWord{6, 0, 527}, + dictWord{7, 0, 489}, + dictWord{7, 0, 1636}, + dictWord{8, 0, 
121}, + dictWord{8, 0, 144}, + dictWord{8, 0, 359}, + dictWord{9, 0, 193}, + dictWord{9, 0, 241}, + dictWord{9, 0, 336}, + dictWord{9, 0, 882}, + dictWord{11, 0, 266}, + dictWord{11, 0, 372}, + dictWord{11, 0, 944}, + dictWord{12, 0, 401}, + dictWord{140, 0, 641}, + dictWord{135, 11, 174}, + dictWord{6, 0, 267}, + dictWord{7, 10, 244}, + dictWord{7, 10, 632}, + dictWord{7, 10, 1609}, + dictWord{8, 10, 178}, + dictWord{8, 10, 638}, + dictWord{141, 10, 58}, + dictWord{134, 0, 1983}, + dictWord{134, 0, 1155}, + dictWord{134, 0, 1575}, + dictWord{134, 0, 1438}, + dictWord{9, 0, 31}, + dictWord{ + 10, + 0, + 244, + }, + dictWord{10, 0, 699}, + dictWord{12, 0, 149}, + dictWord{141, 0, 497}, + dictWord{133, 0, 377}, + dictWord{4, 11, 122}, + dictWord{5, 11, 796}, + dictWord{ + 5, + 11, + 952, + }, + dictWord{6, 11, 1660}, + dictWord{6, 11, 1671}, + dictWord{8, 11, 567}, + dictWord{9, 11, 687}, + dictWord{9, 11, 742}, + dictWord{10, 11, 686}, + dictWord{ + 11, + 11, + 356, + }, + dictWord{11, 11, 682}, + dictWord{140, 11, 281}, + dictWord{145, 0, 101}, + dictWord{11, 11, 0}, + dictWord{144, 11, 78}, + dictWord{5, 11, 179}, + dictWord{ + 5, + 10, + 791, + }, + dictWord{7, 11, 1095}, + dictWord{135, 11, 1213}, + dictWord{8, 11, 372}, + dictWord{9, 11, 122}, + dictWord{138, 11, 175}, + dictWord{7, 10, 686}, + dictWord{8, 10, 33}, + dictWord{8, 10, 238}, + dictWord{10, 10, 616}, + dictWord{11, 10, 467}, + dictWord{11, 10, 881}, + dictWord{13, 10, 217}, + dictWord{13, 10, 253}, + dictWord{142, 10, 268}, + dictWord{9, 0, 476}, + dictWord{4, 11, 66}, + dictWord{7, 11, 722}, + dictWord{135, 11, 904}, + dictWord{7, 11, 352}, + dictWord{137, 11, 684}, + dictWord{135, 0, 2023}, + dictWord{135, 0, 1836}, + dictWord{132, 10, 447}, + dictWord{5, 0, 843}, + dictWord{144, 0, 35}, + dictWord{137, 11, 779}, + dictWord{ + 141, + 11, + 35, + }, + dictWord{4, 10, 128}, + dictWord{5, 10, 415}, + dictWord{6, 10, 462}, + dictWord{7, 10, 294}, + dictWord{7, 10, 578}, + dictWord{10, 10, 710}, + dictWord{ + 139, + 10, + 86, + }, + dictWord{132, 0, 554}, + dictWord{133, 0, 536}, + dictWord{136, 10, 587}, + dictWord{5, 0, 207}, + dictWord{9, 0, 79}, + dictWord{11, 0, 625}, + dictWord{ + 145, + 0, + 7, + }, + dictWord{7, 0, 1371}, + dictWord{6, 10, 427}, + dictWord{138, 10, 692}, + dictWord{4, 0, 424}, + dictWord{4, 10, 195}, + dictWord{135, 10, 802}, + dictWord{ + 8, + 0, + 785, + }, + dictWord{133, 11, 564}, + dictWord{135, 0, 336}, + dictWord{4, 0, 896}, + dictWord{6, 0, 1777}, + dictWord{134, 11, 556}, + dictWord{137, 11, 103}, + dictWord{134, 10, 1683}, + dictWord{7, 11, 544}, + dictWord{8, 11, 719}, + dictWord{138, 11, 61}, + dictWord{138, 10, 472}, + dictWord{4, 11, 5}, + dictWord{5, 11, 498}, + dictWord{136, 11, 637}, + dictWord{7, 0, 750}, + dictWord{9, 0, 223}, + dictWord{11, 0, 27}, + dictWord{11, 0, 466}, + dictWord{12, 0, 624}, + dictWord{14, 0, 265}, + dictWord{ + 146, + 0, + 61, + }, + dictWord{12, 0, 238}, + dictWord{18, 0, 155}, + dictWord{12, 11, 238}, + dictWord{146, 11, 155}, + dictWord{151, 10, 28}, + dictWord{133, 11, 927}, + dictWord{12, 0, 383}, + dictWord{5, 10, 3}, + dictWord{8, 10, 578}, + dictWord{9, 10, 118}, + dictWord{10, 10, 705}, + dictWord{141, 10, 279}, + dictWord{4, 11, 893}, + dictWord{ + 5, + 11, + 780, + }, + dictWord{133, 11, 893}, + dictWord{4, 0, 603}, + dictWord{133, 0, 661}, + dictWord{4, 0, 11}, + dictWord{6, 0, 128}, + dictWord{7, 0, 231}, + dictWord{ + 7, + 0, + 1533, + }, + dictWord{10, 0, 725}, + dictWord{5, 10, 229}, + dictWord{5, 11, 238}, + dictWord{135, 11, 1350}, + 
dictWord{8, 10, 102}, + dictWord{10, 10, 578}, + dictWord{ + 10, + 10, + 672, + }, + dictWord{12, 10, 496}, + dictWord{13, 10, 408}, + dictWord{14, 10, 121}, + dictWord{145, 10, 106}, + dictWord{132, 0, 476}, + dictWord{134, 0, 1552}, + dictWord{134, 11, 1729}, + dictWord{8, 10, 115}, + dictWord{8, 10, 350}, + dictWord{9, 10, 489}, + dictWord{10, 10, 128}, + dictWord{11, 10, 306}, + dictWord{ + 12, + 10, + 373, + }, + dictWord{14, 10, 30}, + dictWord{17, 10, 79}, + dictWord{19, 10, 80}, + dictWord{150, 10, 55}, + dictWord{135, 0, 1807}, + dictWord{4, 0, 680}, + dictWord{ + 4, + 11, + 60, + }, + dictWord{7, 11, 760}, + dictWord{7, 11, 1800}, + dictWord{8, 11, 314}, + dictWord{9, 11, 700}, + dictWord{139, 11, 487}, + dictWord{4, 10, 230}, + dictWord{ + 5, + 10, + 702, + }, + dictWord{148, 11, 94}, + dictWord{132, 11, 228}, + dictWord{139, 0, 435}, + dictWord{9, 0, 20}, + dictWord{10, 0, 324}, + dictWord{10, 0, 807}, + dictWord{ + 139, + 0, + 488, + }, + dictWord{6, 10, 1728}, + dictWord{136, 11, 419}, + dictWord{4, 10, 484}, + dictWord{18, 10, 26}, + dictWord{19, 10, 42}, + dictWord{20, 10, 43}, + dictWord{ + 21, + 10, + 0, + }, + dictWord{23, 10, 27}, + dictWord{152, 10, 14}, + dictWord{135, 0, 1431}, + dictWord{133, 11, 828}, + dictWord{5, 0, 112}, + dictWord{6, 0, 103}, + dictWord{ + 6, + 0, + 150, + }, + dictWord{7, 0, 1303}, + dictWord{9, 0, 292}, + dictWord{10, 0, 481}, + dictWord{20, 0, 13}, + dictWord{7, 11, 176}, + dictWord{7, 11, 178}, + dictWord{7, 11, 1110}, + dictWord{10, 11, 481}, + dictWord{148, 11, 13}, + dictWord{138, 0, 356}, + dictWord{4, 11, 51}, + dictWord{5, 11, 39}, + dictWord{6, 11, 4}, + dictWord{7, 11, 591}, + dictWord{ + 7, + 11, + 849, + }, + dictWord{7, 11, 951}, + dictWord{7, 11, 1129}, + dictWord{7, 11, 1613}, + dictWord{7, 11, 1760}, + dictWord{7, 11, 1988}, + dictWord{9, 11, 434}, + dictWord{10, 11, 754}, + dictWord{11, 11, 25}, + dictWord{11, 11, 37}, + dictWord{139, 11, 414}, + dictWord{6, 0, 1963}, + dictWord{134, 0, 2000}, + dictWord{ + 132, + 10, + 633, + }, + dictWord{6, 0, 1244}, + dictWord{133, 11, 902}, + dictWord{135, 11, 928}, + dictWord{140, 0, 18}, + dictWord{138, 0, 204}, + dictWord{135, 11, 1173}, + dictWord{134, 0, 867}, + dictWord{4, 0, 708}, + dictWord{8, 0, 15}, + dictWord{9, 0, 50}, + dictWord{9, 0, 386}, + dictWord{11, 0, 18}, + dictWord{11, 0, 529}, + dictWord{140, 0, 228}, + dictWord{134, 11, 270}, + dictWord{4, 0, 563}, + dictWord{7, 0, 109}, + dictWord{7, 0, 592}, + dictWord{7, 0, 637}, + dictWord{7, 0, 770}, + dictWord{8, 0, 463}, + dictWord{ + 9, + 0, + 60, + }, + dictWord{9, 0, 335}, + dictWord{9, 0, 904}, + dictWord{10, 0, 73}, + dictWord{11, 0, 434}, + dictWord{12, 0, 585}, + dictWord{13, 0, 331}, + dictWord{18, 0, 110}, + dictWord{148, 0, 60}, + dictWord{132, 0, 502}, + dictWord{14, 11, 359}, + dictWord{19, 11, 52}, + dictWord{148, 11, 47}, + dictWord{6, 11, 377}, + dictWord{7, 11, 1025}, + dictWord{9, 11, 613}, + dictWord{145, 11, 104}, + dictWord{6, 0, 347}, + dictWord{10, 0, 161}, + dictWord{5, 10, 70}, + dictWord{5, 10, 622}, + dictWord{6, 10, 334}, + dictWord{ + 7, + 10, + 1032, + }, + dictWord{9, 10, 171}, + dictWord{11, 10, 26}, + dictWord{11, 10, 213}, + dictWord{11, 10, 637}, + dictWord{11, 10, 707}, + dictWord{12, 10, 202}, + dictWord{12, 10, 380}, + dictWord{13, 10, 226}, + dictWord{13, 10, 355}, + dictWord{14, 10, 222}, + dictWord{145, 10, 42}, + dictWord{132, 11, 416}, + dictWord{4, 0, 33}, + dictWord{5, 0, 102}, + dictWord{6, 0, 284}, + dictWord{7, 0, 1079}, + dictWord{7, 0, 1423}, + dictWord{7, 0, 1702}, + 
dictWord{8, 0, 470}, + dictWord{9, 0, 554}, + dictWord{ + 9, + 0, + 723, + }, + dictWord{11, 0, 333}, + dictWord{142, 11, 372}, + dictWord{5, 11, 152}, + dictWord{5, 11, 197}, + dictWord{7, 11, 340}, + dictWord{7, 11, 867}, + dictWord{ + 10, + 11, + 548, + }, + dictWord{10, 11, 581}, + dictWord{11, 11, 6}, + dictWord{12, 11, 3}, + dictWord{12, 11, 19}, + dictWord{14, 11, 110}, + dictWord{142, 11, 289}, + dictWord{ + 7, + 0, + 246, + }, + dictWord{135, 0, 840}, + dictWord{6, 0, 10}, + dictWord{8, 0, 571}, + dictWord{9, 0, 739}, + dictWord{143, 0, 91}, + dictWord{6, 0, 465}, + dictWord{7, 0, 1465}, + dictWord{ + 4, + 10, + 23, + }, + dictWord{4, 10, 141}, + dictWord{5, 10, 313}, + dictWord{5, 10, 1014}, + dictWord{6, 10, 50}, + dictWord{7, 10, 142}, + dictWord{7, 10, 559}, + dictWord{ + 8, + 10, + 640, + }, + dictWord{9, 10, 460}, + dictWord{9, 10, 783}, + dictWord{11, 10, 741}, + dictWord{12, 10, 183}, + dictWord{141, 10, 488}, + dictWord{133, 0, 626}, + dictWord{ + 136, + 0, + 614, + }, + dictWord{138, 0, 237}, + dictWord{7, 11, 34}, + dictWord{7, 11, 190}, + dictWord{8, 11, 28}, + dictWord{8, 11, 141}, + dictWord{8, 11, 444}, + dictWord{ + 8, + 11, + 811, + }, + dictWord{9, 11, 468}, + dictWord{11, 11, 334}, + dictWord{12, 11, 24}, + dictWord{12, 11, 386}, + dictWord{140, 11, 576}, + dictWord{133, 11, 757}, + dictWord{ + 5, + 0, + 18, + }, + dictWord{6, 0, 526}, + dictWord{13, 0, 24}, + dictWord{13, 0, 110}, + dictWord{19, 0, 5}, + dictWord{147, 0, 44}, + dictWord{6, 0, 506}, + dictWord{134, 11, 506}, + dictWord{135, 11, 1553}, + dictWord{4, 0, 309}, + dictWord{5, 0, 462}, + dictWord{7, 0, 970}, + dictWord{7, 0, 1097}, + dictWord{22, 0, 30}, + dictWord{22, 0, 33}, + dictWord{ + 7, + 11, + 1385, + }, + dictWord{11, 11, 582}, + dictWord{11, 11, 650}, + dictWord{11, 11, 901}, + dictWord{11, 11, 949}, + dictWord{12, 11, 232}, + dictWord{12, 11, 236}, + dictWord{13, 11, 413}, + dictWord{13, 11, 501}, + dictWord{146, 11, 116}, + dictWord{9, 0, 140}, + dictWord{5, 10, 222}, + dictWord{138, 10, 534}, + dictWord{6, 0, 1056}, + dictWord{137, 10, 906}, + dictWord{134, 0, 1704}, + dictWord{138, 10, 503}, + dictWord{134, 0, 1036}, + dictWord{5, 10, 154}, + dictWord{7, 10, 1491}, + dictWord{ + 10, + 10, + 379, + }, + dictWord{138, 10, 485}, + dictWord{4, 11, 383}, + dictWord{133, 10, 716}, + dictWord{134, 0, 1315}, + dictWord{5, 0, 86}, + dictWord{7, 0, 743}, + dictWord{ + 9, + 0, + 85, + }, + dictWord{10, 0, 281}, + dictWord{10, 0, 432}, + dictWord{11, 0, 825}, + dictWord{12, 0, 251}, + dictWord{13, 0, 118}, + dictWord{142, 0, 378}, + dictWord{ + 8, + 0, + 264, + }, + dictWord{4, 10, 91}, + dictWord{5, 10, 388}, + dictWord{5, 10, 845}, + dictWord{6, 10, 206}, + dictWord{6, 10, 252}, + dictWord{6, 10, 365}, + dictWord{7, 10, 136}, + dictWord{7, 10, 531}, + dictWord{136, 10, 621}, + dictWord{5, 0, 524}, + dictWord{133, 0, 744}, + dictWord{5, 11, 277}, + dictWord{141, 11, 247}, + dictWord{ + 132, + 11, + 435, + }, + dictWord{10, 0, 107}, + dictWord{140, 0, 436}, + dictWord{132, 0, 927}, + dictWord{10, 0, 123}, + dictWord{12, 0, 670}, + dictWord{146, 0, 94}, + dictWord{ + 7, + 0, + 1149, + }, + dictWord{9, 0, 156}, + dictWord{138, 0, 957}, + dictWord{5, 11, 265}, + dictWord{6, 11, 212}, + dictWord{135, 11, 28}, + dictWord{133, 0, 778}, + dictWord{ + 133, + 0, + 502, + }, + dictWord{8, 0, 196}, + dictWord{10, 0, 283}, + dictWord{139, 0, 406}, + dictWord{135, 10, 576}, + dictWord{136, 11, 535}, + dictWord{134, 0, 1312}, + dictWord{ + 5, + 10, + 771, + }, + dictWord{5, 10, 863}, + dictWord{5, 10, 898}, + 
dictWord{6, 10, 1632}, + dictWord{6, 10, 1644}, + dictWord{134, 10, 1780}, + dictWord{5, 0, 855}, + dictWord{5, 10, 331}, + dictWord{135, 11, 1487}, + dictWord{132, 11, 702}, + dictWord{5, 11, 808}, + dictWord{135, 11, 2045}, + dictWord{7, 0, 1400}, + dictWord{ + 9, + 0, + 446, + }, + dictWord{138, 0, 45}, + dictWord{140, 10, 632}, + dictWord{132, 0, 1003}, + dictWord{5, 11, 166}, + dictWord{8, 11, 739}, + dictWord{140, 11, 511}, + dictWord{ + 5, + 10, + 107, + }, + dictWord{7, 10, 201}, + dictWord{136, 10, 518}, + dictWord{6, 10, 446}, + dictWord{135, 10, 1817}, + dictWord{134, 0, 1532}, + dictWord{ + 134, + 0, + 1097, + }, + dictWord{4, 11, 119}, + dictWord{5, 11, 170}, + dictWord{5, 11, 447}, + dictWord{7, 11, 1708}, + dictWord{7, 11, 1889}, + dictWord{9, 11, 357}, + dictWord{ + 9, + 11, + 719, + }, + dictWord{12, 11, 486}, + dictWord{140, 11, 596}, + dictWord{9, 10, 851}, + dictWord{141, 10, 510}, + dictWord{7, 0, 612}, + dictWord{8, 0, 545}, + dictWord{ + 8, + 0, + 568, + }, + dictWord{8, 0, 642}, + dictWord{9, 0, 717}, + dictWord{10, 0, 541}, + dictWord{10, 0, 763}, + dictWord{11, 0, 449}, + dictWord{12, 0, 489}, + dictWord{13, 0, 153}, + dictWord{13, 0, 296}, + dictWord{14, 0, 138}, + dictWord{14, 0, 392}, + dictWord{15, 0, 50}, + dictWord{16, 0, 6}, + dictWord{16, 0, 12}, + dictWord{20, 0, 9}, + dictWord{ + 132, + 10, + 504, + }, + dictWord{4, 11, 450}, + dictWord{135, 11, 1158}, + dictWord{11, 0, 54}, + dictWord{13, 0, 173}, + dictWord{13, 0, 294}, + dictWord{5, 10, 883}, + dictWord{ + 5, + 10, + 975, + }, + dictWord{8, 10, 392}, + dictWord{148, 10, 7}, + dictWord{13, 0, 455}, + dictWord{15, 0, 99}, + dictWord{15, 0, 129}, + dictWord{144, 0, 68}, + dictWord{135, 0, 172}, + dictWord{132, 11, 754}, + dictWord{5, 10, 922}, + dictWord{134, 10, 1707}, + dictWord{134, 0, 1029}, + dictWord{17, 11, 39}, + dictWord{148, 11, 36}, + dictWord{ + 4, + 0, + 568, + }, + dictWord{5, 10, 993}, + dictWord{7, 10, 515}, + dictWord{137, 10, 91}, + dictWord{132, 0, 732}, + dictWord{10, 0, 617}, + dictWord{138, 11, 617}, + dictWord{ + 134, + 0, + 974, + }, + dictWord{7, 0, 989}, + dictWord{10, 0, 377}, + dictWord{12, 0, 363}, + dictWord{13, 0, 68}, + dictWord{13, 0, 94}, + dictWord{14, 0, 108}, + dictWord{ + 142, + 0, + 306, + }, + dictWord{136, 0, 733}, + dictWord{132, 0, 428}, + dictWord{7, 0, 1789}, + dictWord{135, 11, 1062}, + dictWord{7, 0, 2015}, + dictWord{140, 0, 665}, + dictWord{135, 10, 1433}, + dictWord{5, 0, 287}, + dictWord{7, 10, 921}, + dictWord{8, 10, 580}, + dictWord{8, 10, 593}, + dictWord{8, 10, 630}, + dictWord{138, 10, 28}, + dictWord{138, 0, 806}, + dictWord{4, 10, 911}, + dictWord{5, 10, 867}, + dictWord{5, 10, 1013}, + dictWord{7, 10, 2034}, + dictWord{8, 10, 798}, + dictWord{136, 10, 813}, + dictWord{134, 0, 1539}, + dictWord{8, 11, 523}, + dictWord{150, 11, 34}, + dictWord{135, 11, 740}, + dictWord{7, 11, 238}, + dictWord{7, 11, 2033}, + dictWord{ + 8, + 11, + 120, + }, + dictWord{8, 11, 188}, + dictWord{8, 11, 659}, + dictWord{9, 11, 598}, + dictWord{10, 11, 466}, + dictWord{12, 11, 342}, + dictWord{12, 11, 588}, + dictWord{ + 13, + 11, + 503, + }, + dictWord{14, 11, 246}, + dictWord{143, 11, 92}, + dictWord{7, 0, 1563}, + dictWord{141, 0, 182}, + dictWord{5, 10, 135}, + dictWord{6, 10, 519}, + dictWord{ + 7, + 10, + 1722, + }, + dictWord{10, 10, 271}, + dictWord{11, 10, 261}, + dictWord{145, 10, 54}, + dictWord{14, 10, 338}, + dictWord{148, 10, 81}, + dictWord{7, 0, 484}, + dictWord{ + 4, + 10, + 300, + }, + dictWord{133, 10, 436}, + dictWord{145, 11, 114}, + dictWord{6, 0, 
1623}, + dictWord{134, 0, 1681}, + dictWord{133, 11, 640}, + dictWord{4, 11, 201}, + dictWord{7, 11, 1744}, + dictWord{8, 11, 602}, + dictWord{11, 11, 247}, + dictWord{11, 11, 826}, + dictWord{145, 11, 65}, + dictWord{8, 11, 164}, + dictWord{ + 146, + 11, + 62, + }, + dictWord{6, 0, 1833}, + dictWord{6, 0, 1861}, + dictWord{136, 0, 878}, + dictWord{134, 0, 1569}, + dictWord{8, 10, 357}, + dictWord{10, 10, 745}, + dictWord{ + 14, + 10, + 426, + }, + dictWord{17, 10, 94}, + dictWord{147, 10, 57}, + dictWord{12, 0, 93}, + dictWord{12, 0, 501}, + dictWord{13, 0, 362}, + dictWord{14, 0, 151}, + dictWord{15, 0, 40}, + dictWord{15, 0, 59}, + dictWord{16, 0, 46}, + dictWord{17, 0, 25}, + dictWord{18, 0, 14}, + dictWord{18, 0, 134}, + dictWord{19, 0, 25}, + dictWord{19, 0, 69}, + dictWord{ + 20, + 0, + 16, + }, + dictWord{20, 0, 19}, + dictWord{20, 0, 66}, + dictWord{21, 0, 23}, + dictWord{21, 0, 25}, + dictWord{150, 0, 42}, + dictWord{6, 0, 1748}, + dictWord{8, 0, 715}, + dictWord{ + 9, + 0, + 802, + }, + dictWord{10, 0, 46}, + dictWord{10, 0, 819}, + dictWord{13, 0, 308}, + dictWord{14, 0, 351}, + dictWord{14, 0, 363}, + dictWord{146, 0, 67}, + dictWord{ + 132, + 0, + 994, + }, + dictWord{4, 0, 63}, + dictWord{133, 0, 347}, + dictWord{132, 0, 591}, + dictWord{133, 0, 749}, + dictWord{7, 11, 1577}, + dictWord{10, 11, 304}, + dictWord{ + 10, + 11, + 549, + }, + dictWord{11, 11, 424}, + dictWord{12, 11, 365}, + dictWord{13, 11, 220}, + dictWord{13, 11, 240}, + dictWord{142, 11, 33}, + dictWord{133, 0, 366}, + dictWord{ + 7, + 0, + 557, + }, + dictWord{12, 0, 547}, + dictWord{14, 0, 86}, + dictWord{133, 10, 387}, + dictWord{135, 0, 1747}, + dictWord{132, 11, 907}, + dictWord{5, 11, 100}, + dictWord{10, 11, 329}, + dictWord{12, 11, 416}, + dictWord{149, 11, 29}, + dictWord{4, 10, 6}, + dictWord{5, 10, 708}, + dictWord{136, 10, 75}, + dictWord{7, 10, 1351}, + dictWord{9, 10, 581}, + dictWord{10, 10, 639}, + dictWord{11, 10, 453}, + dictWord{140, 10, 584}, + dictWord{7, 0, 89}, + dictWord{132, 10, 303}, + dictWord{138, 10, 772}, + dictWord{132, 11, 176}, + dictWord{5, 11, 636}, + dictWord{5, 11, 998}, + dictWord{8, 11, 26}, + dictWord{137, 11, 358}, + dictWord{7, 11, 9}, + dictWord{7, 11, 1508}, + dictWord{9, 11, 317}, + dictWord{10, 11, 210}, + dictWord{10, 11, 292}, + dictWord{10, 11, 533}, + dictWord{11, 11, 555}, + dictWord{12, 11, 526}, + dictWord{ + 12, + 11, + 607, + }, + dictWord{13, 11, 263}, + dictWord{13, 11, 459}, + dictWord{142, 11, 271}, + dictWord{134, 0, 1463}, + dictWord{6, 0, 772}, + dictWord{6, 0, 1137}, + dictWord{ + 139, + 11, + 595, + }, + dictWord{7, 0, 977}, + dictWord{139, 11, 66}, + dictWord{138, 0, 893}, + dictWord{20, 0, 48}, + dictWord{148, 11, 48}, + dictWord{5, 0, 824}, + dictWord{ + 133, + 0, + 941, + }, + dictWord{134, 11, 295}, + dictWord{7, 0, 1543}, + dictWord{7, 0, 1785}, + dictWord{10, 0, 690}, + dictWord{4, 10, 106}, + dictWord{139, 10, 717}, + dictWord{ + 7, + 0, + 440, + }, + dictWord{8, 0, 230}, + dictWord{139, 0, 106}, + dictWord{5, 10, 890}, + dictWord{133, 10, 988}, + dictWord{6, 10, 626}, + dictWord{142, 10, 431}, + dictWord{ + 10, + 11, + 127, + }, + dictWord{141, 11, 27}, + dictWord{17, 0, 32}, + dictWord{10, 10, 706}, + dictWord{150, 10, 44}, + dictWord{132, 0, 216}, + dictWord{137, 0, 332}, + dictWord{4, 10, 698}, + dictWord{136, 11, 119}, + dictWord{139, 11, 267}, + dictWord{138, 10, 17}, + dictWord{11, 11, 526}, + dictWord{11, 11, 939}, + dictWord{ + 141, + 11, + 290, + }, + dictWord{7, 11, 1167}, + dictWord{11, 11, 934}, + dictWord{13, 11, 391}, + 
dictWord{145, 11, 76}, + dictWord{139, 11, 39}, + dictWord{134, 10, 84}, + dictWord{ + 4, + 0, + 914, + }, + dictWord{5, 0, 800}, + dictWord{133, 0, 852}, + dictWord{10, 0, 416}, + dictWord{141, 0, 115}, + dictWord{7, 0, 564}, + dictWord{142, 0, 168}, + dictWord{ + 4, + 0, + 918, + }, + dictWord{133, 0, 876}, + dictWord{134, 0, 1764}, + dictWord{152, 0, 3}, + dictWord{4, 0, 92}, + dictWord{5, 0, 274}, + dictWord{7, 11, 126}, + dictWord{136, 11, 84}, + dictWord{140, 10, 498}, + dictWord{136, 11, 790}, + dictWord{8, 0, 501}, + dictWord{5, 10, 986}, + dictWord{6, 10, 130}, + dictWord{7, 10, 1582}, + dictWord{ + 8, + 10, + 458, + }, + dictWord{10, 10, 101}, + dictWord{10, 10, 318}, + dictWord{138, 10, 823}, + dictWord{6, 11, 64}, + dictWord{12, 11, 377}, + dictWord{141, 11, 309}, + dictWord{ + 5, + 0, + 743, + }, + dictWord{138, 0, 851}, + dictWord{4, 0, 49}, + dictWord{7, 0, 280}, + dictWord{135, 0, 1633}, + dictWord{134, 0, 879}, + dictWord{136, 0, 47}, + dictWord{ + 7, + 10, + 1644, + }, + dictWord{137, 10, 129}, + dictWord{132, 0, 865}, + dictWord{134, 0, 1202}, + dictWord{9, 11, 34}, + dictWord{139, 11, 484}, + dictWord{135, 10, 997}, + dictWord{5, 0, 272}, + dictWord{5, 0, 908}, + dictWord{5, 0, 942}, + dictWord{8, 0, 197}, + dictWord{9, 0, 47}, + dictWord{11, 0, 538}, + dictWord{139, 0, 742}, + dictWord{ + 6, + 11, + 1700, + }, + dictWord{7, 11, 26}, + dictWord{7, 11, 293}, + dictWord{7, 11, 382}, + dictWord{7, 11, 1026}, + dictWord{7, 11, 1087}, + dictWord{7, 11, 2027}, + dictWord{ + 8, + 11, + 24, + }, + dictWord{8, 11, 114}, + dictWord{8, 11, 252}, + dictWord{8, 11, 727}, + dictWord{8, 11, 729}, + dictWord{9, 11, 30}, + dictWord{9, 11, 199}, + dictWord{9, 11, 231}, + dictWord{9, 11, 251}, + dictWord{9, 11, 334}, + dictWord{9, 11, 361}, + dictWord{9, 11, 488}, + dictWord{9, 11, 712}, + dictWord{10, 11, 55}, + dictWord{10, 11, 60}, + dictWord{ + 10, + 11, + 232, + }, + dictWord{10, 11, 332}, + dictWord{10, 11, 384}, + dictWord{10, 11, 396}, + dictWord{10, 11, 504}, + dictWord{10, 11, 542}, + dictWord{10, 11, 652}, + dictWord{11, 11, 20}, + dictWord{11, 11, 48}, + dictWord{11, 11, 207}, + dictWord{11, 11, 291}, + dictWord{11, 11, 298}, + dictWord{11, 11, 342}, + dictWord{ + 11, + 11, + 365, + }, + dictWord{11, 11, 394}, + dictWord{11, 11, 620}, + dictWord{11, 11, 705}, + dictWord{11, 11, 1017}, + dictWord{12, 11, 123}, + dictWord{12, 11, 340}, + dictWord{12, 11, 406}, + dictWord{12, 11, 643}, + dictWord{13, 11, 61}, + dictWord{13, 11, 269}, + dictWord{13, 11, 311}, + dictWord{13, 11, 319}, + dictWord{13, 11, 486}, + dictWord{14, 11, 234}, + dictWord{15, 11, 62}, + dictWord{15, 11, 85}, + dictWord{16, 11, 71}, + dictWord{18, 11, 119}, + dictWord{148, 11, 105}, + dictWord{ + 6, + 0, + 1455, + }, + dictWord{150, 11, 37}, + dictWord{135, 10, 1927}, + dictWord{135, 0, 1911}, + dictWord{137, 0, 891}, + dictWord{7, 10, 1756}, + dictWord{137, 10, 98}, + dictWord{7, 10, 1046}, + dictWord{139, 10, 160}, + dictWord{132, 0, 761}, + dictWord{6, 11, 379}, + dictWord{7, 11, 270}, + dictWord{7, 11, 1116}, + dictWord{ + 8, + 11, + 176, + }, + dictWord{8, 11, 183}, + dictWord{9, 11, 432}, + dictWord{9, 11, 661}, + dictWord{12, 11, 247}, + dictWord{12, 11, 617}, + dictWord{146, 11, 125}, + dictWord{ + 6, + 10, + 45, + }, + dictWord{7, 10, 433}, + dictWord{8, 10, 129}, + dictWord{9, 10, 21}, + dictWord{10, 10, 392}, + dictWord{11, 10, 79}, + dictWord{12, 10, 499}, + dictWord{ + 13, + 10, + 199, + }, + dictWord{141, 10, 451}, + dictWord{4, 0, 407}, + dictWord{5, 11, 792}, + dictWord{133, 11, 900}, + 
dictWord{132, 0, 560}, + dictWord{135, 0, 183}, + dictWord{ + 13, + 0, + 490, + }, + dictWord{7, 10, 558}, + dictWord{136, 10, 353}, + dictWord{4, 0, 475}, + dictWord{6, 0, 731}, + dictWord{11, 0, 35}, + dictWord{13, 0, 71}, + dictWord{13, 0, 177}, + dictWord{14, 0, 422}, + dictWord{133, 10, 785}, + dictWord{8, 10, 81}, + dictWord{9, 10, 189}, + dictWord{9, 10, 201}, + dictWord{11, 10, 478}, + dictWord{11, 10, 712}, + dictWord{141, 10, 338}, + dictWord{4, 0, 418}, + dictWord{4, 0, 819}, + dictWord{133, 10, 353}, + dictWord{151, 10, 26}, + dictWord{4, 11, 901}, + dictWord{ + 133, + 11, + 776, + }, + dictWord{132, 0, 575}, + dictWord{7, 0, 818}, + dictWord{16, 0, 92}, + dictWord{17, 0, 14}, + dictWord{17, 0, 45}, + dictWord{18, 0, 75}, + dictWord{148, 0, 18}, + dictWord{ + 6, + 0, + 222, + }, + dictWord{7, 0, 636}, + dictWord{7, 0, 1620}, + dictWord{8, 0, 409}, + dictWord{9, 0, 693}, + dictWord{139, 0, 77}, + dictWord{6, 10, 25}, + dictWord{7, 10, 855}, + dictWord{7, 10, 1258}, + dictWord{144, 10, 32}, + dictWord{6, 0, 1880}, + dictWord{6, 0, 1887}, + dictWord{6, 0, 1918}, + dictWord{6, 0, 1924}, + dictWord{9, 0, 967}, + dictWord{9, 0, 995}, + dictWord{9, 0, 1015}, + dictWord{12, 0, 826}, + dictWord{12, 0, 849}, + dictWord{12, 0, 857}, + dictWord{12, 0, 860}, + dictWord{12, 0, 886}, + dictWord{ + 12, + 0, + 932, + }, + dictWord{18, 0, 228}, + dictWord{18, 0, 231}, + dictWord{146, 0, 240}, + dictWord{134, 0, 633}, + dictWord{134, 0, 1308}, + dictWord{4, 11, 37}, + dictWord{ + 5, + 11, + 334, + }, + dictWord{135, 11, 1253}, + dictWord{10, 0, 86}, + dictWord{4, 10, 4}, + dictWord{7, 10, 1118}, + dictWord{7, 10, 1320}, + dictWord{7, 10, 1706}, + dictWord{ + 8, + 10, + 277, + }, + dictWord{9, 10, 622}, + dictWord{11, 10, 724}, + dictWord{12, 10, 350}, + dictWord{12, 10, 397}, + dictWord{13, 10, 28}, + dictWord{13, 10, 159}, + dictWord{ + 15, + 10, + 89, + }, + dictWord{18, 10, 5}, + dictWord{19, 10, 9}, + dictWord{20, 10, 34}, + dictWord{150, 10, 47}, + dictWord{132, 11, 508}, + dictWord{137, 11, 448}, + dictWord{ + 12, + 11, + 107, + }, + dictWord{146, 11, 31}, + dictWord{132, 0, 817}, + dictWord{134, 0, 663}, + dictWord{133, 0, 882}, + dictWord{134, 0, 914}, + dictWord{132, 11, 540}, + dictWord{132, 11, 533}, + dictWord{136, 11, 608}, + dictWord{8, 0, 885}, + dictWord{138, 0, 865}, + dictWord{132, 0, 426}, + dictWord{6, 0, 58}, + dictWord{7, 0, 745}, + dictWord{7, 0, 1969}, + dictWord{8, 0, 399}, + dictWord{8, 0, 675}, + dictWord{9, 0, 479}, + dictWord{9, 0, 731}, + dictWord{10, 0, 330}, + dictWord{10, 0, 593}, + dictWord{ + 10, + 0, + 817, + }, + dictWord{11, 0, 32}, + dictWord{11, 0, 133}, + dictWord{11, 0, 221}, + dictWord{145, 0, 68}, + dictWord{134, 10, 255}, + dictWord{7, 0, 102}, + dictWord{ + 137, + 0, + 538, + }, + dictWord{137, 10, 216}, + dictWord{7, 11, 253}, + dictWord{136, 11, 549}, + dictWord{135, 11, 912}, + dictWord{9, 10, 183}, + dictWord{139, 10, 286}, + dictWord{11, 10, 956}, + dictWord{151, 10, 3}, + dictWord{8, 11, 527}, + dictWord{18, 11, 60}, + dictWord{147, 11, 24}, + dictWord{4, 10, 536}, + dictWord{7, 10, 1141}, + dictWord{10, 10, 723}, + dictWord{139, 10, 371}, + dictWord{133, 11, 920}, + dictWord{7, 0, 876}, + dictWord{135, 10, 285}, + dictWord{135, 10, 560}, + dictWord{ + 132, + 10, + 690, + }, + dictWord{142, 11, 126}, + dictWord{11, 10, 33}, + dictWord{12, 10, 571}, + dictWord{149, 10, 1}, + dictWord{133, 0, 566}, + dictWord{9, 0, 139}, + dictWord{ + 10, + 0, + 399, + }, + dictWord{11, 0, 469}, + dictWord{12, 0, 634}, + dictWord{13, 0, 223}, + dictWord{132, 11, 
483}, + dictWord{6, 0, 48}, + dictWord{135, 0, 63}, + dictWord{18, 0, 12}, + dictWord{7, 10, 1862}, + dictWord{12, 10, 491}, + dictWord{12, 10, 520}, + dictWord{13, 10, 383}, + dictWord{142, 10, 244}, + dictWord{135, 11, 1665}, + dictWord{132, 11, 448}, + dictWord{9, 11, 495}, + dictWord{146, 11, 104}, + dictWord{6, 0, 114}, + dictWord{7, 0, 1224}, + dictWord{7, 0, 1556}, + dictWord{136, 0, 3}, + dictWord{ + 4, + 10, + 190, + }, + dictWord{133, 10, 554}, + dictWord{8, 0, 576}, + dictWord{9, 0, 267}, + dictWord{133, 10, 1001}, + dictWord{133, 10, 446}, + dictWord{133, 0, 933}, + dictWord{139, 11, 1009}, + dictWord{8, 11, 653}, + dictWord{13, 11, 93}, + dictWord{147, 11, 14}, + dictWord{6, 0, 692}, + dictWord{6, 0, 821}, + dictWord{134, 0, 1077}, + dictWord{5, 11, 172}, + dictWord{135, 11, 801}, + dictWord{138, 0, 752}, + dictWord{4, 0, 375}, + dictWord{134, 0, 638}, + dictWord{134, 0, 1011}, + dictWord{ + 140, + 11, + 540, + }, + dictWord{9, 0, 96}, + dictWord{133, 11, 260}, + dictWord{139, 11, 587}, + dictWord{135, 10, 1231}, + dictWord{12, 0, 30}, + dictWord{13, 0, 148}, + dictWord{ + 14, + 0, + 87, + }, + dictWord{14, 0, 182}, + dictWord{16, 0, 42}, + dictWord{20, 0, 70}, + dictWord{132, 10, 304}, + dictWord{6, 0, 1398}, + dictWord{7, 0, 56}, + dictWord{7, 0, 1989}, + dictWord{8, 0, 337}, + dictWord{8, 0, 738}, + dictWord{9, 0, 600}, + dictWord{12, 0, 37}, + dictWord{13, 0, 447}, + dictWord{142, 0, 92}, + dictWord{138, 0, 666}, + dictWord{ + 5, + 0, + 394, + }, + dictWord{7, 0, 487}, + dictWord{136, 0, 246}, + dictWord{9, 0, 437}, + dictWord{6, 10, 53}, + dictWord{6, 10, 199}, + dictWord{7, 10, 1408}, + dictWord{8, 10, 32}, + dictWord{8, 10, 93}, + dictWord{10, 10, 397}, + dictWord{10, 10, 629}, + dictWord{11, 10, 593}, + dictWord{11, 10, 763}, + dictWord{13, 10, 326}, + dictWord{145, 10, 35}, + dictWord{134, 10, 105}, + dictWord{9, 0, 320}, + dictWord{10, 0, 506}, + dictWord{138, 10, 794}, + dictWord{7, 11, 57}, + dictWord{8, 11, 167}, + dictWord{8, 11, 375}, + dictWord{9, 11, 82}, + dictWord{9, 11, 561}, + dictWord{10, 11, 620}, + dictWord{10, 11, 770}, + dictWord{11, 10, 704}, + dictWord{141, 10, 396}, + dictWord{6, 0, 1003}, + dictWord{5, 10, 114}, + dictWord{5, 10, 255}, + dictWord{141, 10, 285}, + dictWord{7, 0, 866}, + dictWord{135, 0, 1163}, + dictWord{133, 11, 531}, + dictWord{ + 132, + 0, + 328, + }, + dictWord{7, 10, 2035}, + dictWord{8, 10, 19}, + dictWord{9, 10, 89}, + dictWord{138, 10, 831}, + dictWord{8, 11, 194}, + dictWord{136, 11, 756}, + dictWord{ + 136, + 0, + 1000, + }, + dictWord{5, 11, 453}, + dictWord{134, 11, 441}, + dictWord{4, 0, 101}, + dictWord{5, 0, 833}, + dictWord{7, 0, 1171}, + dictWord{136, 0, 744}, + dictWord{ + 133, + 0, + 726, + }, + dictWord{136, 10, 746}, + dictWord{138, 0, 176}, + dictWord{6, 0, 9}, + dictWord{6, 0, 397}, + dictWord{7, 0, 53}, + dictWord{7, 0, 1742}, + dictWord{10, 0, 632}, + dictWord{11, 0, 828}, + dictWord{140, 0, 146}, + dictWord{135, 11, 22}, + dictWord{145, 11, 64}, + dictWord{132, 0, 839}, + dictWord{11, 0, 417}, + dictWord{12, 0, 223}, + dictWord{140, 0, 265}, + dictWord{4, 11, 102}, + dictWord{7, 11, 815}, + dictWord{7, 11, 1699}, + dictWord{139, 11, 964}, + dictWord{5, 10, 955}, + dictWord{ + 136, + 10, + 814, + }, + dictWord{6, 0, 1931}, + dictWord{6, 0, 2007}, + dictWord{18, 0, 246}, + dictWord{146, 0, 247}, + dictWord{8, 0, 198}, + dictWord{11, 0, 29}, + dictWord{140, 0, 534}, + dictWord{135, 0, 1771}, + dictWord{6, 0, 846}, + dictWord{7, 11, 1010}, + dictWord{11, 11, 733}, + dictWord{11, 11, 759}, + dictWord{12, 11, 
563}, + dictWord{ + 13, + 11, + 34, + }, + dictWord{14, 11, 101}, + dictWord{18, 11, 45}, + dictWord{146, 11, 129}, + dictWord{4, 0, 186}, + dictWord{5, 0, 157}, + dictWord{8, 0, 168}, + dictWord{138, 0, 6}, + dictWord{132, 11, 899}, + dictWord{133, 10, 56}, + dictWord{148, 10, 100}, + dictWord{133, 0, 875}, + dictWord{5, 0, 773}, + dictWord{5, 0, 991}, + dictWord{6, 0, 1635}, + dictWord{134, 0, 1788}, + dictWord{6, 0, 1274}, + dictWord{9, 0, 477}, + dictWord{141, 0, 78}, + dictWord{4, 0, 639}, + dictWord{7, 0, 111}, + dictWord{8, 0, 581}, + dictWord{ + 12, + 0, + 177, + }, + dictWord{6, 11, 52}, + dictWord{9, 11, 104}, + dictWord{9, 11, 559}, + dictWord{10, 10, 4}, + dictWord{10, 10, 13}, + dictWord{11, 10, 638}, + dictWord{ + 12, + 11, + 308, + }, + dictWord{19, 11, 87}, + dictWord{148, 10, 57}, + dictWord{132, 11, 604}, + dictWord{4, 11, 301}, + dictWord{133, 10, 738}, + dictWord{133, 10, 758}, + dictWord{134, 0, 1747}, + dictWord{7, 11, 1440}, + dictWord{11, 11, 854}, + dictWord{11, 11, 872}, + dictWord{11, 11, 921}, + dictWord{12, 11, 551}, + dictWord{ + 13, + 11, + 472, + }, + dictWord{142, 11, 367}, + dictWord{7, 0, 1364}, + dictWord{7, 0, 1907}, + dictWord{141, 0, 158}, + dictWord{134, 0, 873}, + dictWord{4, 0, 404}, + dictWord{ + 4, + 0, + 659, + }, + dictWord{7, 0, 552}, + dictWord{135, 0, 675}, + dictWord{135, 10, 1112}, + dictWord{139, 10, 328}, + dictWord{7, 11, 508}, + dictWord{137, 10, 133}, + dictWord{133, 0, 391}, + dictWord{5, 10, 110}, + dictWord{6, 10, 169}, + dictWord{6, 10, 1702}, + dictWord{7, 10, 400}, + dictWord{8, 10, 538}, + dictWord{9, 10, 184}, + dictWord{ + 9, + 10, + 524, + }, + dictWord{140, 10, 218}, + dictWord{6, 11, 310}, + dictWord{7, 11, 1849}, + dictWord{8, 11, 72}, + dictWord{8, 11, 272}, + dictWord{8, 11, 431}, + dictWord{ + 9, + 11, + 12, + }, + dictWord{9, 11, 351}, + dictWord{10, 11, 563}, + dictWord{10, 11, 630}, + dictWord{10, 11, 810}, + dictWord{11, 11, 367}, + dictWord{11, 11, 599}, + dictWord{11, 11, 686}, + dictWord{140, 11, 672}, + dictWord{5, 0, 540}, + dictWord{6, 0, 1697}, + dictWord{136, 0, 668}, + dictWord{132, 0, 883}, + dictWord{134, 0, 78}, + dictWord{12, 0, 628}, + dictWord{18, 0, 79}, + dictWord{6, 10, 133}, + dictWord{9, 10, 353}, + dictWord{139, 10, 993}, + dictWord{6, 11, 181}, + dictWord{7, 11, 537}, + dictWord{ + 8, + 11, + 64, + }, + dictWord{9, 11, 127}, + dictWord{10, 11, 496}, + dictWord{12, 11, 510}, + dictWord{141, 11, 384}, + dictWord{6, 10, 93}, + dictWord{7, 10, 1422}, + dictWord{ + 7, + 10, + 1851, + }, + dictWord{8, 10, 673}, + dictWord{9, 10, 529}, + dictWord{140, 10, 43}, + dictWord{137, 10, 371}, + dictWord{134, 0, 1460}, + dictWord{134, 0, 962}, + dictWord{4, 11, 244}, + dictWord{135, 11, 233}, + dictWord{9, 10, 25}, + dictWord{10, 10, 467}, + dictWord{138, 10, 559}, + dictWord{4, 10, 335}, + dictWord{ + 135, + 10, + 942, + }, + dictWord{133, 0, 460}, + dictWord{135, 11, 334}, + dictWord{134, 11, 1650}, + dictWord{4, 0, 199}, + dictWord{139, 0, 34}, + dictWord{5, 10, 601}, + dictWord{ + 8, + 10, + 39, + }, + dictWord{10, 10, 773}, + dictWord{11, 10, 84}, + dictWord{12, 10, 205}, + dictWord{142, 10, 1}, + dictWord{133, 10, 870}, + dictWord{134, 0, 388}, + dictWord{14, 0, 474}, + dictWord{148, 0, 120}, + dictWord{133, 11, 369}, + dictWord{139, 0, 271}, + dictWord{4, 0, 511}, + dictWord{9, 0, 333}, + dictWord{9, 0, 379}, + dictWord{ + 10, + 0, + 602, + }, + dictWord{11, 0, 441}, + dictWord{11, 0, 723}, + dictWord{11, 0, 976}, + dictWord{12, 0, 357}, + dictWord{132, 10, 181}, + dictWord{134, 0, 608}, + 
dictWord{134, 10, 1652}, + dictWord{22, 0, 49}, + dictWord{137, 11, 338}, + dictWord{140, 0, 988}, + dictWord{134, 0, 617}, + dictWord{5, 0, 938}, + dictWord{136, 0, 707}, + dictWord{132, 10, 97}, + dictWord{5, 10, 147}, + dictWord{6, 10, 286}, + dictWord{7, 10, 1362}, + dictWord{141, 10, 176}, + dictWord{6, 0, 756}, + dictWord{ + 134, + 0, + 1149, + }, + dictWord{133, 11, 896}, + dictWord{6, 10, 375}, + dictWord{7, 10, 169}, + dictWord{7, 10, 254}, + dictWord{136, 10, 780}, + dictWord{134, 0, 1583}, + dictWord{135, 10, 1447}, + dictWord{139, 0, 285}, + dictWord{7, 11, 1117}, + dictWord{8, 11, 393}, + dictWord{136, 11, 539}, + dictWord{135, 0, 344}, + dictWord{ + 6, + 0, + 469, + }, + dictWord{7, 0, 1709}, + dictWord{138, 0, 515}, + dictWord{5, 10, 629}, + dictWord{135, 10, 1549}, + dictWord{5, 11, 4}, + dictWord{5, 11, 810}, + dictWord{ + 6, + 11, + 13, + }, + dictWord{6, 11, 538}, + dictWord{6, 11, 1690}, + dictWord{6, 11, 1726}, + dictWord{7, 11, 499}, + dictWord{7, 11, 1819}, + dictWord{8, 11, 148}, + dictWord{ + 8, + 11, + 696, + }, + dictWord{8, 11, 791}, + dictWord{12, 11, 125}, + dictWord{13, 11, 54}, + dictWord{143, 11, 9}, + dictWord{135, 11, 1268}, + dictWord{137, 0, 404}, + dictWord{ + 132, + 0, + 500, + }, + dictWord{5, 0, 68}, + dictWord{134, 0, 383}, + dictWord{11, 0, 216}, + dictWord{139, 0, 340}, + dictWord{4, 11, 925}, + dictWord{5, 11, 803}, + dictWord{ + 8, + 11, + 698, + }, + dictWord{138, 11, 828}, + dictWord{4, 0, 337}, + dictWord{6, 0, 353}, + dictWord{7, 0, 1934}, + dictWord{8, 0, 488}, + dictWord{137, 0, 429}, + dictWord{7, 0, 236}, + dictWord{7, 0, 1795}, + dictWord{8, 0, 259}, + dictWord{9, 0, 135}, + dictWord{9, 0, 177}, + dictWord{9, 0, 860}, + dictWord{10, 0, 825}, + dictWord{11, 0, 115}, + dictWord{ + 11, + 0, + 370, + }, + dictWord{11, 0, 405}, + dictWord{11, 0, 604}, + dictWord{12, 0, 10}, + dictWord{12, 0, 667}, + dictWord{12, 0, 669}, + dictWord{13, 0, 76}, + dictWord{14, 0, 310}, + dictWord{15, 0, 76}, + dictWord{15, 0, 147}, + dictWord{148, 0, 23}, + dictWord{4, 0, 15}, + dictWord{4, 0, 490}, + dictWord{5, 0, 22}, + dictWord{6, 0, 244}, + dictWord{7, 0, 40}, + dictWord{7, 0, 200}, + dictWord{7, 0, 906}, + dictWord{7, 0, 1199}, + dictWord{9, 0, 616}, + dictWord{10, 0, 716}, + dictWord{11, 0, 635}, + dictWord{11, 0, 801}, + dictWord{ + 140, + 0, + 458, + }, + dictWord{12, 0, 756}, + dictWord{132, 10, 420}, + dictWord{134, 0, 1504}, + dictWord{6, 0, 757}, + dictWord{133, 11, 383}, + dictWord{6, 0, 1266}, + dictWord{ + 135, + 0, + 1735, + }, + dictWord{5, 0, 598}, + dictWord{7, 0, 791}, + dictWord{8, 0, 108}, + dictWord{9, 0, 123}, + dictWord{7, 10, 1570}, + dictWord{140, 10, 542}, + dictWord{ + 142, + 11, + 410, + }, + dictWord{9, 11, 660}, + dictWord{138, 11, 347}, +} diff --git a/vendor/github.com/andybalholm/brotli/symbol_list.go b/vendor/github.com/andybalholm/brotli/symbol_list.go new file mode 100644 index 0000000..c5cb49e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/symbol_list.go @@ -0,0 +1,22 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for building Huffman decoding tables. 
*/ + +type symbolList struct { + storage []uint16 + offset int +} + +func symbolListGet(sl symbolList, i int) uint16 { + return sl.storage[i+sl.offset] +} + +func symbolListPut(sl symbolList, i int, val uint16) { + sl.storage[i+sl.offset] = val +} diff --git a/vendor/github.com/andybalholm/brotli/transform.go b/vendor/github.com/andybalholm/brotli/transform.go new file mode 100644 index 0000000..d2c043a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/transform.go @@ -0,0 +1,641 @@ +package brotli + +const ( + transformIdentity = 0 + transformOmitLast1 = 1 + transformOmitLast2 = 2 + transformOmitLast3 = 3 + transformOmitLast4 = 4 + transformOmitLast5 = 5 + transformOmitLast6 = 6 + transformOmitLast7 = 7 + transformOmitLast8 = 8 + transformOmitLast9 = 9 + transformUppercaseFirst = 10 + transformUppercaseAll = 11 + transformOmitFirst1 = 12 + transformOmitFirst2 = 13 + transformOmitFirst3 = 14 + transformOmitFirst4 = 15 + transformOmitFirst5 = 16 + transformOmitFirst6 = 17 + transformOmitFirst7 = 18 + transformOmitFirst8 = 19 + transformOmitFirst9 = 20 + transformShiftFirst = 21 + transformShiftAll = 22 + iota - 22 + numTransformTypes +) + +const transformsMaxCutOff = transformOmitLast9 + +type transforms struct { + prefix_suffix_size uint16 + prefix_suffix []byte + prefix_suffix_map []uint16 + num_transforms uint32 + transforms []byte + params []byte + cutOffTransforms [transformsMaxCutOff + 1]int16 +} + +func transformPrefixId(t *transforms, I int) byte { + return t.transforms[(I*3)+0] +} + +func transformType(t *transforms, I int) byte { + return t.transforms[(I*3)+1] +} + +func transformSuffixId(t *transforms, I int) byte { + return t.transforms[(I*3)+2] +} + +func transformPrefix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformPrefixId(t, I)]:] +} + +func transformSuffix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformSuffixId(t, I)]:] +} + +/* RFC 7932 transforms string data */ +const kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. 
This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000" + +var kPrefixSuffixMap = [50]uint16{ + 0x00, + 0x02, + 0x05, + 0x0E, + 0x13, + 0x16, + 0x18, + 0x1E, + 0x23, + 0x25, + 0x2A, + 0x2D, + 0x2F, + 0x32, + 0x34, + 0x3A, + 0x3E, + 0x45, + 0x47, + 0x4E, + 0x55, + 0x5A, + 0x5C, + 0x63, + 0x68, + 0x6D, + 0x72, + 0x77, + 0x7A, + 0x7C, + 0x80, + 0x83, + 0x88, + 0x8C, + 0x8E, + 0x91, + 0x97, + 0x9F, + 0xA5, + 0xA9, + 0xAD, + 0xB2, + 0xB7, + 0xBD, + 0xC2, + 0xC7, + 0xCA, + 0xCF, + 0xD5, + 0xD8, +} + +/* RFC 7932 transforms */ +var kTransformsData = []byte{ + 49, + transformIdentity, + 49, + 49, + transformIdentity, + 0, + 0, + transformIdentity, + 0, + 49, + transformOmitFirst1, + 49, + 49, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 47, + 0, + transformIdentity, + 49, + 4, + transformIdentity, + 0, + 49, + transformIdentity, + 3, + 49, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 6, + 49, + transformOmitFirst2, + 49, + 49, + transformOmitLast1, + 49, + 1, + transformIdentity, + 0, + 49, + transformIdentity, + 1, + 0, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 7, + 49, + transformIdentity, + 9, + 48, + transformIdentity, + 0, + 49, + transformIdentity, + 8, + 49, + transformIdentity, + 5, + 49, + transformIdentity, + 10, + 49, + transformIdentity, + 11, + 49, + transformOmitLast3, + 49, + 49, + transformIdentity, + 13, + 49, + transformIdentity, + 14, + 49, + transformOmitFirst3, + 49, + 49, + transformOmitLast2, + 49, + 49, + transformIdentity, + 15, + 49, + transformIdentity, + 16, + 0, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 12, + 5, + transformIdentity, + 49, + 0, + transformIdentity, + 1, + 49, + transformOmitFirst4, + 49, + 49, + transformIdentity, + 18, + 49, + transformIdentity, + 17, + 49, + transformIdentity, + 19, + 49, + transformIdentity, + 20, + 49, + transformOmitFirst5, + 49, + 49, + transformOmitFirst6, + 49, + 47, + transformIdentity, + 49, + 49, + transformOmitLast4, + 49, + 49, + transformIdentity, + 22, + 49, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 23, + 49, + transformIdentity, + 24, + 49, + transformIdentity, + 25, + 49, + transformOmitLast7, + 49, + 49, + transformOmitLast1, + 26, + 49, + transformIdentity, + 27, + 49, + transformIdentity, + 28, + 0, + transformIdentity, + 12, + 49, + transformIdentity, + 29, + 49, + transformOmitFirst9, + 49, + 49, + transformOmitFirst7, + 49, + 49, + transformOmitLast6, + 49, + 49, + transformIdentity, + 21, + 49, + transformUppercaseFirst, + 1, + 49, + transformOmitLast8, + 49, + 49, + transformIdentity, + 31, + 49, + transformIdentity, + 32, + 47, + transformIdentity, + 3, + 49, + transformOmitLast5, + 49, + 49, + transformOmitLast9, + 49, + 0, + transformUppercaseFirst, + 1, + 49, + transformUppercaseFirst, + 8, + 5, + transformIdentity, + 21, + 49, + transformUppercaseAll, + 0, + 49, + transformUppercaseFirst, + 10, + 49, + transformIdentity, + 30, + 0, + transformIdentity, + 5, + 35, + transformIdentity, + 49, + 47, + transformIdentity, + 2, + 49, + transformUppercaseFirst, + 17, + 49, + transformIdentity, + 36, + 49, + transformIdentity, + 33, + 5, + transformIdentity, + 0, + 49, + transformUppercaseFirst, + 21, + 49, + transformUppercaseFirst, + 5, + 49, + transformIdentity, + 37, + 0, + transformIdentity, + 30, + 49, + transformIdentity, + 38, + 0, + transformUppercaseAll, + 0, + 49, + transformIdentity, + 39, + 0, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 
34, + 49, + transformUppercaseAll, + 8, + 49, + transformUppercaseFirst, + 12, + 0, + transformIdentity, + 21, + 49, + transformIdentity, + 40, + 0, + transformUppercaseFirst, + 12, + 49, + transformIdentity, + 41, + 49, + transformIdentity, + 42, + 49, + transformUppercaseAll, + 17, + 49, + transformIdentity, + 43, + 0, + transformUppercaseFirst, + 5, + 49, + transformUppercaseAll, + 10, + 0, + transformIdentity, + 34, + 49, + transformUppercaseFirst, + 33, + 49, + transformIdentity, + 44, + 49, + transformUppercaseAll, + 5, + 45, + transformIdentity, + 49, + 0, + transformIdentity, + 33, + 49, + transformUppercaseFirst, + 30, + 49, + transformUppercaseAll, + 30, + 49, + transformIdentity, + 46, + 49, + transformUppercaseAll, + 1, + 49, + transformUppercaseFirst, + 34, + 0, + transformUppercaseFirst, + 33, + 0, + transformUppercaseAll, + 30, + 0, + transformUppercaseAll, + 1, + 49, + transformUppercaseAll, + 33, + 49, + transformUppercaseAll, + 21, + 49, + transformUppercaseAll, + 12, + 0, + transformUppercaseAll, + 5, + 49, + transformUppercaseAll, + 34, + 0, + transformUppercaseAll, + 12, + 0, + transformUppercaseFirst, + 30, + 0, + transformUppercaseAll, + 34, + 0, + transformUppercaseFirst, + 34, +} + +var kBrotliTransforms = transforms{ + 217, + []byte(kPrefixSuffix), + kPrefixSuffixMap[:], + 121, + kTransformsData, + nil, /* no extra parameters */ + [transformsMaxCutOff + 1]int16{0, 12, 27, 23, 42, 63, 56, 48, 59, 64}, +} + +func getTransforms() *transforms { + return &kBrotliTransforms +} + +func toUpperCase(p []byte) int { + if p[0] < 0xC0 { + if p[0] >= 'a' && p[0] <= 'z' { + p[0] ^= 32 + } + + return 1 + } + + /* An overly simplified uppercasing model for UTF-8. */ + if p[0] < 0xE0 { + p[1] ^= 32 + return 2 + } + + /* An arbitrary transform for three byte characters. */ + p[2] ^= 5 + + return 3 +} + +func shiftTransform(word []byte, word_len int, parameter uint16) int { + /* Limited sign extension: scalar < (1 << 24). */ + var scalar uint32 = (uint32(parameter) & 0x7FFF) + (0x1000000 - (uint32(parameter) & 0x8000)) + if word[0] < 0x80 { + /* 1-byte rune / 0sssssss / 7 bit scalar (ASCII). */ + scalar += uint32(word[0]) + + word[0] = byte(scalar & 0x7F) + return 1 + } else if word[0] < 0xC0 { + /* Continuation / 10AAAAAA. */ + return 1 + } else if word[0] < 0xE0 { + /* 2-byte rune / 110sssss AAssssss / 11 bit scalar. */ + if word_len < 2 { + return 1 + } + scalar += uint32(word[1]&0x3F | (word[0]&0x1F)<<6) + word[0] = byte(0xC0 | (scalar>>6)&0x1F) + word[1] = byte(uint32(word[1]&0xC0) | scalar&0x3F) + return 2 + } else if word[0] < 0xF0 { + /* 3-byte rune / 1110ssss AAssssss BBssssss / 16 bit scalar. */ + if word_len < 3 { + return word_len + } + scalar += uint32(word[2])&0x3F | uint32(word[1]&0x3F)<<6 | uint32(word[0]&0x0F)<<12 + word[0] = byte(0xE0 | (scalar>>12)&0x0F) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>6)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | scalar&0x3F) + return 3 + } else if word[0] < 0xF8 { + /* 4-byte rune / 11110sss AAssssss BBssssss CCssssss / 21 bit scalar. 
*/ + if word_len < 4 { + return word_len + } + scalar += uint32(word[3])&0x3F | uint32(word[2]&0x3F)<<6 | uint32(word[1]&0x3F)<<12 | uint32(word[0]&0x07)<<18 + word[0] = byte(0xF0 | (scalar>>18)&0x07) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>12)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | (scalar>>6)&0x3F) + word[3] = byte(uint32(word[3]&0xC0) | scalar&0x3F) + return 4 + } + + return 1 +} + +func transformDictionaryWord(dst []byte, word []byte, len int, trans *transforms, transform_idx int) int { + var idx int = 0 + var prefix []byte = transformPrefix(trans, transform_idx) + var type_ byte = transformType(trans, transform_idx) + var suffix []byte = transformSuffix(trans, transform_idx) + { + var prefix_len int = int(prefix[0]) + prefix = prefix[1:] + for { + tmp1 := prefix_len + prefix_len-- + if tmp1 == 0 { + break + } + dst[idx] = prefix[0] + idx++ + prefix = prefix[1:] + } + } + { + var t int = int(type_) + var i int = 0 + if t <= transformOmitLast9 { + len -= t + } else if t >= transformOmitFirst1 && t <= transformOmitFirst9 { + var skip int = t - (transformOmitFirst1 - 1) + word = word[skip:] + len -= skip + } + + for i < len { + dst[idx] = word[i] + idx++ + i++ + } + if t == transformUppercaseFirst { + toUpperCase(dst[idx-len:]) + } else if t == transformUppercaseAll { + var uppercase []byte = dst + uppercase = uppercase[idx-len:] + for len > 0 { + var step int = toUpperCase(uppercase) + uppercase = uppercase[step:] + len -= step + } + } else if t == transformShiftFirst { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + shiftTransform(dst[idx-len:], int(len), param) + } else if t == transformShiftAll { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + var shift []byte = dst + shift = shift[idx-len:] + for len > 0 { + var step int = shiftTransform(shift, int(len), param) + shift = shift[step:] + len -= step + } + } + } + { + var suffix_len int = int(suffix[0]) + suffix = suffix[1:] + for { + tmp2 := suffix_len + suffix_len-- + if tmp2 == 0 { + break + } + dst[idx] = suffix[0] + idx++ + suffix = suffix[1:] + } + return idx + } +} diff --git a/vendor/github.com/andybalholm/brotli/utf8_util.go b/vendor/github.com/andybalholm/brotli/utf8_util.go new file mode 100644 index 0000000..3244247 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/utf8_util.go @@ -0,0 +1,70 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Heuristics for deciding about the UTF8-ness of strings. */ + +const kMinUTF8Ratio float64 = 0.75 + +/* Returns 1 if at least min_fraction of the bytes between pos and + pos + length in the (data, mask) ring-buffer is UTF8-encoded, otherwise + returns 0. 
*/ +func parseAsUTF8(symbol *int, input []byte, size uint) uint { + /* ASCII */ + if input[0]&0x80 == 0 { + *symbol = int(input[0]) + if *symbol > 0 { + return 1 + } + } + + /* 2-byte UTF8 */ + if size > 1 && input[0]&0xE0 == 0xC0 && input[1]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x1F)<<6 | int(input[1])&0x3F + if *symbol > 0x7F { + return 2 + } + } + + /* 3-byte UFT8 */ + if size > 2 && input[0]&0xF0 == 0xE0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x0F)<<12 | (int(input[1])&0x3F)<<6 | int(input[2])&0x3F + if *symbol > 0x7FF { + return 3 + } + } + + /* 4-byte UFT8 */ + if size > 3 && input[0]&0xF8 == 0xF0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 && input[3]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x07)<<18 | (int(input[1])&0x3F)<<12 | (int(input[2])&0x3F)<<6 | int(input[3])&0x3F + if *symbol > 0xFFFF && *symbol <= 0x10FFFF { + return 4 + } + } + + /* Not UTF8, emit a special symbol above the UTF8-code space */ + *symbol = 0x110000 | int(input[0]) + + return 1 +} + +/* Returns 1 if at least min_fraction of the data is UTF8-encoded.*/ +func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction float64) bool { + var size_utf8 uint = 0 + var i uint = 0 + for i < length { + var symbol int + current_data := data[(pos+i)&mask:] + var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i) + i += bytes_read + if symbol < 0x110000 { + size_utf8 += bytes_read + } + } + + return float64(size_utf8) > min_fraction*float64(length) +} diff --git a/vendor/github.com/andybalholm/brotli/util.go b/vendor/github.com/andybalholm/brotli/util.go new file mode 100644 index 0000000..a84553a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/util.go @@ -0,0 +1,7 @@ +package brotli + +func assert(cond bool) { + if !cond { + panic("assertion failure") + } +} diff --git a/vendor/github.com/andybalholm/brotli/write_bits.go b/vendor/github.com/andybalholm/brotli/write_bits.go new file mode 100644 index 0000000..8729901 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/write_bits.go @@ -0,0 +1,52 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +/* This function writes bits into bytes in increasing addresses, and within + a byte least-significant-bit first. + + The function can write up to 56 bits in one go with WriteBits + Example: let's assume that 3 bits (Rs below) have been written already: + + BYTE-0 BYTE+1 BYTE+2 + + 0000 0RRR 0000 0000 0000 0000 + + Now, we could write 5 or less bits in MSB by just sifting by 3 + and OR'ing to BYTE-0. + + For n bits, we take the last 5 bits, OR that with high bits in BYTE-0, + and locate the rest in BYTE+1, BYTE+2, etc. */ +func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) { + /* This branch of the code can write up to 56 bits at a time, + 7 bits are lost by being perhaps already in *p and at least + 1 bit is needed to initialize the bit-stream ahead (i.e. if 7 + bits are in *p and we write 57 bits, then the next write will + access a byte that was never initialized). 
*/ + p := array[*pos>>3:] + v := uint64(p[0]) + v |= bits << (*pos & 7) + binary.LittleEndian.PutUint64(p, v) + *pos += n_bits +} + +func writeSingleBit(bit bool, pos *uint, array []byte) { + if bit { + writeBits(1, 1, pos, array) + } else { + writeBits(1, 0, pos, array) + } +} + +func writeBitsPrepareStorage(pos uint, array []byte) { + assert(pos&7 == 0) + array[pos>>3] = 0 +} diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go new file mode 100644 index 0000000..8a68811 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -0,0 +1,162 @@ +package brotli + +import ( + "errors" + "io" + + "github.com/andybalholm/brotli/matchfinder" +) + +const ( + BestSpeed = 0 + BestCompression = 11 + DefaultCompression = 6 +) + +// WriterOptions configures Writer. +type WriterOptions struct { + // Quality controls the compression-speed vs compression-density trade-offs. + // The higher the quality, the slower the compression. Range is 0 to 11. + Quality int + // LGWin is the base 2 logarithm of the sliding window size. + // Range is 10 to 24. 0 indicates automatic configuration based on Quality. + LGWin int +} + +var ( + errEncode = errors.New("brotli: encode error") + errWriterClosed = errors.New("brotli: Writer is closed") +) + +// Writes to the returned writer are compressed and written to dst. +// It is the caller's responsibility to call Close on the Writer when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(dst io.Writer) *Writer { + return NewWriterLevel(dst, DefaultCompression) +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// The compression level can be DefaultCompression or any integer value between +// BestSpeed and BestCompression inclusive. +func NewWriterLevel(dst io.Writer, level int) *Writer { + return NewWriterOptions(dst, WriterOptions{ + Quality: level, + }) +} + +// NewWriterOptions is like NewWriter but specifies WriterOptions +func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer { + w := new(Writer) + w.options = options + w.Reset(dst) + return w +} + +// Reset discards the Writer's state and makes it equivalent to the result of +// its original state from NewWriter or NewWriterLevel, but writing to dst +// instead. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(dst io.Writer) { + encoderInitState(w) + w.params.quality = w.options.Quality + if w.options.LGWin > 0 { + w.params.lgwin = uint(w.options.LGWin) + } + w.dst = dst + w.err = nil +} + +func (w *Writer) writeChunk(p []byte, op int) (n int, err error) { + if w.dst == nil { + return 0, errWriterClosed + } + if w.err != nil { + return 0, w.err + } + + for { + availableIn := uint(len(p)) + nextIn := p + success := encoderCompressStream(w, op, &availableIn, &nextIn) + bytesConsumed := len(p) - int(availableIn) + p = p[bytesConsumed:] + n += bytesConsumed + if !success { + return n, errEncode + } + + if len(p) == 0 || w.err != nil { + return n, w.err + } + } +} + +// Flush outputs encoded data for all input provided to Write. The resulting +// output can be decoded to match all input before Flush, but the stream is +// not yet complete until after Close. +// Flush has a negative impact on compression. +func (w *Writer) Flush() error { + _, err := w.writeChunk(nil, operationFlush) + return err +} + +// Close flushes remaining data to the decorated writer. 
+func (w *Writer) Close() error { + // If stream is already closed, it is reported by `writeChunk`. + _, err := w.writeChunk(nil, operationFinish) + w.dst = nil + return err +} + +// Write implements io.Writer. Flush or Close must be called to ensure that the +// encoded bytes are actually flushed to the underlying Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + return w.writeChunk(p, operationProcess) +} + +type nopCloser struct { + io.Writer +} + +func (nopCloser) Close() error { return nil } + +// NewWriterV2 is like NewWriterLevel, but it uses the new implementation +// based on the matchfinder package. It currently supports up to level 7; +// if a higher level is specified, level 7 will be used. +func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer { + var mf matchfinder.MatchFinder + if level < 2 { + mf = matchfinder.M0{Lazy: level == 1} + } else { + hashLen := 6 + if level >= 6 { + hashLen = 5 + } + chainLen := 64 + switch level { + case 2: + chainLen = 0 + case 3: + chainLen = 1 + case 4: + chainLen = 2 + case 5: + chainLen = 4 + case 6: + chainLen = 8 + } + mf = &matchfinder.M4{ + MaxDistance: 1 << 20, + ChainLength: chainLen, + HashLen: hashLen, + DistanceBitCost: 57, + } + } + + return &matchfinder.Writer{ + Dest: dst, + MatchFinder: mf, + Encoder: &Encoder{}, + BlockSize: 1 << 16, + } +} diff --git a/vendor/github.com/elastic/go-sysinfo/.gitignore b/vendor/github.com/elastic/go-sysinfo/.gitignore index f3827eb..52a75b7 100644 --- a/vendor/github.com/elastic/go-sysinfo/.gitignore +++ b/vendor/github.com/elastic/go-sysinfo/.gitignore @@ -6,8 +6,6 @@ _obj *TEST.out -main.retry -testing/ssh_config -testing/ve -build/ \ No newline at end of file +build/ +**/testdata/fuzz diff --git a/vendor/github.com/elastic/go-sysinfo/.golangci.yml b/vendor/github.com/elastic/go-sysinfo/.golangci.yml new file mode 100644 index 0000000..5c0e861 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.golangci.yml @@ -0,0 +1,16 @@ +--- + +run: + issues-exit-code: 1 + modules-download-mode: readonly + +linters: + disable-all: true + fast: false + enable: + - goimports + - revive + +linters-settings: + goimports: + local-prefixes: github.com/elastic/go-sysinfo diff --git a/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md b/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md deleted file mode 100644 index 4e32864..0000000 --- a/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md +++ /dev/null @@ -1,165 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -### Added - -### Changed - -### Deprecated - -### Removed - -### Fixed - -## [1.9.0] - -### Added - -- Replace pkg/errors with Go 1.13 native errors. [#123](https://github.com/elastic/go-sysinfo/pull/123) -- Add OS family mappings for `rocky`, `openEuler`, and `almalinux`. [#143](https://github.com/elastic/go-sysinfo/pull/143) - -### Changed - -- Remove custom sysctl implementation and partial cgo requirement. [#135](https://github.com/elastic/go-sysinfo/pull/135) -- Changes on the `Host` and `LoadAverage` interfaces, now implemented by default on Linux and Darwin platforms. [#140](https://github.com/elastic/go-sysinfo/pull/140) - -## [1.8.1] - -### Fixed - -- Report OS name as Windows 11 when version is >= 10.0.22000. 
[#118](https://github.com/elastic/go-sysinfo/issues/118) [#121](https://github.com/elastic/go-sysinfo/pull/121) - -## [1.8.0] - -### Added - -- Added the Oracle Linux ("ol") platform to the "redhat" OS family. [#54](https://github.com/elastic/go-sysinfo/issues/54) [#115](https://github.com/elastic/go-sysinfo/pull/115) -- Added the Linux Mint ("linuxmint") platform to the "debian" OS family. [#52](https://github.com/elastic/go-sysinfo/issues/52) - -### Changed - -- Updated module to require Go 1.17. [#111](https://github.com/elastic/go-sysinfo/pull/111) -- The boot time value for Windows is now rounded to the nearest second to provide a more stable value. [#53](https://github.com/elastic/go-sysinfo/issues/53) [#114](https://github.com/elastic/go-sysinfo/pull/114) - -### Fixed - -- Fix handling of environment variables without values on macOS. [#94](https://github.com/elastic/go-sysinfo/pull/94) -- Fix build tags on AIX provider such that CGO is required. [#106](https://github.com/elastic/go-sysinfo/issues/106) - -## [1.7.1] - 2021-10-11 - -### Fixed - -- Fixed getting OS info when an unsupported file or directory is found matching /etc/\*-release [#102](https://github.com/elastic/go-sysinfo/pull/102) - -## [1.7.0] - 2021-02-22 - -### Added - -- Add per-process network stats [#96](https://github.com/elastic/go-sysinfo/pull/96) - -## [1.6.0] - 2021-02-09 - -### Added - -- Add darwin/arm64 support (Apple M1). [#91](https://github.com/elastic/go-sysinfo/pull/91) - -## [1.5.0] - 2021-01-14 - -### Added - -- Added os.type field to host info. [#87](https://github.com/elastic/go-sysinfo/pull/87) - -## [1.4.0] - 2020-07-21 - -### Added - -- Add AIX support [#77](https://github.com/elastic/go-sysinfo/pull/77) -- Added detection of containerized cgroup in Kubernetes [#80](https://github.com/elastic/go-sysinfo/pull/80) - -## [1.3.0] - 2020-01-13 - -### Changed - -- Convert NetworkCountersInfo maps to uint64 [#75](https://github.com/elastic/go-sysinfo/pull/75) - -## [1.2.1] - 2020-01-03 - -### Fixed - -- Create a `sidToString` function to deal with API changes in various versions of golang.org/x/sys/windows. [#74](https://github.com/elastic/go-sysinfo/pull/74) - -## [1.2.0] - 2019-12-09 - -### Added - -- Added detection of systemd cgroups to the `IsContainerized` check. [#71](https://github.com/elastic/go-sysinfo/pull/71) -- Added networking counters for Linux hosts. [#72](https://github.com/elastic/go-sysinfo/pull/72) - -## [1.1.1] - 2019-10-29 - -### Fixed - -- Fixed an issue determining the Linux distribution for Fedora 30. [#69](https://github.com/elastic/go-sysinfo/pull/69) - -## [1.1.0] - 2019-08-22 - -### Added - -- Add `VMStat` interface for Linux. [#59](https://github.com/elastic/go-sysinfo/pull/59) - -## [1.0.2] - 2019-07-09 - -### Fixed - -- Fixed a leak when calling the CommandLineToArgv function. [#51](https://github.com/elastic/go-sysinfo/pull/51) -- Fixed a crash when calling the CommandLineToArgv function. [#58](https://github.com/elastic/go-sysinfo/pull/58) - -## [1.0.1] - 2019-05-08 - -### Fixed - -- Add support for new prometheus/procfs API. [#49](https://github.com/elastic/go-sysinfo/pull/49) - -## [1.0.0] - 2019-05-03 - -### Added - -- Add Windows provider implementation. [#22](https://github.com/elastic/go-sysinfo/pull/22) -- Add Windows process provider. [#26](https://github.com/elastic/go-sysinfo/pull/26) -- Add `OpenHandleEnumerator` and `OpenHandleCount` and implement these for Windows. [#27](https://github.com/elastic/go-sysinfo/pull/27) -- Add user info to Process. 
[#34](https://github.com/elastic/go-sysinfo/pull/34) -- Implement `Processes` for Darwin. [#35](https://github.com/elastic/go-sysinfo/pull/35) -- Add `Parent()` to `Process`. [#46](https://github.com/elastic/go-sysinfo/pull/46) - -### Fixed - -- Fix Windows registry handle leak. [#33](https://github.com/elastic/go-sysinfo/pull/33) -- Fix Linux host ID by search for older locations for the machine-id file. [#44](https://github.com/elastic/go-sysinfo/pull/44) - -### Changed - -- Changed the host containerized check to reduce false positives. [#42](https://github.com/elastic/go-sysinfo/pull/42) [#43](https://github.com/elastic/go-sysinfo/pull/43) - -[Unreleased]: https://github.com/elastic/go-sysinfo/compare/v1.8.1...HEAD -[1.8.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.8.1 -[1.8.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.8.0 -[1.7.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.7.1 -[1.7.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.7.0 -[1.6.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.6.0 -[1.5.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.5.0 -[1.4.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.4.0 -[1.3.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.3.0 -[1.2.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.2.1 -[1.2.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.2.0 -[1.1.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.1.0 -[1.1.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.1.0 -[1.0.2]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.2 -[1.0.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.1 -[1.0.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.0 diff --git a/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md new file mode 100644 index 0000000..c206aa3 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# Contributing + +Pull requests are welcomed. You must + +- Sign the Elastic [Contributor License Agreement](https://www.elastic.co/contributor-agreement). +- Include a [changelog][changelog_docs] entry at `.changelog/{pr-number}.txt` with your pull request. +- Include tests that demonstrate the change is working. + +[changelog_docs]: https://github.com/GoogleCloudPlatform/magic-modules/blob/2834761fec3acbf35cacbffe100530f82eada650/.ci/RELEASE_NOTES_GUIDE.md#expected-format + +## Releasing + +To create a new release use the release workflow in GitHub actions. This will create a new draft +release in GitHub releases with a changelog. After the job completes, review the draft and if +everything is correct, publish the release. When the release is published GitHub will create the +git tag. 
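For reference, a minimal usage sketch of the Writer API introduced in the writer.go hunk above (illustrative only, not part of the vendored files; it uses only names defined in that hunk: NewWriterOptions, WriterOptions, DefaultCompression, Write and Close):

package main

import (
	"bytes"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	var buf bytes.Buffer
	// Quality follows the 0..11 range documented on WriterOptions.
	w := brotli.NewWriterOptions(&buf, brotli.WriterOptions{Quality: brotli.DefaultCompression})
	if _, err := io.WriteString(w, "hello, brotli"); err != nil {
		log.Fatal(err)
	}
	// Close finishes the stream; until Flush or Close, output may stay buffered.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}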
diff --git a/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/go-sysinfo/README.md index 409432c..c0f35aa 100644 --- a/vendor/github.com/elastic/go-sysinfo/README.md +++ b/vendor/github.com/elastic/go-sysinfo/README.md @@ -1,6 +1,6 @@ # go-sysinfo -[![Build Status](https://beats-ci.elastic.co/job/Library/job/go-sysinfo-mbp/job/main/badge/icon)](https://beats-ci.elastic.co/job/Library/job/go-sysinfo-mbp/job/main/) +[![go](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml/badge.svg)](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml) [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] [godocs]: http://godoc.org/github.com/elastic/go-sysinfo diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go index f1ecd41..e158f46 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 -// +build aix,ppc64 package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go index 3cabe3f..ea62d20 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix @@ -34,6 +33,7 @@ import ( "errors" "fmt" "os" + "strings" "time" "github.com/joeshaw/multierror" @@ -115,9 +115,23 @@ func (*host) Memory() (*types.HostMemoryInfo, error) { mem.VirtualFree = mem.Free + uint64(meminfo.pgsp_free)*pagesize mem.VirtualUsed = mem.VirtualTotal - mem.VirtualFree + mem.Metrics = map[string]uint64{ + "bytes_coalesced": uint64(meminfo.bytes_coalesced), + "bytes_coalesced_mempool": uint64(meminfo.bytes_coalesced_mempool), + "real_pinned": uint64(meminfo.real_pinned) * pagesize, + "pgins": uint64(meminfo.pgins), + "pgouts": uint64(meminfo.pgouts), + "pgsp_free": uint64(meminfo.pgsp_free) * pagesize, + "pgsp_rsvd": uint64(meminfo.pgsp_rsvd) * pagesize, + } + return &mem, nil } +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + func newHost() (*host, error) { h := &host{} r := &reader{} @@ -174,7 +188,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go index 9b03e2f..dc3af83 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go index 13458e5..945ce34 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. 
//go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go index 03c87da..d1220db 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go index 440574e..cfa35f2 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go index fb60e7d..0e369bb 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go @@ -19,7 +19,6 @@ // cgo -godefs defs_aix.go //go:build aix && ppc64 -// +build aix,ppc64 package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go index 591486f..8b3ed91 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build amd64 || arm64 -// +build amd64 arm64 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go index 989c708..1954e2a 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build amd64 || arm64 -// +build amd64 arm64 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go index 7d23d01..9e369d3 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go @@ -16,7 +16,6 @@ // under the License. 
//go:build amd64 || arm64 -// +build amd64 arm64 package darwin @@ -24,6 +23,7 @@ import ( "errors" "fmt" "os" + "strings" "time" "github.com/joeshaw/multierror" @@ -139,6 +139,10 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { return &mem, nil } +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { load, err := getLoadAverage() if err != nil { @@ -210,7 +214,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go index cc2a1c6..7246257 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build !386 -// +build !386 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go index 3232208..34f3a34 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build amd64 || arm64 -// +build amd64 arm64 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go index 178a854..4339366 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go index 02e39dd..73dd7cf 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build amd64 || arm64 -// +build amd64 arm64 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go index 4d82812..6a72f9f 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go @@ -16,11 +16,9 @@ // under the License. //go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo package darwin -// #cgo LDFLAGS:-lproc // #include // #include import "C" diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go index b63367e..7c73b69 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go @@ -16,16 +16,18 @@ // under the License. 
//go:build amd64 || arm64 -// +build amd64 arm64 package darwin import ( "bytes" "encoding/binary" + "errors" "fmt" "os" "strconv" + "strings" + "syscall" "time" "golang.org/x/sys/unix" @@ -33,6 +35,8 @@ import ( "github.com/elastic/go-sysinfo/types" ) +var errInvalidProcargs2Data = errors.New("invalid kern.procargs2 data") + func (s darwinSystem) Processes() ([]types.Process, error) { ps, err := unix.SysctlKinfoProcSlice("kern.proc.all") if err != nil { @@ -170,42 +174,48 @@ func (p *process) Memory() (types.MemoryInfo, error) { }, nil } -var nullTerminator = []byte{0} - // wrapper around sysctl KERN_PROCARGS2 // callbacks params are optional, // up to the caller as to which pieces of data they want func kern_procargs(pid int, p *process) error { data, err := unix.SysctlRaw("kern.procargs2", pid) if err != nil { - return nil + if errors.Is(err, syscall.EINVAL) { + // sysctl returns "invalid argument" for both "no such process" + // and "operation not permitted" errors. + return fmt.Errorf("no such process or operation not permitted: %w", err) + } + return err } - buf := bytes.NewBuffer(data) + return parseKernProcargs2(data, p) +} + +func parseKernProcargs2(data []byte, p *process) error { // argc - var argc int32 - if err := binary.Read(buf, binary.LittleEndian, &argc); err != nil { - return err + if len(data) < 4 { + return errInvalidProcargs2Data } + argc := binary.LittleEndian.Uint32(data) + data = data[4:] // exe - lines := bytes.Split(buf.Bytes(), nullTerminator) - p.exe = string(lines[0]) + lines := strings.Split(string(data), "\x00") + p.exe = lines[0] lines = lines[1:] - // skip nulls + // Skip nulls that may be appended after the exe. for len(lines) > 0 { - if len(lines[0]) == 0 { - lines = lines[1:] - continue + if lines[0] != "" { + break } - break + lines = lines[1:] } - // args - for i := 0; i < int(argc); i++ { - p.args = append(p.args, string(lines[0])) - lines = lines[1:] + // argv + if c := min(argc, uint32(len(lines))); c > 0 { + p.args = lines[:c] + lines = lines[c:] } // env vars @@ -215,13 +225,8 @@ func kern_procargs(pid int, p *process) error { break } - parts := bytes.SplitN(l, []byte{'='}, 2) - key := string(parts[0]) - var value string - if len(parts) == 2 { - value = string(parts[1]) - } - env[key] = value + key, val, _ := strings.Cut(l, "=") + env[key] = val } p.env = env @@ -240,3 +245,10 @@ func int8SliceToString(s []int8) string { } return buf.String() } + +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go index b79072e..ce4ee10 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go @@ -16,7 +16,6 @@ // under the License. //go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go index 91d883f..fe14050 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go @@ -16,7 +16,6 @@ // under the License. 
//go:build amd64 || arm64 -// +build amd64 arm64 package darwin diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go index e229d54..58665a7 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go @@ -37,7 +37,7 @@ func bootTime(fs procfs.FS) (time.Time, error) { return bootTimeValue, nil } - stat, err := fs.NewStat() + stat, err := fs.Stat() if err != nil { return time.Time{}, err } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go index 0b53d75..40bf454 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go @@ -28,7 +28,8 @@ import ( // Generated with: // // curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ -// grep -P '^#define CAP_\w+\s+\d+' | perl -pe 's/#define (\w+)\s+(\d+)/\2: "\1",/g' +// grep -P '^#define CAP_\w+\s+\d+' | \ +// perl -pe 's/#define CAP_(\w+)\s+(\d+)/\2: "\L\1",/g' var capabilityNames = map[int]string{ 0: "chown", 1: "dac_override", @@ -68,6 +69,9 @@ var capabilityNames = map[int]string{ 35: "wake_alarm", 36: "block_suspend", 37: "audit_read", + 38: "perfmon", + 39: "bpf", + 40: "checkpoint_restore", } func capabilityName(num int) string { @@ -82,7 +86,7 @@ func capabilityName(num int) string { func readCapabilities(content []byte) (*types.CapabilityInfo, error) { var cap types.CapabilityInfo - err := parseKeyValue(content, ":", func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { var err error switch string(key) { case "CapInh": diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go index 9adb5cb..cd6c010 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "time" "github.com/joeshaw/multierror" @@ -72,6 +73,10 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { return parseMemInfo(content) } +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + // VMStat reports data from /proc/vmstat on linux. 
func (h *host) VMStat() (*types.VMStatInfo, error) { content, err := ioutil.ReadFile(h.procFS.path("vmstat")) @@ -120,7 +125,7 @@ func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { } func (h *host) CPUTime() (types.CPUTimes, error) { - stat, err := h.procFS.NewStat() + stat, err := h.procFS.Stat() if err != nil { return types.CPUTimes{}, err } @@ -138,7 +143,7 @@ func (h *host) CPUTime() (types.CPUTimes, error) { } func newHost(fs procFS) (*host, error) { - stat, err := fs.NewStat() + stat, err := fs.Stat() if err != nil { return nil, fmt.Errorf("failed to read proc stat: %w", err) } @@ -154,6 +159,7 @@ func newHost(fs procFS) (*host, error) { r.os(h) r.time(h) r.uniqueID(h) + return h, r.Err() } @@ -207,7 +213,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go index c04bad0..c0c5ab8 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go @@ -29,7 +29,7 @@ func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) { } hasAvailable := false - err := parseKeyValue(content, ":", func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { num, err := parseBytesOrNumber(value) if err != nil { return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go index 863234c..f5b02be 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go @@ -21,7 +21,6 @@ import ( "bufio" "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -50,8 +49,11 @@ var ( // familyMap contains a mapping of family -> []platforms. 
var familyMap = map[string][]string{ - "redhat": {"redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", - "amzn", "rhel", "almalinux", "openeuler", "rocky"}, + "arch": {"arch", "antergos", "manjaro"}, + "redhat": { + "redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", + "amzn", "rhel", "almalinux", "openeuler", "rocky", + }, "debian": {"debian", "ubuntu", "raspbian", "linuxmint"}, "suse": {"suse", "sles", "opensuse"}, } @@ -97,9 +99,9 @@ func getOSInfo(baseDir string) (*types.OSInfo, error) { } func getOSRelease(baseDir string) (*types.OSInfo, error) { - lsbRel, _ := ioutil.ReadFile(filepath.Join(baseDir, lsbRelease)) + lsbRel, _ := os.ReadFile(filepath.Join(baseDir, lsbRelease)) - osRel, err := ioutil.ReadFile(filepath.Join(baseDir, osRelease)) + osRel, err := os.ReadFile(filepath.Join(baseDir, osRelease)) if err != nil { return nil, err } @@ -148,16 +150,15 @@ func parseOSRelease(content []byte) (*types.OSInfo, error) { func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { os := &types.OSInfo{ Type: "linux", - Platform: osRelease["ID"], - Name: osRelease["NAME"], - Version: osRelease["VERSION"], + Platform: firstOf(osRelease, "ID", "DISTRIB_ID"), + Name: firstOf(osRelease, "NAME", "PRETTY_NAME"), + Version: firstOf(osRelease, "VERSION", "VERSION_ID", "DISTRIB_RELEASE"), Build: osRelease["BUILD_ID"], - Codename: osRelease["VERSION_CODENAME"], + Codename: firstOf(osRelease, "VERSION_CODENAME", "DISTRIB_CODENAME"), } if os.Codename == "" { - // Some OSes uses their own CODENAME keys (e.g UBUNTU_CODENAME) or we - // can get the DISTRIB_CODENAME value from the lsb-release data. + // Some OSes use their own CODENAME keys (e.g UBUNTU_CODENAME). for k, v := range osRelease { if strings.Contains(k, "CODENAME") { os.Codename = v @@ -167,10 +168,19 @@ func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { } if os.Platform == "" { - // Fallback to the first word of the NAME field. - parts := strings.SplitN(os.Name, " ", 2) - if len(parts) > 0 { - os.Platform = strings.ToLower(parts[0]) + // Fallback to the first word of the Name field. + os.Platform, _, _ = strings.Cut(os.Name, " ") + } + + os.Family = linuxFamily(os.Platform) + if os.Family == "" { + // ID_LIKE is a space-separated list of OS identifiers that this + // OS is similar to. Use this to figure out the Linux family. + for _, id := range strings.Fields(osRelease["ID_LIKE"]) { + os.Family = linuxFamily(id) + if os.Family != "" { + break + } } } @@ -193,7 +203,6 @@ func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { } } - os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] return os, nil } @@ -224,7 +233,7 @@ func findDistribRelease(baseDir string) (*types.OSInfo, error) { } func getDistribRelease(file string) (*types.OSInfo, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } @@ -270,6 +279,40 @@ func parseDistribRelease(platform string, content []byte) (*types.OSInfo, error) } } - os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] + os.Family = linuxFamily(os.Platform) return os, nil } + +// firstOf returns the first non-empty value found in the map while +// iterating over keys. +func firstOf(kv map[string]string, keys ...string) string { + for _, key := range keys { + if v := kv[key]; v != "" { + return v + } + } + return "" +} + +// linuxFamily returns the linux distribution family associated to the OS platform. +// If there is no family associated then it returns an empty string. 
+func linuxFamily(platform string) string { + if platform == "" { + return "" + } + + platform = strings.ToLower(platform) + + // First try a direct lookup. + if family, found := platformToFamilyMap[platform]; found { + return family + } + + // Try prefix matching (e.g. opensuse matches opensuse-tumpleweed). + for platformPrefix, family := range platformToFamilyMap { + if strings.HasPrefix(platform, platformPrefix) { + return family + } + } + return "" +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go index 10cb947..52bae25 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go @@ -229,7 +229,7 @@ func (p *process) User() (types.UserInfo, error) { } var user types.UserInfo - err = parseKeyValue(content, ":", func(key, value []byte) error { + err = parseKeyValue(content, ':', func(key, value []byte) error { // See proc(5) for the format of /proc/[pid]/status switch string(key) { case "Uid": diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go index d04bb3c..fd38ea4 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go @@ -47,7 +47,7 @@ func (m SeccompMode) String() string { func readSeccompFields(content []byte) (*types.SeccompInfo, error) { var seccomp types.SeccompInfo - err := parseKeyValue(content, ":", func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { switch string(key) { case "Seccomp": mode, err := strconv.ParseUint(string(value), 10, 8) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go index b8705a1..8d9c27d 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -26,20 +26,28 @@ import ( "strconv" ) -func parseKeyValue(content []byte, separator string, callback func(key, value []byte) error) error { - sc := bufio.NewScanner(bytes.NewReader(content)) - for sc.Scan() { - parts := bytes.SplitN(sc.Bytes(), []byte(separator), 2) - if len(parts) != 2 { +// parseKeyValue parses key/val pairs separated by the provided separator from +// each line in content and invokes the callback. White-space is trimmed from +// val. Empty lines are ignored. All non-empty lines must contain the separator +// otherwise an error is returned. 
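// Illustrative example (not part of the vendored file): with the separator now
// a single byte, a typical call over /proc-style "Key:\tvalue" content is
//
//	_ = parseKeyValue([]byte("Name:\tbash\nUmask:\t0022\n"), ':', func(key, value []byte) error {
//		fmt.Printf("%s=%s\n", key, value) // prints "Name=bash", then "Umask=0022"
//		return nil
//	})
//
// as used by the capabilities, meminfo, process, seccomp and vmstat parsers
// updated elsewhere in this patch.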
+func parseKeyValue(content []byte, separator byte, callback func(key, value []byte) error) error { + var line []byte + + for len(content) > 0 { + line, content, _ = bytes.Cut(content, []byte{'\n'}) + if len(line) == 0 { continue } - if err := callback(parts[0], bytes.TrimSpace(parts[1])); err != nil { - return err + key, value, ok := bytes.Cut(line, []byte{separator}) + if !ok { + return fmt.Errorf("separator %q not found", separator) } + + callback(key, bytes.TrimSpace(value)) } - return sc.Err() + return nil } func findValue(filename, separator, key string) (string, error) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go index 2b9e878..ea918c8 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go @@ -45,7 +45,7 @@ func parseVMStat(content []byte) (*types.VMStatInfo, error) { var vmStat types.VMStatInfo refValues := reflect.ValueOf(&vmStat).Elem() - err := parseKeyValue(content, " ", func(key, value []byte) error { + err := parseKeyValue(content, ' ', func(key, value []byte) error { // turn our []byte value into an int val, err := parseBytesOrNumber(value) if err != nil { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go new file mode 100644 index 0000000..8cba7bc --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux || darwin || aix + +package shared + +import ( + "fmt" + "net" + "os" + "strings" +) + +// FQDN attempts to lookup the host's fully-qualified domain name and returns it. +// It does so using the following algorithm: +// +// 1. It gets the hostname from the OS. If this step fails, it returns an error. +// +// 2. It tries to perform a CNAME DNS lookup for the hostname. If this succeeds, it +// returns the CNAME (after trimming any trailing period) as the FQDN. +// +// 3. It tries to perform an IP lookup for the hostname. If this succeeds, it tries +// to perform a reverse DNS lookup on the returned IPs and returns the first +// successful result (after trimming any trailing period) as the FQDN. +// +// 4. If steps 2 and 3 both fail, an empty string is returned as the FQDN along with +// errors from those steps. 
+func FQDN() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("could not get hostname to look for FQDN: %w", err) + } + + return fqdn(hostname) +} + +func fqdn(hostname string) (string, error) { + var errs error + cname, err := net.LookupCNAME(hostname) + if err != nil { + errs = fmt.Errorf("could not get FQDN, all methods failed: failed looking up CNAME: %w", + err) + } + if cname != "" { + return strings.ToLower(strings.TrimSuffix(cname, ".")), nil + } + + ips, err := net.LookupIP(hostname) + if err != nil { + errs = fmt.Errorf("%s: failed looking up IP: %w", errs, err) + } + + for _, ip := range ips { + names, err := net.LookupAddr(ip.String()) + if err != nil || len(names) == 0 { + continue + } + return strings.ToLower(strings.TrimSuffix(names[0], ".")), nil + } + + return "", errs +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go index 96a90d9..b429ff2 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go @@ -19,12 +19,18 @@ package windows import ( "errors" + "fmt" "os" + "strings" + "syscall" "time" - windows "github.com/elastic/go-windows" "github.com/joeshaw/multierror" + stdwindows "golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" + "github.com/elastic/go-sysinfo/internal/registry" "github.com/elastic/go-sysinfo/providers/shared" "github.com/elastic/go-sysinfo/types" @@ -78,6 +84,15 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { }, nil } +func (h *host) FQDN() (string, error) { + fqdn, err := getComputerNameEx(stdwindows.ComputerNamePhysicalDnsFullyQualified) + if err != nil { + return "", fmt.Errorf("could not get windows FQDN: %s", err) + } + + return strings.ToLower(strings.TrimSuffix(fqdn, ".")), nil +} + func newHost() (*host, error) { h := &host{} r := &reader{} @@ -134,7 +149,38 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) +} + +func getComputerNameEx(name uint32) (string, error) { + size := uint32(64) + + for { + buff := make([]uint16, size) + err := stdwindows.GetComputerNameEx( + name, &buff[0], &size) + if err == nil { + return syscall.UTF16ToString(buff[:size]), nil + } + + // ERROR_MORE_DATA means buff is too small and size is set to the + // number of bytes needed to store the FQDN. For details, see + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getcomputernameexw#return-value + if errors.Is(err, syscall.ERROR_MORE_DATA) { + // Safeguard to avoid an infinite loop. + if size <= uint32(len(buff)) { + return "", fmt.Errorf( + "windows.GetComputerNameEx returned ERROR_MORE_DATA, " + + "but data size should fit into buffer") + } else { + // Grow the buffer and try again. 
+ buff = make([]uint16, size) + continue + } + } + + return "", fmt.Errorf("could not get windows FQDN: could not get windows.ComputerNamePhysicalDnsFullyQualified: %w", err) + } } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go index bc74a1e..5fa696a 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go @@ -68,7 +68,7 @@ func OperatingSystem() (*types.OSInfo, error) { case 0: osInfo.Major, _ = strconv.Atoi(p) case 1: - osInfo.Major, _ = strconv.Atoi(p) + osInfo.Minor, _ = strconv.Atoi(p) } } } diff --git a/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/go-sysinfo/types/host.go index d2911ee..5685e98 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/host.go +++ b/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -26,6 +26,9 @@ type Host interface { CPUTimer Info() HostInfo Memory() (*HostMemoryInfo, error) + + // FQDN returns the fully-qualified domain name of the host, lowercased. + FQDN() (string, error) } // NetworkCounters represents network stats from /proc/net @@ -66,7 +69,7 @@ type HostInfo struct { Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). BootTime time.Time `json:"boot_time"` // Host boot time. Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. - Hostname string `json:"name"` // Hostname + Hostname string `json:"name"` // Hostname, lowercased. IPs []string `json:"ip,omitempty"` // List of all IPs. KernelVersion string `json:"kernel_version"` // Kernel version. MACs []string `json:"mac"` // List of MAC addresses. diff --git a/vendor/github.com/elastic/go-sysinfo/types/process.go b/vendor/github.com/elastic/go-sysinfo/types/process.go index 74a396e..c02ac9d 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/process.go +++ b/vendor/github.com/elastic/go-sysinfo/types/process.go @@ -24,7 +24,7 @@ type Process interface { CPUTimer // Info returns process info. // It may return partial information if the provider - // implementation is unable to collect all of the necessary data. + // implementation is unable to collect all the necessary data. Info() (ProcessInfo, error) Memory() (MemoryInfo, error) User() (UserInfo, error) diff --git a/vendor/github.com/go-faster/city/.codecov.yml b/vendor/github.com/go-faster/city/.codecov.yml new file mode 100644 index 0000000..a8a1743 --- /dev/null +++ b/vendor/github.com/go-faster/city/.codecov.yml @@ -0,0 +1,9 @@ +ignore: + # We have only auxiliary binaries in internal/ no actual user-facing code. 
+ - internal/** +coverage: + status: + patch: false + project: + default: + threshold: 0.5% diff --git a/vendor/github.com/go-faster/city/.gitignore b/vendor/github.com/go-faster/city/.gitignore new file mode 100644 index 0000000..f1c181e --- /dev/null +++ b/vendor/github.com/go-faster/city/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/go-faster/city/.golangci.yml b/vendor/github.com/go-faster/city/.golangci.yml new file mode 100644 index 0000000..629307a --- /dev/null +++ b/vendor/github.com/go-faster/city/.golangci.yml @@ -0,0 +1,111 @@ +linters-settings: + govet: + check-shadowing: true + gocyclo: + min-complexity: 15 + maligned: + suggest-new: true + dupl: + threshold: 120 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + lll: + line-length: 140 + goimports: + local-prefixes: github.com/go-faster/ + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - hugeParam + - rangeValCopy + - exitAfterDefer + - whyNoLint + - singleCaseSwitch + - commentedOutCode + - appendAssign + - unnecessaryBlock + - redundantSprint + +linters: + disable-all: true + enable: + - deadcode + - depguard + - dogsled + - errcheck + - goconst + - gocritic + - gofmt + - goimports + - revive + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # Do not enable: + # - wsl (too opinionated about newlines) + # - godox (todos are OK) + # - bodyclose (false positives on helper functions) + # - prealloc (not worth it in scope of this project) + # - maligned (same as prealloc) + # - funlen (gocyclo is enough) + # - gochecknoglobals (we know when it is ok to use globals) + +issues: + exclude-use-default: false + exclude-rules: + # Disable linters that are annoying in tests. + - path: _test\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + - funlen + - goconst + - gocognit + - scopelint + - lll + + - path: _test\.go + text: "Combine" + linters: + - gocritic + + # Check that equal to self is true + - linters: [gocritic] + source: '(assert|require).+Equal' + text: 'dupArg' + path: _test\.go + + # Ignore shadowing of err. + - linters: [ govet ] + text: 'declaration of "(err|ctx|log|c)"' + + # Ignore linters in main packages. + - path: main\.go + linters: [ goconst, funlen, gocognit, gocyclo ] + + # Ignore run-once generator + - path: citygen + linters: [ revive, gosec ] diff --git a/vendor/github.com/go-faster/city/128.go b/vendor/github.com/go-faster/city/128.go new file mode 100644 index 0000000..bccf81b --- /dev/null +++ b/vendor/github.com/go-faster/city/128.go @@ -0,0 +1,143 @@ +package city + +import "encoding/binary" + +// much faster than uint64[2] + +// U128 is uint128. +type U128 struct { + Low uint64 // first 64 bits + High uint64 // last 64 bits +} + +// A subroutine for Hash128(). Returns a decent 128-bit hash for strings +// of any length representable in signed long. Based on City and Mumur. 
+func cityMurmur(s []byte, seed U128) U128 { + length := len(s) + a := seed.Low + b := seed.High + c := uint64(0) + d := uint64(0) + l := length - 16 + if l <= 0 { // length <= 16 + a = shiftMix(a*k1) * k1 + c = b*k1 + hash0to16(s, length) + + tmp := c + if length >= 8 { + tmp = binary.LittleEndian.Uint64(s) + } + d = shiftMix(a + tmp) + } else { // length > 16 + c = hash16(binary.LittleEndian.Uint64(s[length-8:])+k1, a) + d = hash16(b+uint64(length), c+binary.LittleEndian.Uint64(s[length-16:])) + a += d + for { + a ^= shiftMix(binary.LittleEndian.Uint64(s)*k1) * k1 + a *= k1 + b ^= a + c ^= shiftMix(binary.LittleEndian.Uint64(s[8:])*k1) * k1 + c *= k1 + d ^= c + s = s[16:] + l -= 16 + if l <= 0 { + break + } + } + } + a = hash16(a, c) + b = hash16(d, b) + return U128{a ^ b, hash16(b, a)} +} + +// Hash128Seed return a 128-bit hash with a seed. +func Hash128Seed(s []byte, seed U128) U128 { + if len(s) < 128 { + return cityMurmur(s, seed) + } + + // Saving initial input for tail hashing. + t := s + + // We expect len >= 128 to be the common case. Keep 56 bytes of state: + // v, w, x, y and z. + var v, w U128 + x := seed.Low + y := seed.High + z := uint64(len(s)) * k1 + + v.Low = rot64(y^k1, 49)*k1 + binary.LittleEndian.Uint64(s) + v.High = rot64(v.Low, 42)*k1 + binary.LittleEndian.Uint64(s[8:]) + w.Low = rot64(y+z, 35)*k1 + x + w.High = rot64(x+binary.LittleEndian.Uint64(s[88:]), 53) * k1 + + // This is the same inner loop as Hash64(), manually unrolled. + for len(s) >= 128 { + // Roll 1. + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[8:]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[48:]), 42) * k1 + x ^= w.High + y += v.Low + binary.LittleEndian.Uint64(s[40:]) + z = rot64(z+w.Low, 33) * k1 + v = weakHash32SeedsByte(s, v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[32:], z+w.High, y+binary.LittleEndian.Uint64(s[16:])) + z, x = x, z + s = s[64:] + + // Roll 2. + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[8:]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[48:]), 42) * k1 + x ^= w.High + y += v.Low + binary.LittleEndian.Uint64(s[40:]) + z = rot64(z+w.Low, 33) * k1 + v = weakHash32SeedsByte(s, v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[32:], z+w.High, y+binary.LittleEndian.Uint64(s[16:])) + z, x = x, z + s = s[64:] + } + + x += rot64(v.Low+z, 49) * k0 + y = y*k0 + rot64(w.High, 37) + z = z*k0 + rot64(w.Low, 27) + w.Low *= 9 + v.Low *= k0 + + // If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s. + for i := 0; i < len(s); { + i += 32 + y = rot64(x+y, 42)*k0 + v.High + w.Low += binary.LittleEndian.Uint64(t[len(t)-i+16:]) + x = x*k0 + w.Low + z += w.High + binary.LittleEndian.Uint64(t[len(t)-i:]) + w.High += v.Low + v = weakHash32SeedsByte(t[len(t)-i:], v.Low+z, v.High) + v.Low *= k0 + } + + // At this point our 56 bytes of state should contain more than + // enough information for a strong 128-bit hash. We use two different + // 56-byte-to-8-byte hashes to get a 16-byte final result. + x = hash16(x, v.Low) + y = hash16(y+z, w.Low) + + return U128{ + Low: hash16(x+v.High, w.High) + y, + High: hash16(x+w.High, y+v.High), + } +} + +// Hash128 returns a 128-bit hash and are tuned for strings of at least +// a few hundred bytes. Depending on your compiler and hardware, +// it's likely faster than Hash64() on sufficiently long strings. +// It's slower than necessary on shorter strings, but we expect +// that case to be relatively unimportant. 
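// Illustrative usage (not part of the vendored file), assuming the package is
// imported as "city":
//
//	sum := city.Hash128([]byte("a sufficiently long payload ..."))
//	fmt.Printf("%016x%016x\n", sum.High, sum.Low)
//
// For short inputs Hash64 (or Hash32) is usually the better fit, per the note
// above about Hash128 being tuned for strings of at least a few hundred bytes.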
+func Hash128(s []byte) U128 { + if len(s) >= 16 { + return Hash128Seed(s[16:], U128{ + Low: binary.LittleEndian.Uint64(s), + High: binary.LittleEndian.Uint64(s[8:]) + k0}, + ) + } + return Hash128Seed(s, U128{Low: k0, High: k1}) +} diff --git a/vendor/github.com/go-faster/city/32.go b/vendor/github.com/go-faster/city/32.go new file mode 100644 index 0000000..783ec9d --- /dev/null +++ b/vendor/github.com/go-faster/city/32.go @@ -0,0 +1,169 @@ +package city + +import "encoding/binary" + +// Some primes between 2^63 and 2^64 for various uses. +const ( + k0 uint64 = 0xc3a5c85c97cb3127 + k1 uint64 = 0xb492b66fbe98f273 + k2 uint64 = 0x9ae16a3b2f90404f +) + +// Magic numbers for 32-bit hashing. Copied from Murmur3. +const ( + c1 uint32 = 0xcc9e2d51 + c2 uint32 = 0x1b873593 +) + +func bswap32(x uint32) uint32 { + return ((x & 0xff000000) >> 24) | + ((x & 0x00ff0000) >> 8) | + ((x & 0x0000ff00) << 8) | + ((x & 0x000000ff) << 24) +} + +func fetch32(p []byte) uint32 { + return binary.LittleEndian.Uint32(p) +} + +// A 32-bit to 32-bit integer hash copied from Murmr3. +func fmix(h uint32) uint32 { + h ^= h >> 16 + h *= 0x85ebca6b + h ^= h >> 13 + h *= 0xc2b2ae35 + h ^= h >> 16 + return h +} + +func rotate32(val uint32, shift int) uint32 { + // Avoid shifting by 32: doing so yields and undefined result. + if shift == 0 { + return val + } + return (val >> uint32(shift)) | (val << (32 - uint32(shift))) +} + +func mur(a, h uint32) uint32 { + // Helper from Murmur3 for combining two 32-bit values. + a *= c1 + a = rotate32(a, 17) + a *= c2 + h ^= a + h = rotate32(h, 19) + return h*5 + 0xe6546b64 +} + +func hash32Len13to24(s []byte, length int) uint32 { + a := fetch32(s[(length>>1)-4:]) + b := fetch32(s[4:]) + c := fetch32(s[length-8:]) + d := fetch32(s[length>>1:]) + e := fetch32(s) + f := fetch32(s[length-4:]) + h := uint32(length) + return fmix(mur(f, mur(e, mur(d, mur(c, mur(b, mur(a, h))))))) +} + +func hash32Len0to4(s []byte, length int) uint32 { + b := uint32(0) + c := uint32(9) + for _, v := range s[:length] { + b = uint32(int64(b)*int64(c1) + int64(int8(v))) + c ^= b + } + return fmix(mur(b, mur(uint32(length), c))) +} + +func hash32Len5to12(s []byte, length int) uint32 { + a := uint32(length) + b := uint32(length) * 5 + c := uint32(9) + d := b + a += fetch32(s) + b += fetch32(s[length-4:]) + c += fetch32(s[(length>>1)&4:]) + return fmix(mur(c, mur(b, mur(a, d)))) +} + +// Hash32 return 32-bit hash. 
+func Hash32(s []byte) uint32 { + length := len(s) + if length <= 4 { + return hash32Len0to4(s, length) + } + if length <= 12 { + return hash32Len5to12(s, length) + } + if length <= 24 { + return hash32Len13to24(s, length) + } + + // len > 24 + h := uint32(length) + g := c1 * uint32(length) + f := g + a0 := rotate32(fetch32(s[length-4:])*c1, 17) * c2 + a1 := rotate32(fetch32(s[length-8:])*c1, 17) * c2 + a2 := rotate32(fetch32(s[length-16:])*c1, 17) * c2 + a3 := rotate32(fetch32(s[length-12:])*c1, 17) * c2 + a4 := rotate32(fetch32(s[length-20:])*c1, 17) * c2 + h ^= a0 + h = rotate32(h, 19) + h = h*5 + 0xe6546b64 + h ^= a2 + h = rotate32(h, 19) + h = h*5 + 0xe6546b64 + g ^= a1 + g = rotate32(g, 19) + g = g*5 + 0xe6546b64 + g ^= a3 + g = rotate32(g, 19) + g = g*5 + 0xe6546b64 + f += a4 + f = rotate32(f, 19) + f = f*5 + 0xe6546b64 + iters := (length - 1) / 20 + for { + a0 := rotate32(fetch32(s)*c1, 17) * c2 + a1 := fetch32(s[4:]) + a2 := rotate32(fetch32(s[8:])*c1, 17) * c2 + a3 := rotate32(fetch32(s[12:])*c1, 17) * c2 + a4 := fetch32(s[16:]) + h ^= a0 + h = rotate32(h, 18) + h = h*5 + 0xe6546b64 + f += a1 + f = rotate32(f, 19) + f *= c1 + g += a2 + g = rotate32(g, 18) + g = g*5 + 0xe6546b64 + h ^= a3 + a1 + h = rotate32(h, 19) + h = h*5 + 0xe6546b64 + g ^= a4 + g = bswap32(g) * 5 + h += a4 * 5 + h = bswap32(h) + f += a0 + f, h, g = g, f, h + s = s[20:] + + iters-- + if iters == 0 { + break + } + } + g = rotate32(g, 11) * c1 + g = rotate32(g, 17) * c1 + f = rotate32(f, 11) * c1 + f = rotate32(f, 17) * c1 + h = rotate32(h+g, 19) + h = h*5 + 0xe6546b64 + h = rotate32(h, 17) * c1 + h = rotate32(h+f, 19) + h = h*5 + 0xe6546b64 + h = rotate32(h, 17) * c1 + return h +} diff --git a/vendor/github.com/go-faster/city/64.go b/vendor/github.com/go-faster/city/64.go new file mode 100644 index 0000000..f7009a8 --- /dev/null +++ b/vendor/github.com/go-faster/city/64.go @@ -0,0 +1,203 @@ +package city + +import "encoding/binary" + +func bswap64(x uint64) uint64 { + return ((x & 0xff00000000000000) >> 56) | + ((x & 0x00ff000000000000) >> 40) | + ((x & 0x0000ff0000000000) >> 24) | + ((x & 0x000000ff00000000) >> 8) | + ((x & 0x00000000ff000000) << 8) | + ((x & 0x0000000000ff0000) << 24) | + ((x & 0x000000000000ff00) << 40) | + ((x & 0x00000000000000ff) << 56) +} + +// Bitwise right rotate. +func rot64(val uint64, shift uint) uint64 { + // Avoid shifting by 64: doing so yields an undefined result. + if shift == 0 { + return val + } + return (val >> shift) | val<<(64-shift) +} + +func shiftMix(val uint64) uint64 { + return val ^ (val >> 47) +} + +func hash128to64(x U128) uint64 { + const mul = uint64(0x9ddfea08eb382d69) + a := (x.Low ^ x.High) * mul + a ^= a >> 47 + b := (x.High ^ a) * mul + b ^= b >> 47 + b *= mul + return b +} + +func hash16(u, v uint64) uint64 { + return hash128to64(U128{u, v}) +} + +func hash16mul(u, v, mul uint64) uint64 { + // Murmur-inspired hashing. 
+ a := (u ^ v) * mul + a ^= a >> 47 + b := (v ^ a) * mul + b ^= b >> 47 + b *= mul + return b +} + +func hash0to16(s []byte, length int) uint64 { + if length >= 8 { + mul := k2 + uint64(length)*2 + a := binary.LittleEndian.Uint64(s) + k2 + b := binary.LittleEndian.Uint64(s[length-8:]) + c := rot64(b, 37)*mul + a + d := (rot64(a, 25) + b) * mul + return hash16mul(c, d, mul) + } + if length >= 4 { + mul := k2 + uint64(length)*2 + a := uint64(fetch32(s)) + first := uint64(length) + (a << 3) + second := uint64(fetch32(s[length-4:])) + result := hash16mul( + first, + second, + mul) + return result + } + if length > 0 { + a := s[0] + b := s[length>>1] + c := s[length-1] + y := uint32(a) + (uint32(b) << 8) + z := uint32(length) + (uint32(c) << 2) + return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2 + } + return k2 +} + +// This probably works well for 16-byte strings as well, but is may be overkill +// in that case +func hash17to32(s []byte, length int) uint64 { + mul := k2 + uint64(length)*2 + a := binary.LittleEndian.Uint64(s) * k1 + b := binary.LittleEndian.Uint64(s[8:]) + c := binary.LittleEndian.Uint64(s[length-8:]) * mul + d := binary.LittleEndian.Uint64(s[length-16:]) * k2 + return hash16mul( + rot64(a+b, 43)+rot64(c, 30)+d, + a+rot64(b+k2, 18)+c, + mul, + ) +} + +// Return a 16-byte hash for 48 bytes. Quick and dirty. +// callers do best to use "random-looking" values for a and b. +func weakHash32Seeds(w, x, y, z, a, b uint64) U128 { + a += w + b = rot64(b+a+z, 21) + c := a + a += x + a += y + b += rot64(a, 44) + return U128{a + z, b + c} +} + +// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty. +func weakHash32SeedsByte(s []byte, a, b uint64) U128 { + _ = s[31] + return weakHash32Seeds( + binary.LittleEndian.Uint64(s[0:0+8:0+8]), + binary.LittleEndian.Uint64(s[8:8+8:8+8]), + binary.LittleEndian.Uint64(s[16:16+8:16+8]), + binary.LittleEndian.Uint64(s[24:24+8:24+8]), + a, + b, + ) +} + +// Return an 8-byte hash for 33 to 64 bytes. +func hash33to64(s []byte, length int) uint64 { + mul := k2 + uint64(length)*2 + a := binary.LittleEndian.Uint64(s) * k2 + b := binary.LittleEndian.Uint64(s[8:]) + c := binary.LittleEndian.Uint64(s[length-24:]) + d := binary.LittleEndian.Uint64(s[length-32:]) + e := binary.LittleEndian.Uint64(s[16:]) * k2 + f := binary.LittleEndian.Uint64(s[24:]) * 9 + g := binary.LittleEndian.Uint64(s[length-8:]) + h := binary.LittleEndian.Uint64(s[length-16:]) * mul + u := rot64(a+g, 43) + (rot64(b, 30)+c)*9 + v := ((a + g) ^ d) + f + 1 + w := bswap64((u+v)*mul) + h + x := rot64(e+f, 42) + c + y := (bswap64((v+w)*mul) + g) * mul + z := e + f + c + a = bswap64((x+z)*mul+y) + b + b = shiftMix((z+a)*mul+d+h) * mul + return b + x +} + +// nearestMultiple64 returns the nearest multiple of 64 for length of +// provided byte slice. +func nearestMultiple64(b []byte) int { + return ((len(b)) - 1) & ^63 +} + +// Hash64 return a 64-bit hash. +func Hash64(s []byte) uint64 { + length := len(s) + if length <= 16 { + return hash0to16(s, length) + } + if length <= 32 { + return hash17to32(s, length) + } + if length <= 64 { + return hash33to64(s, length) + } + + // For string over 64 bytes we hash the end first, and then as we + // loop we keep 56 bytes of state: v, w, x, y and z. 
+ x := binary.LittleEndian.Uint64(s[length-40:]) + y := binary.LittleEndian.Uint64(s[length-16:]) + binary.LittleEndian.Uint64(s[length-56:]) + z := hash16(binary.LittleEndian.Uint64(s[length-48:])+uint64(length), binary.LittleEndian.Uint64(s[length-24:])) + v := weakHash32SeedsByte(s[length-64:], uint64(length), z) + w := weakHash32SeedsByte(s[length-32:], y+k1, x) + x = x*k1 + binary.LittleEndian.Uint64(s) + + // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks. + s = s[:nearestMultiple64(s)] + for len(s) > 0 { + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[8:]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[48:]), 42) * k1 + x ^= w.High + y += v.Low + binary.LittleEndian.Uint64(s[40:]) + z = rot64(z+w.Low, 33) * k1 + v = weakHash32SeedsByte(s, v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[32:], z+w.High, y+binary.LittleEndian.Uint64(s[16:])) + + z, x = x, z + s = s[64:] + } + + return hash16( + hash16(v.Low, w.Low)+shiftMix(y)*k1+z, + hash16(v.High, w.High)+x, + ) +} + +// Hash64WithSeed return a 64-bit hash with a seed. +func Hash64WithSeed(s []byte, seed uint64) uint64 { + return Hash64WithSeeds(s, k2, seed) +} + +// Hash64WithSeeds return a 64-bit hash with two seeds. +func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { + return hash16(Hash64(s)-seed0, seed1) +} diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/github.com/go-faster/city/LICENSE similarity index 87% rename from vendor/go.uber.org/atomic/LICENSE.txt rename to vendor/github.com/go-faster/city/LICENSE index 8765c9f..6946826 100644 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ b/vendor/github.com/go-faster/city/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2016 Uber Technologies, Inc. +MIT License + +Copyright (c) 2018 tenfy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-faster/city/Makefile b/vendor/github.com/go-faster/city/Makefile new file mode 100644 index 0000000..922de89 --- /dev/null +++ b/vendor/github.com/go-faster/city/Makefile @@ -0,0 +1,13 @@ +test: + @./go.test.sh +.PHONY: test + +coverage: + @./go.coverage.sh +.PHONY: coverage + +test_fast: + go test ./... 
+ +tidy: + go mod tidy diff --git a/vendor/github.com/go-faster/city/README.md b/vendor/github.com/go-faster/city/README.md new file mode 100644 index 0000000..276a42c --- /dev/null +++ b/vendor/github.com/go-faster/city/README.md @@ -0,0 +1,46 @@ +# city [![](https://img.shields.io/badge/go-pkg-00ADD8)](https://pkg.go.dev/github.com/go-faster/city#section-documentation) [![](https://img.shields.io/codecov/c/github/go-faster/city?label=cover)](https://codecov.io/gh/go-faster/city) [![stable](https://img.shields.io/badge/-stable-brightgreen)](https://go-faster.org/docs/projects/status#stable) +[CityHash](https://github.com/google/cityhash) in Go. Fork of [tenfyzhong/cityhash](https://github.com/tenfyzhong/cityhash). + +Note: **prefer [xxhash](https://github.com/cespare/xxhash) as non-cryptographic hash algorithm**, this package is intended +for places where CityHash is already used. + +CityHash **is not compatible** to [FarmHash](https://github.com/google/farmhash), use [go-farm](https://github.com/dgryski/go-farm). + +```console +go get github.com/go-faster/city +``` + +```go +city.Hash128([]byte("hello")) +``` + +* Faster +* Supports ClickHouse hash + +``` +name old time/op new time/op delta +CityHash64-32 333ns ± 2% 108ns ± 3% -67.57% (p=0.000 n=10+10) +CityHash128-32 347ns ± 2% 112ns ± 2% -67.74% (p=0.000 n=9+10) + +name old speed new speed delta +CityHash64-32 3.08GB/s ± 2% 9.49GB/s ± 3% +208.40% (p=0.000 n=10+10) +CityHash128-32 2.95GB/s ± 2% 9.14GB/s ± 2% +209.98% (p=0.000 n=9+10) +``` + +## Benchmarks +``` +goos: linux +goarch: amd64 +pkg: github.com/go-faster/city +cpu: AMD Ryzen 9 5950X 16-Core Processor +BenchmarkClickHouse128/16 2213.98 MB/s +BenchmarkClickHouse128/64 4712.24 MB/s +BenchmarkClickHouse128/256 7561.58 MB/s +BenchmarkClickHouse128/1024 10158.98 MB/s +BenchmarkClickHouse64 10379.89 MB/s +BenchmarkCityHash32 3140.54 MB/s +BenchmarkCityHash64 9508.45 MB/s +BenchmarkCityHash128 9304.27 MB/s +BenchmarkCityHash64Small 2700.84 MB/s +BenchmarkCityHash128Small 1175.65 MB/s +``` diff --git a/vendor/github.com/go-faster/city/ch_128.go b/vendor/github.com/go-faster/city/ch_128.go new file mode 100644 index 0000000..a69c015 --- /dev/null +++ b/vendor/github.com/go-faster/city/ch_128.go @@ -0,0 +1,158 @@ +package city + +import "encoding/binary" + +// A subroutine for CH128(). Returns a decent 128-bit hash for strings +// of any length representable in signed long. Based on City and Mumur. +func chMurmur(s []byte, seed U128) U128 { + length := len(s) + a := seed.Low + b := seed.High + c := uint64(0) + d := uint64(0) + l := length - 16 + if len(s) <= 16 { // length <= 16 + a = shiftMix(a*k1) * k1 + c = b*k1 + ch0to16(s, length) + + if length >= 8 { + d = shiftMix(a + binary.LittleEndian.Uint64(s)) + } else { + d = shiftMix(a + c) + } + } else { // length > 16 + c = ch16(binary.LittleEndian.Uint64(s[length-8:])+k1, a) + d = ch16(b+uint64(length), c+binary.LittleEndian.Uint64(s[length-16:])) + a += d + + { + a ^= shiftMix(binary.LittleEndian.Uint64(s[0:8:8])*k1) * k1 + a *= k1 + b ^= a + c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8:8+8])*k1) * k1 + c *= k1 + d ^= c + s = s[16:] + l -= 16 + } + + if l > 0 { + for len(s) >= 16 { + a ^= shiftMix(binary.LittleEndian.Uint64(s[0:8:8])*k1) * k1 + a *= k1 + b ^= a + c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8:8+8])*k1) * k1 + c *= k1 + d ^= c + s = s[16:] + l -= 16 + + if l <= 0 { + break + } + } + } + } + a = ch16(a, c) + b = ch16(d, b) + return U128{a ^ b, ch16(b, a)} +} + +// CH128 returns 128-bit ClickHouse CityHash. 
+func CH128(s []byte) U128 { + if len(s) >= 16 { + return CH128Seed(s[16:], U128{ + Low: binary.LittleEndian.Uint64(s[0:8:8]) ^ k3, + High: binary.LittleEndian.Uint64(s[8 : 8+8 : 8+8]), + }) + } + if len(s) >= 8 { + l := uint64(len(s)) + return CH128Seed(nil, U128{ + Low: binary.LittleEndian.Uint64(s) ^ (l * k0), + High: binary.LittleEndian.Uint64(s[l-8:]) ^ k1, + }) + } + return CH128Seed(s, U128{Low: k0, High: k1}) +} + +// CH128Seed returns 128-bit seeded ClickHouse CityHash. +func CH128Seed(s []byte, seed U128) U128 { + if len(s) < 128 { + return chMurmur(s, seed) + } + + // Saving initial input for tail hashing. + t := s + + // We expect len >= 128 to be the common case. Keep 56 bytes of state: + // v, w, x, y and z. + var v, w U128 + x := seed.Low + y := seed.High + z := uint64(len(s)) * k1 + + { + subSlice := (*[96]byte)(s[0:]) + v.Low = rot64(y^k1, 49)*k1 + binary.LittleEndian.Uint64(subSlice[0:]) + v.High = rot64(v.Low, 42)*k1 + binary.LittleEndian.Uint64(subSlice[8:]) + w.Low = rot64(y+z, 35)*k1 + x + w.High = rot64(x+binary.LittleEndian.Uint64(subSlice[88:]), 53) * k1 + } + + // This is the same inner loop as CH64(), manually unrolled. + for len(s) >= 128 { + // Roll 1. + { + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[16:16+8:16+8]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[48:48+8:48+8]), 42) * k1 + + x ^= w.High + y ^= v.Low + + z = rot64(z^w.Low, 33) + v = weakHash32SeedsByte(s, v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[32:], z+w.High, y) + z, x = x, z + } + + // Roll 2. + { + const offset = 64 + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[offset+16:offset+16+8:offset+16+8]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[offset+48:offset+48+8:offset+48+8]), 42) * k1 + x ^= w.High + y ^= v.Low + + z = rot64(z^w.Low, 33) + v = weakHash32SeedsByte(s[offset:], v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[offset+32:], z+w.High, y) + z, x = x, z + } + s = s[128:] + } + + y += rot64(w.Low, 37)*k0 + z + x += rot64(v.Low+z, 49) * k0 + + // If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s. + for i := 0; i < len(s); { + i += 32 + y = rot64(y-x, 42)*k0 + v.High + w.Low += binary.LittleEndian.Uint64(t[len(t)-i+16:]) + x = rot64(x, 49)*k0 + w.Low + w.Low += v.Low + v = weakHash32SeedsByte(t[len(t)-i:], v.Low, v.High) + } + + // At this point our 48 bytes of state should contain more than + // enough information for a strong 128-bit hash. We use two + // different 48-byte-to-8-byte hashes to get a 16-byte final result. + x = ch16(x, v.Low) + y = ch16(y, w.Low) + + return U128{ + Low: ch16(x+v.High, w.High) + y, + High: ch16(x+w.High, y+v.High), + } +} diff --git a/vendor/github.com/go-faster/city/ch_64.go b/vendor/github.com/go-faster/city/ch_64.go new file mode 100644 index 0000000..12e7515 --- /dev/null +++ b/vendor/github.com/go-faster/city/ch_64.go @@ -0,0 +1,118 @@ +package city + +import "encoding/binary" + +// Ref: +// https://github.com/xzkostyan/python-cityhash/commit/f4091154ff2c6c0de11d5d6673b5007fdd6355ad + +const k3 uint64 = 0xc949d7c7509e6557 + +func ch16(u, v uint64) uint64 { + return hash128to64(U128{u, v}) +} + +// Return an 8-byte hash for 33 to 64 bytes. 
+func ch33to64(s []byte, length int) uint64 { + z := binary.LittleEndian.Uint64(s[24:]) + a := binary.LittleEndian.Uint64(s) + (uint64(length)+binary.LittleEndian.Uint64(s[length-16:]))*k0 + b := rot64(a+z, 52) + c := rot64(a, 37) + + a += binary.LittleEndian.Uint64(s[8:]) + c += rot64(a, 7) + a += binary.LittleEndian.Uint64(s[16:]) + + vf := a + z + vs := b + rot64(a, 31) + c + + a = binary.LittleEndian.Uint64(s[16:]) + binary.LittleEndian.Uint64(s[length-32:]) + z = binary.LittleEndian.Uint64(s[length-8:]) + b = rot64(a+z, 52) + c = rot64(a, 37) + a += binary.LittleEndian.Uint64(s[length-24:]) + c += rot64(a, 7) + a += binary.LittleEndian.Uint64(s[length-16:]) + + wf := a + z + ws := b + rot64(a, 31) + c + r := shiftMix((vf+ws)*k2 + (wf+vs)*k0) + return shiftMix(r*k0+vs) * k2 +} + +func ch17to32(s []byte, length int) uint64 { + a := binary.LittleEndian.Uint64(s) * k1 + b := binary.LittleEndian.Uint64(s[8:]) + c := binary.LittleEndian.Uint64(s[length-8:]) * k2 + d := binary.LittleEndian.Uint64(s[length-16:]) * k0 + return hash16( + rot64(a-b, 43)+rot64(c, 30)+d, + a+rot64(b^k3, 20)-c+uint64(length), + ) +} + +func ch0to16(s []byte, length int) uint64 { + if length > 8 { + a := binary.LittleEndian.Uint64(s) + b := binary.LittleEndian.Uint64(s[length-8:]) + return ch16(a, rot64(b+uint64(length), uint(length))) ^ b + } + if length >= 4 { + a := uint64(fetch32(s)) + return ch16(uint64(length)+(a<<3), uint64(fetch32(s[length-4:]))) + } + if length > 0 { + a := s[0] + b := s[length>>1] + c := s[length-1] + y := uint32(a) + (uint32(b) << 8) + z := uint32(length) + (uint32(c) << 2) + return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2 + } + return k2 +} + +// CH64 returns ClickHouse version of Hash64. +func CH64(s []byte) uint64 { + length := len(s) + if length <= 16 { + return ch0to16(s, length) + } + if length <= 32 { + return ch17to32(s, length) + } + if length <= 64 { + return ch33to64(s, length) + } + + x := binary.LittleEndian.Uint64(s) + y := binary.LittleEndian.Uint64(s[length-16:]) ^ k1 + z := binary.LittleEndian.Uint64(s[length-56:]) ^ k0 + + v := weakHash32SeedsByte(s[length-64:], uint64(length), y) + w := weakHash32SeedsByte(s[length-32:], uint64(length)*k1, k0) + z += shiftMix(v.High) * k1 + x = rot64(z+x, 39) * k1 + y = rot64(y, 33) * k1 + + // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks. + s = s[:nearestMultiple64(s)] + for len(s) > 0 { + x = rot64(x+y+v.Low+binary.LittleEndian.Uint64(s[16:]), 37) * k1 + y = rot64(y+v.High+binary.LittleEndian.Uint64(s[48:]), 42) * k1 + + x ^= w.High + y ^= v.Low + + z = rot64(z^w.Low, 33) + v = weakHash32SeedsByte(s, v.High*k1, x+w.Low) + w = weakHash32SeedsByte(s[32:], z+w.High, y) + + z, x = x, z + s = s[64:] + } + + return ch16( + ch16(v.Low, w.Low)+shiftMix(y)*k1+z, + ch16(v.High, w.High)+x, + ) +} diff --git a/vendor/github.com/go-faster/city/doc.go b/vendor/github.com/go-faster/city/doc.go new file mode 100644 index 0000000..39e0dc0 --- /dev/null +++ b/vendor/github.com/go-faster/city/doc.go @@ -0,0 +1,2 @@ +// Package city implements CityHash in go. +package city diff --git a/vendor/github.com/go-faster/city/go.coverage.sh b/vendor/github.com/go-faster/city/go.coverage.sh new file mode 100644 index 0000000..c12e915 --- /dev/null +++ b/vendor/github.com/go-faster/city/go.coverage.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +go test -race -v -coverpkg=./... -coverprofile=profile.out ./... 
+go tool cover -func profile.out diff --git a/vendor/github.com/go-faster/city/go.test.sh b/vendor/github.com/go-faster/city/go.test.sh new file mode 100644 index 0000000..f3e0bbe --- /dev/null +++ b/vendor/github.com/go-faster/city/go.test.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -e + +echo "test" +go test --timeout 5m ./... + +echo "test -race" +go test --timeout 5m -race ./... diff --git a/vendor/github.com/go-faster/errors/.codecov.yml b/vendor/github.com/go-faster/errors/.codecov.yml new file mode 100644 index 0000000..e61186c --- /dev/null +++ b/vendor/github.com/go-faster/errors/.codecov.yml @@ -0,0 +1,8 @@ +ignore: + - cmd/**/*.go +coverage: + status: + patch: false + project: + default: + threshold: 0.5% diff --git a/vendor/github.com/go-faster/errors/.editorconfig b/vendor/github.com/go-faster/errors/.editorconfig new file mode 100644 index 0000000..db2e5a0 --- /dev/null +++ b/vendor/github.com/go-faster/errors/.editorconfig @@ -0,0 +1,25 @@ +# http://editorconfig.org/ + +root = true + +[*] +charset = utf-8 +insert_final_newline = true +trim_trailing_whitespace = true +end_of_line = lf + +[{*.go, go.mod}] +indent_style = tab +indent_size = 4 + +[{*.yml,*.yaml}] +indent_style = space +indent_size = 2 + +[*.py] +indent_style = space +indent_size = 4 + +# Makefiles always use tabs for indentation +[Makefile] +indent_style = tab diff --git a/vendor/github.com/go-faster/errors/.gitignore b/vendor/github.com/go-faster/errors/.gitignore new file mode 100644 index 0000000..1d41f87 --- /dev/null +++ b/vendor/github.com/go-faster/errors/.gitignore @@ -0,0 +1,8 @@ +.idea +_bin/* +./examples + +*-fuzz.zip + +*.out +*.dump diff --git a/vendor/github.com/go-faster/errors/.golangci.yml b/vendor/github.com/go-faster/errors/.golangci.yml new file mode 100644 index 0000000..430e979 --- /dev/null +++ b/vendor/github.com/go-faster/errors/.golangci.yml @@ -0,0 +1,104 @@ +linters-settings: + govet: + check-shadowing: true + gocyclo: + min-complexity: 15 + maligned: + suggest-new: true + dupl: + threshold: 120 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + lll: + line-length: 140 + goimports: + local-prefixes: github.com/ogen/ + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - hugeParam + - rangeValCopy + - exitAfterDefer + - whyNoLint + - singleCaseSwitch + - commentedOutCode + - appendAssign + - unnecessaryBlock + - redundantSprint + +linters: + disable-all: true + enable: + - dogsled + - errcheck + - goconst + - gocritic + - gofmt + - goimports + - revive + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - whitespace + + # Do not enable: + # - wsl (too opinionated about newlines) + # - godox (todos are OK) + # - bodyclose (false positives on helper functions) + # - prealloc (not worth it in scope of this project) + # - maligned (same as prealloc) + # - funlen (gocyclo is enough) + # - gochecknoglobals (we know when it is ok to use globals) + +issues: + exclude-use-default: false + exclude-rules: + # Disable linters that are annoying in tests. + - path: _test\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + - funlen + - goconst + - gocognit + - scopelint + - lll + + - path: _test\.go + text: "Combine" + linters: [gocritic] + + # Ignore shadowing of err. + - linters: [ govet ] + text: 'declaration of "(err|ctx|log|c)"' + + # Ignore linters in main packages. 
+ - path: main\.go + linters: [ goconst, funlen, gocognit, gocyclo ] + + - path: _test\.go + text: "suspicious identical" + linters: [gocritic] + + - path: _test\.go + text: "identical expressions" + linters: [staticcheck] diff --git a/vendor/github.com/go-faster/errors/LICENSE b/vendor/github.com/go-faster/errors/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/github.com/go-faster/errors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-faster/errors/Makefile b/vendor/github.com/go-faster/errors/Makefile new file mode 100644 index 0000000..55c6970 --- /dev/null +++ b/vendor/github.com/go-faster/errors/Makefile @@ -0,0 +1,10 @@ +test: + @./go.test.sh +.PHONY: test + +coverage: + @./go.coverage.sh +.PHONY: coverage + +tidy: + go mod tidy diff --git a/vendor/github.com/go-faster/errors/README.md b/vendor/github.com/go-faster/errors/README.md new file mode 100644 index 0000000..9f21e13 --- /dev/null +++ b/vendor/github.com/go-faster/errors/README.md @@ -0,0 +1,56 @@ +# errors [![Go Reference](https://img.shields.io/badge/go-pkg-00ADD8)](https://pkg.go.dev/github.com/go-faster/errors#section-documentation) [![codecov](https://img.shields.io/codecov/c/github/go-faster/errors?label=cover)](https://codecov.io/gh/go-faster/errors) + +Fork of [xerrors](https://pkg.go.dev/golang.org/x/xerrors) with explicit [Wrap](https://pkg.go.dev/github.com/go-faster/errors#Wrap) instead of `%w`. + +> Clear is better than clever. + +``` +go get github.com/go-faster/errors +``` + +```go +errors.Wrap(err, "message") +``` + +## Why +* Using `Wrap` is the most explicit way to wrap errors +* Wrapping with `fmt.Errorf("foo: %w", err)` is implicit, redundant and error-prone +* Parsing `"foo: %w"` is implicit, redundant and slow +* The [pkg/errors](https://github.com/pkg/errors) and [xerrors](https://pkg.go.dev/golang.org/x/xerrors) are not maintainted +* The [cockroachdb/errors](https://github.com/cockroachdb/errors) is too big +* The `errors` has no caller stack trace + +## Don't need traces? 
+Call `errors.DisableTrace` or use build tag `noerrtrace`. + +## Additional features + +### Into + +Generic type assertion for errors. + +```go +// Into finds the first error in err's chain that matches target type T, and if so, returns it. +// +// Into is type-safe alternative to As. +func Into[T error](err error) (val T, ok bool) +``` + +```go +if pathError, ok := errors.Into[*os.PathError](err); ok { + fmt.Println("Failed at path:", pathError.Path) +} +``` + +### Must + +Must is a generic helper, like template.Must, that wraps a call to a function returning (T, error) +and panics if the error is non-nil. + +```go +func Must[T any](val T, err error) T +``` + +## License + +BSD-3-Clause, same as Go sources diff --git a/vendor/github.com/go-faster/errors/adaptor.go b/vendor/github.com/go-faster/errors/adaptor.go new file mode 100644 index 0000000..f58a85a --- /dev/null +++ b/vendor/github.com/go-faster/errors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. + + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). + + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + p.buf.WriteString(stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. + direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("") + } + p.buf.WriteByte(')') + _, _ = io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + _, _ = p.buf.WriteString(v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s. 
+ format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + _, _ = fmt.Fprintf(s, string(format), p.buf.String()) + } else { + _, _ = io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + _, _ = fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + _, _ = fmt.Fprintf((*state)(s), format, args...) + } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/github.com/go-faster/errors/doc.go b/vendor/github.com/go-faster/errors/doc.go new file mode 100644 index 0000000..d97f085 --- /dev/null +++ b/vendor/github.com/go-faster/errors/doc.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +// +// This package expands "errors" with stack traces and explicit error +// wrapping. +package errors diff --git a/vendor/github.com/go-faster/errors/errors.go b/vendor/github.com/go-faster/errors/errors.go new file mode 100644 index 0000000..6826b9d --- /dev/null +++ b/vendor/github.com/go-faster/errors/errors.go @@ -0,0 +1,37 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +import ( + "errors" + "fmt" +) + +// errorString is a trivial implementation of error. +type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. 
+func New(text string) error { + if !Trace() { + return errors.New(text) + } + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { return e.s } + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/github.com/go-faster/errors/format.go b/vendor/github.com/go-faster/errors/format.go new file mode 100644 index 0000000..b83d9cb --- /dev/null +++ b/vendor/github.com/go-faster/errors/format.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +import ( + "fmt" + "strings" +) + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. + Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} + +// Errorf creates new error with format. +func Errorf(format string, a ...interface{}) error { + if !Trace() || strings.Contains(format, "%w") { + return fmt.Errorf(format, a...) + } + return &errorString{fmt.Sprintf(format, a...), Caller(1)} +} diff --git a/vendor/github.com/go-faster/errors/frame.go b/vendor/github.com/go-faster/errors/frame.go new file mode 100644 index 0000000..e74908c --- /dev/null +++ b/vendor/github.com/go-faster/errors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. +type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// Location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. +func (f Frame) Location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. 
+// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.Location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/github.com/go-faster/errors/go.coverage.sh b/vendor/github.com/go-faster/errors/go.coverage.sh new file mode 100644 index 0000000..2211326 --- /dev/null +++ b/vendor/github.com/go-faster/errors/go.coverage.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +go test -v -coverpkg=./... -coverprofile=profile.out ./... +go tool cover -func profile.out diff --git a/vendor/github.com/go-faster/errors/go.test.sh b/vendor/github.com/go-faster/errors/go.test.sh new file mode 100644 index 0000000..8f19602 --- /dev/null +++ b/vendor/github.com/go-faster/errors/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +# test with -race +echo "with race:" +go test --timeout 5m -race ./... + +# test with noerrtrace build tag +tag=noerrtrace +echo "with ${tag} build tag:" +go test -tags "${tag}" --timeout 5m -race ./... diff --git a/vendor/github.com/go-faster/errors/into.go b/vendor/github.com/go-faster/errors/into.go new file mode 100644 index 0000000..01ef7cf --- /dev/null +++ b/vendor/github.com/go-faster/errors/into.go @@ -0,0 +1,11 @@ +//go:build go1.18 + +package errors + +// Into finds the first error in err's chain that matches target type T, and if so, returns it. +// +// Into is type-safe alternative to As. +func Into[T error](err error) (val T, ok bool) { + ok = As(err, &val) + return val, ok +} diff --git a/vendor/github.com/go-faster/errors/join_go120.go b/vendor/github.com/go-faster/errors/join_go120.go new file mode 100644 index 0000000..575d620 --- /dev/null +++ b/vendor/github.com/go-faster/errors/join_go120.go @@ -0,0 +1,20 @@ +//go:build go1.20 +// +build go1.20 + +package errors + +import "errors" + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +// +// Available only for go 1.20 or superior. +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-faster/errors/must.go b/vendor/github.com/go-faster/errors/must.go new file mode 100644 index 0000000..ff06f8c --- /dev/null +++ b/vendor/github.com/go-faster/errors/must.go @@ -0,0 +1,12 @@ +//go:build go1.18 + +package errors + +// Must is a generic helper, like template.Must, that wraps a call to a function returning (T, error) +// and panics if the error is non-nil. +func Must[T any](val T, err error) T { + if err != nil { + panic(err) + } + return val +} diff --git a/vendor/github.com/go-faster/errors/noerrtrace.go b/vendor/github.com/go-faster/errors/noerrtrace.go new file mode 100644 index 0000000..ff03176 --- /dev/null +++ b/vendor/github.com/go-faster/errors/noerrtrace.go @@ -0,0 +1,13 @@ +//go:build noerrtrace +// +build noerrtrace + +package errors + +// enableTrace does nothing. +func enableTrace() {} + +// DisableTrace does nothing. +func DisableTrace() {} + +// Trace always returns false. 
+func Trace() bool { return false } diff --git a/vendor/github.com/go-faster/errors/trace.go b/vendor/github.com/go-faster/errors/trace.go new file mode 100644 index 0000000..3cf4689 --- /dev/null +++ b/vendor/github.com/go-faster/errors/trace.go @@ -0,0 +1,37 @@ +//go:build !noerrtrace +// +build !noerrtrace + +package errors + +import ( + "sync/atomic" +) + +var traceFlag int64 + +const ( + traceEnabled = 0 // enabled by default + traceDisabled = 1 +) + +// setTrace sets tracing flag that controls capturing caller frames. +func setTrace(trace bool) { + if trace { + atomic.StoreInt64(&traceFlag, traceEnabled) + } else { + atomic.StoreInt64(&traceFlag, traceDisabled) + } +} + +// enableTrace enables capturing caller frames. +// +// Intentionally left unexported. +func enableTrace() { setTrace(true) } + +// DisableTrace disables capturing caller frames. +func DisableTrace() { setTrace(false) } + +// Trace reports whether caller stack capture is enabled. +func Trace() bool { + return atomic.LoadInt64(&traceFlag) == traceEnabled +} diff --git a/vendor/github.com/go-faster/errors/wrap.go b/vendor/github.com/go-faster/errors/wrap.go new file mode 100644 index 0000000..61f008f --- /dev/null +++ b/vendor/github.com/go-faster/errors/wrap.go @@ -0,0 +1,134 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +import ( + "errors" + "fmt" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. +func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} + +// Cause returns first recorded Frame. +func Cause(err error) (f Frame, r bool) { + for { + we, ok := err.(*wrapError) + if !ok { + return f, r + } + f = we.frame + r = r || ok + + err = we.err + } +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +// Wrap error with message and caller. +func Wrap(err error, message string) error { + frame := Frame{} + if Trace() { + frame = Caller(1) + } + return &wrapError{msg: message, err: err, frame: frame} +} + +// Wrapf wraps error with formatted message and caller. +func Wrapf(err error, format string, a ...interface{}) error { + frame := Frame{} + if Trace() { + frame = Caller(1) + } + msg := fmt.Sprintf(format, a...) + return &wrapError{msg: msg, err: err, frame: frame} +} + +// Is reports whether any error in err's chain matches target. 
+// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// +// An error type might provide an Is method so it can be treated as equivalent +// to an existing error. For example, if MyError defines +// +// func (m MyError) Is(target error) bool { return target == fs.ErrExist } +// +// then Is(MyError{}, fs.ErrExist) returns true. See syscall.Errno.Is for +// an example in the standard library. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. Otherwise, it returns false. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// An error type might provide an As method so it can be treated as if it were a +// different error type. +// +// As panics if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. +func As(err error, target interface{}) bool { return errors.As(err, target) } diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 0513275..4021b96 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -13,6 +13,7 @@ Aaron Hopkins Achille Roussel +Aidan Alex Snast Alexey Palazhchenko Andrew Reid @@ -20,12 +21,14 @@ Animesh Ray Arne Hormann Ariel Mashraki Asta Xie +Brian Hendriks Bulat Gaifullin Caine Jette Carlos Nieto Chris Kirkland Chris Moos Craig Wilson +Daemonxiao <735462752 at qq.com> Daniel Montoya Daniel Nichter Daniël van Eeden @@ -33,9 +36,11 @@ Dave Protasowski DisposaBoy Egor Smolyakov Erwan Martin +Evan Elias Evan Shaw Frederick Mayle Gustavo Kristic +Gusted Hajime Nakagami Hanno Braun Henri Yandell @@ -47,8 +52,11 @@ INADA Naoki Jacek Szwec James Harr Janek Vedock +Jason Ng +Jean-Yves Pellé Jeff Hodges Jeffrey Charles +Jennifer Purevsuren Jerome Meyer Jiajia Zhong Jian Zhen @@ -74,15 +82,19 @@ Maciej Zimnoch Michael Woolnough Nathanial Murphy Nicola Peduzzi +Oliver Bone Olivier Mengué oscarzhao Paul Bonser +Paulius Lozys Peter Schultz +Phil Porada Rebecca Chin Reed Allman Richard Wilkes Robert Russell Runrioter Wung +Samantha Frank Santhosh Kumar Tekuri Sho Iizuka Sho Ikeda @@ -93,6 +105,7 @@ Stan Putrya Stanley Gunawan Steven Hartland Tan Jinhua <312841925 at qq.com> +Tetsuro Aoki Thomas Wodarek Tim Ruffles Tom Jenkinson @@ -102,6 +115,7 @@ Xiangyu Hu Xiaobing Jiang Xiuming Chen Xuehong Chan +Zhang Xiang Zhenye Xie Zhixin Wen Ziheng Lyu @@ -111,14 +125,18 @@ Ziheng Lyu Barracuda Networks, Inc. Counting Ltd. DigitalOcean Inc. +Dolthub Inc. dyves labs AG Facebook Inc. GitHub Inc. Google Inc. InfoSum Ltd. Keybase Inc. +Microsoft Corp. Multiplay Ltd. Percona LLC +PingCAP Inc. Pivotal Inc. +Shattered Silicon Ltd. Stripe Inc. Zendesk Inc. 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md index 77024a8..0c9bd9b 100644 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -1,3 +1,58 @@ +## Version 1.8.1 (2024-03-26) + +Bugfixes: + +- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570) + +## Version 1.8.0 (2024-03-09) + +Major Changes: + +- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437) + - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation. + - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4. + - If you specify charset, go-mysql-driver sends `SET NAMES `. This uses the server's default collation for ``. + - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`. +- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432) + - This is backward incompatible in rare case. Check your DSN. +- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420) + - Use Go 1.18+ +- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452) + - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion. + - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable. + - This confused users because most user doesn't know when text/binary protocol used. + - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable. +- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now. + - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552) + - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469) + + +Other changes: + +- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422 +- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408 +- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428 +- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389 +- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424 +- Exec() now provides access to status of multiple statements. 
by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309 +- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499 +- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506 +- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470 +- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518 + +## Version 1.7.1 (2023-04-25) + +Changes: + + - bump actions/checkout@v3 and actions/setup-go@v3 (#1375) + - Add go1.20 and mariadb10.11 to the testing matrix (#1403) + - Increase default maxAllowedPacket size. (#1411) + +Bugfixes: + + - Use SET syntax as specified in the MySQL documentation (#1402) + + ## Version 1.7 (2022-11-29) Changes: @@ -149,7 +204,7 @@ New Features: - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - Support for returning table alias on Columns() (#289, #359, #382) - - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490) - Support for uint64 parameters with high bit set (#332, #345) - Cleartext authentication plugin support (#327) - Exported ParseDSN function and the Config struct (#403, #419, #429) @@ -193,7 +248,7 @@ Changes: - Also exported the MySQLWarning type - mysqlConn.Close returns the first error encountered instead of ignoring all errors - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + - readPacket() uses an iterative approach instead of the recursive approach to merge split packets New Features: @@ -241,7 +296,7 @@ Bugfixes: - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - Convert to DB timezone when inserting `time.Time` - - Splitted packets (more than 16MB) are now merged correctly + - Split packets (more than 16MB) are now merged correctly - Fixed false positive `io.EOF` errors when the data was fully read - Avoid panics on reuse of closed connections - Fixed empty string producing false nil values diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 25de2e5..4968cb0 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -40,15 +40,23 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Optional placeholder interpolation ## Requirements - * Go 1.13 or higher. We aim to support the 3 latest versions of Go. - * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +* Go 1.19 or higher. We aim to support the 3 latest versions of Go. +* MySQL (5.7+) and MariaDB (10.3+) are supported. +* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP. + * Do not ask questions about TiDB in our issue tracker or forum. + * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang) + * [Forum](https://ask.pingcap.com/) +* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+). + * Maintainers won't support them. Do not expect issues are investigated and resolved by maintainers. 
+ * Investigate issues yourself and please send a pull request to fix it. --------------------------------------- ## Installation Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: ```bash -$ go get -u github.com/go-sql-driver/mysql +go get -u github.com/go-sql-driver/mysql ``` Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. @@ -114,6 +122,12 @@ This has the same effect as an empty DSN string: ``` +`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes: + +``` +/dbname%2Fwithslash +``` + Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. #### Password @@ -121,7 +135,7 @@ Passwords can consist of any character. Escaping is **not** necessary. #### Protocol See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available. -In general you should use an Unix domain socket if available and TCP otherwise for best performance. +In general you should use a Unix domain socket if available and TCP otherwise for best performance. #### Address For TCP and UDP networks, addresses have the form `host[:port]`. @@ -145,7 +159,7 @@ Default: false ``` `allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files. -[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) +[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local) ##### `allowCleartextPasswords` @@ -194,10 +208,9 @@ Valid Values: Default: none ``` -Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). -Usage of the `charset` parameter is discouraged because it issues additional queries to the server. -Unless you need the fallback behavior, please use `collation` instead. +See also [Unicode Support](#unicode-support). ##### `checkConnLiveness` @@ -226,6 +239,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)). +See also [Unicode Support](#unicode-support). ##### `clientFoundRows` @@ -279,13 +293,22 @@ Note that this sets the location for time.Time values but does not change MySQL' Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. 
For example `US/Pacific` would be `loc=US%2FPacific`. +##### `timeTruncate` + +``` +Type: duration +Default: 0 +``` + +[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + ##### `maxAllowedPacket` ``` Type: decimal number -Default: 4194304 +Default: 64*1024*1024 ``` -Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. +Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. ##### `multiStatements` @@ -295,9 +318,25 @@ Valid Values: true, false Default: false ``` -Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. +Allow multiple statements in one query. This can be used to bach multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get result of the second and subsequent queries. + +When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly. + +It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example: -When `multiStatements` is used, `?` parameters must only be used in the first statement. +```go +conn, _ := db.Conn(ctx) +conn.Raw(func(conn any) error { + ex := conn.(driver.Execer) + res, err := ex.Exec(` + UPDATE point SET x = 1 WHERE y = 2; + UPDATE point SET x = 2 WHERE y = 3; + `, nil) + // Both slices have 2 elements. + log.Print(res.(mysql.Result).AllRowsAffected()) + log.Print(res.(mysql.Result).AllLastInsertIds()) +}) +``` ##### `parseTime` @@ -393,6 +432,15 @@ Default: 0 I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. +##### `connectionAttributes` + +``` +Type: comma-delimited string of user-defined "key:value" pairs +Valid Values: (:,:,...) +Default: none +``` + +[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time. ##### System Variables @@ -465,7 +513,7 @@ user:password@/ The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively. 
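As a rough sketch of how these settings fit together (placeholder values, not recommendations from this README): the pool limits come from `database/sql`, while the dial/read timeouts come from the DSN parameters described above.

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Dial timeout 5s and per-read timeout 30s, set via the DSN (placeholder credentials).
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?timeout=5s&readTimeout=30s")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pool sizing and lifetime are managed by database/sql, not by the driver.
	db.SetMaxOpenConns(10)                 // cap concurrent connections to the server
	db.SetMaxIdleConns(5)                  // keep a few idle connections for reuse
	db.SetConnMaxLifetime(3 * time.Minute) // recycle connections periodically
}
```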
## `ColumnType` Support -This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`. +This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`. ## `context.Context` Support Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. @@ -478,7 +526,7 @@ For this feature you need direct access to the package. Therefore you must chang import "github.com/go-sql-driver/mysql" ``` -Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). +Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)). To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. @@ -496,9 +544,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v ### Unicode support Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default. -Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. +Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter. -Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. +- When only the `charset` is specified, the `SET NAMES ` query is sent and the server's default collation is used. +- When both the `charset` and `collation` are specified, the `SET NAMES COLLATE ` query is sent. +- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead. See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support. 
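To make the three cases above concrete, here is a small sketch (credentials and database name are placeholders; only the query parameters matter):

```go
// Illustrative DSNs for the charset/collation behaviour described above.
const (
	dsnCharsetOnly      = "user:password@/dbname?charset=utf8mb4"                       // SET NAMES utf8mb4, server's default utf8mb4 collation
	dsnCharsetCollation = "user:password@/dbname?charset=utf8mb4&collation=utf8mb4_bin" // SET NAMES utf8mb4 COLLATE utf8mb4_bin
	dsnCollationOnly    = "user:password@/dbname?collation=utf8mb4_general_ci"          // collation sent in the handshake, no SET NAMES round trip
)
```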
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go index 1ff203e..74e1bd0 100644 --- a/vendor/github.com/go-sql-driver/mysql/auth.go +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -13,10 +13,13 @@ import ( "crypto/rsa" "crypto/sha1" "crypto/sha256" + "crypto/sha512" "crypto/x509" "encoding/pem" "fmt" "sync" + + "filippo.io/edwards25519" ) // server pub keys registry @@ -33,7 +36,7 @@ var ( // Note: The provided rsa.PublicKey instance is exclusively owned by the driver // after registering it and may not be modified. // -// data, err := ioutil.ReadFile("mykey.pem") +// data, err := os.ReadFile("mykey.pem") // if err != nil { // log.Fatal(err) // } @@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil) } +// authEd25519 does ed25519 authentication used by MariaDB. +func authEd25519(scramble []byte, password string) ([]byte, error) { + // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c + // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207 + h := sha512.Sum512([]byte(password)) + + s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32]) + if err != nil { + return nil, err + } + A := (&edwards25519.Point{}).ScalarBaseMult(s) + + mh := sha512.New() + mh.Write(h[32:]) + mh.Write(scramble) + messageDigest := mh.Sum(nil) + r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest) + if err != nil { + return nil, err + } + + R := (&edwards25519.Point{}).ScalarBaseMult(r) + + kh := sha512.New() + kh.Write(R.Bytes()) + kh.Write(A.Bytes()) + kh.Write(scramble) + hramDigest := kh.Sum(nil) + k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest) + if err != nil { + return nil, err + } + + S := k.MultiplyAdd(k, s, r) + + return append(R.Bytes(), S.Bytes()...), nil +} + func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error { enc, err := encryptPassword(mc.cfg.Passwd, seed, pub) if err != nil { @@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) return enc, err + case "client_ed25519": + if len(authData) != 32 { + return nil, ErrMalformPkt + } + return authEd25519(authData, mc.cfg.Passwd) + default: - errLog.Print("unknown auth plugin:", plugin) + mc.log("unknown auth plugin:", plugin) return nil, ErrUnknownPlugin } } @@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { switch plugin { - // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/ + // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/ case "caching_sha2_password": switch len(authData) { case 0: @@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { case 1: switch authData[0] { case cachingSha2PasswordFastAuthSuccess: - if err = mc.readResultOK(); err == nil { + if err = mc.resultUnchanged().readResultOK(); err == nil { return nil // auth successful } @@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { } if data[0] != iAuthMoreData { - return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication") + return fmt.Errorf("unexpected 
resp from server for caching_sha2_password, perform full authentication") } // parse public key block, rest := pem.Decode(data[1:]) if block == nil { - return fmt.Errorf("No Pem data found, data: %s", rest) + return fmt.Errorf("no pem data found, data: %s", rest) } pkix, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { @@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { return err } } - return mc.readResultOK() + return mc.resultUnchanged().readResultOK() default: return ErrMalformPkt @@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { if err != nil { return err } - return mc.readResultOK() + return mc.resultUnchanged().readResultOK() } default: diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go index 295bfbe..1cdf97b 100644 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -9,7 +9,7 @@ package mysql const defaultCollation = "utf8mb4_general_ci" -const binaryCollation = "binary" +const binaryCollationID = 63 // A list of available collations mapped to the internal ID. // To update this map use the following MySQL query: diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index 9539077..eff978d 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -23,10 +23,10 @@ import ( type mysqlConn struct { buf buffer netConn net.Conn - rawConn net.Conn // underlying connection when netConn is TLS connection. - affectedRows uint64 - insertId uint64 + rawConn net.Conn // underlying connection when netConn is TLS connection. + result mysqlResult // managed by clearResult() and handleOkPacket(). cfg *Config + connector *connector maxAllowedPacket int maxWriteSize int writeTimeout time.Duration @@ -34,7 +34,6 @@ type mysqlConn struct { status statusFlag sequence uint8 parseTime bool - reset bool // set when the Go SQL package calls ResetSession // for context support (Go 1.8+) watching bool @@ -45,17 +44,27 @@ type mysqlConn struct { closed atomicBool // set when conn is closed, before closech is closed } +// Helper function to call per-connection logger. +func (mc *mysqlConn) log(v ...any) { + mc.cfg.Logger.Print(v...) 
+} + // Handles parameters set in DSN after the connection is established func (mc *mysqlConn) handleParams() (err error) { var cmdSet strings.Builder + for param, val := range mc.cfg.Params { switch param { // Charset: character_set_connection, character_set_client, character_set_results case "charset": charsets := strings.Split(val, ",") - for i := range charsets { + for _, cs := range charsets { // ignore errors here - a charset may not exist - err = mc.exec("SET NAMES " + charsets[i]) + if mc.cfg.Collation != "" { + err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation) + } else { + err = mc.exec("SET NAMES " + cs) + } if err == nil { break } @@ -68,13 +77,13 @@ func (mc *mysqlConn) handleParams() (err error) { default: if cmdSet.Len() == 0 { // Heuristic: 29 chars for each other key=value to reduce reallocations - cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1)) + cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1)) cmdSet.WriteString("SET ") } else { - cmdSet.WriteByte(',') + cmdSet.WriteString(", ") } cmdSet.WriteString(param) - cmdSet.WriteByte('=') + cmdSet.WriteString(" = ") cmdSet.WriteString(val) } } @@ -105,7 +114,7 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) { func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { if mc.closed.Load() { - errLog.Print(ErrInvalidConn) + mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } var q string @@ -128,7 +137,7 @@ func (mc *mysqlConn) Close() (err error) { } mc.cleanup() - + mc.clearResult() return } @@ -143,12 +152,16 @@ func (mc *mysqlConn) cleanup() { // Makes cleanup idempotent close(mc.closech) - if mc.netConn == nil { + conn := mc.rawConn + if conn == nil { return } - if err := mc.netConn.Close(); err != nil { - errLog.Print(err) + if err := conn.Close(); err != nil { + mc.log(err) } + // This function can be called from multiple goroutines. + // So we can not mc.clearResult() here. + // Caller should do it if they are in safe goroutine. } func (mc *mysqlConn) error() error { @@ -163,14 +176,14 @@ func (mc *mysqlConn) error() error { func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { if mc.closed.Load() { - errLog.Print(ErrInvalidConn) + mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command err := mc.writeCommandPacketStr(comStmtPrepare, query) if err != nil { // STMT_PREPARE is safe to retry. So we can return ErrBadConn here. - errLog.Print(err) + mc.log(err) return nil, driver.ErrBadConn } @@ -204,7 +217,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin buf, err := mc.buf.takeCompleteBuffer() if err != nil { // can not take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return "", ErrInvalidConn } buf = buf[:0] @@ -246,7 +259,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin buf = append(buf, "'0000-00-00'"...) 
} else { buf = append(buf, '\'') - buf, err = appendDateTime(buf, v.In(mc.cfg.Loc)) + buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate) if err != nil { return "", err } @@ -296,7 +309,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { if mc.closed.Load() { - errLog.Print(ErrInvalidConn) + mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -310,28 +323,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err } query = prepared } - mc.affectedRows = 0 - mc.insertId = 0 err := mc.exec(query) if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, err + copied := mc.result + return &copied, err } return nil, mc.markBadConn(err) } // Internal function to execute commands func (mc *mysqlConn) exec(query string) error { + handleOk := mc.clearResult() // Send command if err := mc.writeCommandPacketStr(comQuery, query); err != nil { return mc.markBadConn(err) } // Read Result - resLen, err := mc.readResultSetHeaderPacket() + resLen, err := handleOk.readResultSetHeaderPacket() if err != nil { return err } @@ -348,7 +358,7 @@ func (mc *mysqlConn) exec(query string) error { } } - return mc.discardResults() + return handleOk.discardResults() } func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { @@ -356,8 +366,10 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro } func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) { + handleOk := mc.clearResult() + if mc.closed.Load() { - errLog.Print(ErrInvalidConn) + mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } if len(args) != 0 { @@ -376,7 +388,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) if err == nil { // Read Result var resLen int - resLen, err = mc.readResultSetHeaderPacket() + resLen, err = handleOk.readResultSetHeaderPacket() if err == nil { rows := new(textRows) rows.mc = mc @@ -404,12 +416,13 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) // The returned byte slice is only valid until the next read func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { // Send command + handleOk := mc.clearResult() if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { return nil, err } // Read Result - resLen, err := mc.readResultSetHeaderPacket() + resLen, err := handleOk.readResultSetHeaderPacket() if err == nil { rows := new(textRows) rows.mc = mc @@ -451,7 +464,7 @@ func (mc *mysqlConn) finish() { // Ping implements driver.Pinger interface func (mc *mysqlConn) Ping(ctx context.Context) (err error) { if mc.closed.Load() { - errLog.Print(ErrInvalidConn) + mc.log(ErrInvalidConn) return driver.ErrBadConn } @@ -460,11 +473,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) { } defer mc.finish() + handleOk := mc.clearResult() if err = mc.writeCommandPacket(comPing); err != nil { return mc.markBadConn(err) } - return mc.readResultOK() + return handleOk.readResultOK() } // BeginTx implements driver.ConnBeginTx interface @@ -639,7 +653,31 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error { if mc.closed.Load() { return driver.ErrBadConn } - mc.reset = true + + // Perform a stale connection check. 
We only perform this check for + // the first query on a connection that has been checked out of the + // connection pool: a fresh connection from the pool is more likely + // to be stale, and it has not performed any previous writes that + // could cause data corruption, so it's safe to return ErrBadConn + // if the check fails. + if mc.cfg.CheckConnLiveness { + conn := mc.netConn + if mc.rawConn != nil { + conn = mc.rawConn + } + var err error + if mc.cfg.ReadTimeout != 0 { + err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout)) + } + if err == nil { + err = connCheck(conn) + } + if err != nil { + mc.log("closing bad idle connection: ", err) + return driver.ErrBadConn + } + } + return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go index d567b4e..b670775 100644 --- a/vendor/github.com/go-sql-driver/mysql/connector.go +++ b/vendor/github.com/go-sql-driver/mysql/connector.go @@ -12,10 +12,53 @@ import ( "context" "database/sql/driver" "net" + "os" + "strconv" + "strings" ) type connector struct { - cfg *Config // immutable private copy. + cfg *Config // immutable private copy. + encodedAttributes string // Encoded connection attributes. +} + +func encodeConnectionAttributes(cfg *Config) string { + connAttrsBuf := make([]byte, 0) + + // default connection attributes + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid())) + serverHost, _, _ := net.SplitHostPort(cfg.Addr) + if serverHost != "" { + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost) + } + + // user-defined connection attributes + for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") { + k, v, found := strings.Cut(connAttr, ":") + if !found { + continue + } + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k) + connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v) + } + + return string(connAttrsBuf) +} + +func newConnector(cfg *Config) *connector { + encodedAttributes := encodeConnectionAttributes(cfg) + return &connector{ + cfg: cfg, + encodedAttributes: encodedAttributes, + } } // Connect implements driver.Connector interface. 
@@ -23,12 +66,23 @@ type connector struct { func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { var err error + // Invoke beforeConnect if present, with a copy of the configuration + cfg := c.cfg + if c.cfg.beforeConnect != nil { + cfg = c.cfg.Clone() + err = c.cfg.beforeConnect(ctx, cfg) + if err != nil { + return nil, err + } + } + // New mysqlConn mc := &mysqlConn{ maxAllowedPacket: maxPacketSize, maxWriteSize: maxPacketSize - 1, closech: make(chan struct{}), - cfg: c.cfg, + cfg: cfg, + connector: c, } mc.parseTime = mc.cfg.ParseTime @@ -48,18 +102,15 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { nd := net.Dialer{Timeout: mc.cfg.Timeout} mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) } - if err != nil { return nil, err } + mc.rawConn = mc.netConn // Enable TCP Keepalives on TCP connections if tc, ok := mc.netConn.(*net.TCPConn); ok { if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. - mc.netConn.Close() - mc.netConn = nil - return nil, err + c.cfg.Logger.Print(err) } } @@ -92,7 +143,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { authResp, err := mc.auth(authData, plugin) if err != nil { // try the default auth plugin, if using the requested plugin failed - errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) + c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) plugin = defaultAuthPlugin authResp, err = mc.auth(authData, plugin) if err != nil { diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go index b1e6b85..22526e0 100644 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -8,12 +8,25 @@ package mysql +import "runtime" + const ( defaultAuthPlugin = "mysql_native_password" - defaultMaxAllowedPacket = 4 << 20 // 4 MiB + defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355 minProtocolVersion = 10 maxPacketSize = 1<<24 - 1 timeFormat = "2006-01-02 15:04:05.999999" + + // Connection attributes + // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available + connAttrClientName = "_client_name" + connAttrClientNameValue = "Go-MySQL-Driver" + connAttrOS = "_os" + connAttrOSValue = runtime.GOOS + connAttrPlatform = "_platform" + connAttrPlatformValue = runtime.GOARCH + connAttrPid = "_pid" + connAttrServerHost = "_server_host" ) // MySQL constants documentation: diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index ad7aec2..105316b 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) { dials[net] = dial } +// DeregisterDialContext removes the custom dial function registered with the given net. +func DeregisterDialContext(net string) { + dialsLock.Lock() + defer dialsLock.Unlock() + if dials != nil { + delete(dials, net) + } +} + // RegisterDial registers a custom dial function. It can then be used by the // network address mynet(addr), where mynet is the registered new network. // addr is passed as a parameter to the dial function. 
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { if err != nil { return nil, err } - c := &connector{ - cfg: cfg, - } + c := newConnector(cfg) return c.Connect(context.Background()) } +// This variable can be replaced with -ldflags like below: +// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom" +var driverName = "mysql" + func init() { - sql.Register("mysql", &MySQLDriver{}) + if driverName != "" { + sql.Register(driverName, &MySQLDriver{}) + } } // NewConnector returns new driver.Connector. @@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) { if err := cfg.normalize(); err != nil { return nil, err } - return &connector{cfg: cfg}, nil + return newConnector(cfg), nil } // OpenConnector implements driver.DriverContext. @@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) { if err != nil { return nil, err } - return &connector{ - cfg: cfg, - }, nil + return newConnector(cfg), nil } diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index 4b71aaa..65f5a02 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -10,6 +10,7 @@ package mysql import ( "bytes" + "context" "crypto/rsa" "crypto/tls" "errors" @@ -34,22 +35,27 @@ var ( // If a new Config is created instead of being parsed from a DSN string, // the NewConfig function should be used, which sets default values. type Config struct { - User string // Username - Passwd string // Password (requires User) - Net string // Network type - Addr string // Network address (requires Net) - DBName string // Database name - Params map[string]string // Connection parameters - Collation string // Connection collation - Loc *time.Location // Location for time.Time values - MaxAllowedPacket int // Max packet size allowed - ServerPubKey string // Server public key name - pubKey *rsa.PublicKey // Server public key - TLSConfig string // TLS configuration name - TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig - Timeout time.Duration // Dial timeout - ReadTimeout time.Duration // I/O read timeout - WriteTimeout time.Duration // I/O write timeout + // non boolean fields + + User string // Username + Passwd string // Password (requires User) + Net string // Network (e.g. "tcp", "tcp6", "unix". 
default: "tcp") + Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix") + DBName string // Database name + Params map[string]string // Connection parameters + ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + ServerPubKey string // Server public key name + TLSConfig string // TLS configuration name + TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + Logger Logger // Logger + + // boolean fields AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE AllowCleartextPasswords bool // Allows the cleartext client side plugin @@ -63,17 +69,57 @@ type Config struct { MultiStatements bool // Allow multiple statements in one query ParseTime bool // Parse time values to time.Time RejectReadOnly bool // Reject read-only connections + + // unexported fields. new options should be come here + + beforeConnect func(context.Context, *Config) error // Invoked before a connection is established + pubKey *rsa.PublicKey // Server public key + timeTruncate time.Duration // Truncate time.Time values to the specified duration } +// Functional Options Pattern +// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type Option func(*Config) error + // NewConfig creates a new Config and sets default values. func NewConfig() *Config { - return &Config{ - Collation: defaultCollation, + cfg := &Config{ Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, + Logger: defaultLogger, AllowNativePasswords: true, CheckConnLiveness: true, } + + return cfg +} + +// Apply applies the given options to the Config object. +func (c *Config) Apply(opts ...Option) error { + for _, opt := range opts { + err := opt(c) + if err != nil { + return err + } + } + return nil +} + +// TimeTruncate sets the time duration to truncate time.Time values in +// query parameters. +func TimeTruncate(d time.Duration) Option { + return func(cfg *Config) error { + cfg.timeTruncate = d + return nil + } +} + +// BeforeConnect sets the function to be invoked before a connection is established. +func BeforeConnect(fn func(context.Context, *Config) error) Option { + return func(cfg *Config) error { + cfg.beforeConnect = fn + return nil + } } func (cfg *Config) Clone() *Config { @@ -97,7 +143,7 @@ func (cfg *Config) Clone() *Config { } func (cfg *Config) normalize() error { - if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] { return errInvalidDSNUnsafeCollation } @@ -153,6 +199,10 @@ func (cfg *Config) normalize() error { } } + if cfg.Logger == nil { + cfg.Logger = defaultLogger + } + return nil } @@ -171,6 +221,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) { // FormatDSN formats the given Config into a DSN string which can be passed to // the driver. +// +// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config]. 
func (cfg *Config) FormatDSN() string { var buf bytes.Buffer @@ -196,7 +248,7 @@ func (cfg *Config) FormatDSN() string { // /dbname buf.WriteByte('/') - buf.WriteString(cfg.DBName) + buf.WriteString(url.PathEscape(cfg.DBName)) // [?param1=value1&...¶mN=valueN] hasParam := false @@ -230,7 +282,7 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") } - if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + if col := cfg.Collation; col != "" { writeDSNParam(&buf, &hasParam, "collation", col) } @@ -254,6 +306,10 @@ func (cfg *Config) FormatDSN() string { writeDSNParam(&buf, &hasParam, "parseTime", "true") } + if cfg.timeTruncate > 0 { + writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String()) + } + if cfg.ReadTimeout > 0 { writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String()) } @@ -358,7 +414,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) { break } } - cfg.DBName = dsn[i+1 : j] + + dbname := dsn[i+1 : j] + if cfg.DBName, err = url.PathUnescape(dbname); err != nil { + return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err) + } break } @@ -378,13 +438,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) { // Values must be url.QueryEscape'ed func parseDSNParams(cfg *Config, params string) (err error) { for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { + key, value, found := strings.Cut(v, "=") + if !found { continue } // cfg params - switch value := param[1]; param[0] { + switch key { // Disable INFILE allowlist / enable all files case "allowAllFiles": var isBool bool @@ -490,6 +550,13 @@ func parseDSNParams(cfg *Config, params string) (err error) { return errors.New("invalid bool value: " + value) } + // time.Time truncation + case "timeTruncate": + cfg.timeTruncate, err = time.ParseDuration(value) + if err != nil { + return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err) + } + // I/O read Timeout case "readTimeout": cfg.ReadTimeout, err = time.ParseDuration(value) @@ -554,13 +621,22 @@ func parseDSNParams(cfg *Config, params string) (err error) { if err != nil { return } + + // Connection attributes + case "connectionAttributes": + connectionAttributes, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid connectionAttributes value: %v", err) + } + cfg.ConnectionAttributes = connectionAttributes + default: // lazy init if cfg.Params == nil { cfg.Params = make(map[string]string) } - if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + if cfg.Params[key], err = url.QueryUnescape(value); err != nil { return } } diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go index 7c037e7..a7ef889 100644 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -21,13 +21,13 @@ var ( ErrMalformPkt = errors.New("malformed packet") ErrNoTLS = errors.New("TLS requested but server does not support TLS") ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") - ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrNativePassword = errors.New("this user requires mysql native password authentication") ErrOldPassword = errors.New("this user requires old password authentication. 
If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") ErrUnknownPlugin = errors.New("this authentication plugin is not supported") ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") ErrPktSync = errors.New("commands out of sync. You can't run this command now") ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") - ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`") ErrBusyBuffer = errors.New("busy buffer") // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. @@ -37,20 +37,26 @@ var ( errBadConnNoWrite = errors.New("bad connection") ) -var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) +var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) // Logger is used to log critical error messages. type Logger interface { - Print(v ...interface{}) + Print(v ...any) } -// SetLogger is used to set the logger for critical errors. +// NopLogger is a nop implementation of the Logger interface. +type NopLogger struct{} + +// Print implements Logger interface. +func (nl *NopLogger) Print(_ ...any) {} + +// SetLogger is used to set the default logger for critical errors. // The initial logger is os.Stderr. func SetLogger(logger Logger) error { if logger == nil { return errors.New("logger is nil") } - errLog = logger + defaultLogger = logger return nil } diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go index e0654a8..2860842 100644 --- a/vendor/github.com/go-sql-driver/mysql/fields.go +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string { case fieldTypeBit: return "BIT" case fieldTypeBLOB: - if mf.charSet != collations[binaryCollation] { + if mf.charSet != binaryCollationID { return "TEXT" } return "BLOB" @@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string { case fieldTypeGeometry: return "GEOMETRY" case fieldTypeInt24: + if mf.flags&flagUnsigned != 0 { + return "UNSIGNED MEDIUMINT" + } return "MEDIUMINT" case fieldTypeJSON: return "JSON" @@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string { } return "INT" case fieldTypeLongBLOB: - if mf.charSet != collations[binaryCollation] { + if mf.charSet != binaryCollationID { return "LONGTEXT" } return "LONGBLOB" @@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string { } return "BIGINT" case fieldTypeMediumBLOB: - if mf.charSet != collations[binaryCollation] { + if mf.charSet != binaryCollationID { return "MEDIUMTEXT" } return "MEDIUMBLOB" @@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string { } return "SMALLINT" case fieldTypeString: - if mf.charSet == collations[binaryCollation] { + if mf.flags&flagEnum != 0 { + return "ENUM" + } else if mf.flags&flagSet != 0 { + return "SET" + } + if mf.charSet == binaryCollationID { return "BINARY" } return "CHAR" @@ -88,17 +96,17 @@ func (mf *mysqlField) typeDatabaseName() string { } return "TINYINT" case fieldTypeTinyBLOB: - if mf.charSet != collations[binaryCollation] { + if mf.charSet != binaryCollationID { return "TINYTEXT" } return "TINYBLOB" case fieldTypeVarChar: - if mf.charSet == 
collations[binaryCollation] { + if mf.charSet == binaryCollationID { return "VARBINARY" } return "VARCHAR" case fieldTypeVarString: - if mf.charSet == collations[binaryCollation] { + if mf.charSet == binaryCollationID { return "VARBINARY" } return "VARCHAR" @@ -110,21 +118,23 @@ func (mf *mysqlField) typeDatabaseName() string { } var ( - scanTypeFloat32 = reflect.TypeOf(float32(0)) - scanTypeFloat64 = reflect.TypeOf(float64(0)) - scanTypeInt8 = reflect.TypeOf(int8(0)) - scanTypeInt16 = reflect.TypeOf(int16(0)) - scanTypeInt32 = reflect.TypeOf(int32(0)) - scanTypeInt64 = reflect.TypeOf(int64(0)) - scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) - scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) - scanTypeNullTime = reflect.TypeOf(sql.NullTime{}) - scanTypeUint8 = reflect.TypeOf(uint8(0)) - scanTypeUint16 = reflect.TypeOf(uint16(0)) - scanTypeUint32 = reflect.TypeOf(uint32(0)) - scanTypeUint64 = reflect.TypeOf(uint64(0)) - scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) - scanTypeUnknown = reflect.TypeOf(new(interface{})) + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(sql.NullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeString = reflect.TypeOf("") + scanTypeNullString = reflect.TypeOf(sql.NullString{}) + scanTypeBytes = reflect.TypeOf([]byte{}) + scanTypeUnknown = reflect.TypeOf(new(any)) ) type mysqlField struct { @@ -187,12 +197,18 @@ func (mf *mysqlField) scanType() reflect.Type { } return scanTypeNullFloat + case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB, + fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + if mf.charSet == binaryCollationID { + return scanTypeBytes + } + fallthrough case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, - fieldTypeTime: - return scanTypeRawBytes + fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime: + if mf.flags&flagNotNULL != 0 { + return scanTypeString + } + return scanTypeNullString case fieldTypeDate, fieldTypeNewDate, fieldTypeTimestamp, fieldTypeDateTime: diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go deleted file mode 100644 index 3a4ec25..0000000 --- a/vendor/github.com/go-sql-driver/mysql/fuzz.go +++ /dev/null @@ -1,25 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -//go:build gofuzz -// +build gofuzz - -package mysql - -import ( - "database/sql" -) - -func Fuzz(data []byte) int { - db, err := sql.Open("mysql", string(data)) - if err != nil { - return 0 - } - db.Close() - return 1 -} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go index 3279dcf..0c8af9f 100644 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -93,7 +93,7 @@ func deferredClose(err *error, closer io.Closer) { const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP -func (mc *mysqlConn) handleInFileRequest(name string) (err error) { +func (mc *okHandler) handleInFileRequest(name string) (err error) { var rdr io.Reader var data []byte packetSize := defaultPacketSize @@ -116,10 +116,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) { defer deferredClose(&err, cl) } } else { - err = fmt.Errorf("Reader '%s' is ", name) + err = fmt.Errorf("reader '%s' is ", name) } } else { - err = fmt.Errorf("Reader '%s' is not registered", name) + err = fmt.Errorf("reader '%s' is not registered", name) } } else { // File name = strings.Trim(name, `"`) @@ -154,7 +154,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) { for err == nil { n, err = rdr.Read(data[4:]) if n > 0 { - if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil { return ioErr } } @@ -168,7 +168,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) { if data == nil { data = make([]byte, 4) } - if ioErr := mc.writePacket(data[:4]); ioErr != nil { + if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil { return ioErr } @@ -177,6 +177,6 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) { return mc.readResultOK() } - mc.readPacket() + mc.conn().readPacket() return err } diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go index 36c8a42..316a48a 100644 --- a/vendor/github.com/go-sql-driver/mysql/nulltime.go +++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go @@ -38,7 +38,7 @@ type NullTime sql.NullTime // Scan implements the Scanner interface. // The value type must be time.Time or string / []byte (formatted time-string), // otherwise Scan fails. -func (nt *NullTime) Scan(value interface{}) (err error) { +func (nt *NullTime) Scan(value any) (err error) { if value == nil { nt.Time, nt.Valid = time.Time{}, false return @@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) { } nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) + return fmt.Errorf("can't convert %T to time.Time", value) } // Value implements the driver Valuer interface. 
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index ee05c95..90a3472 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -14,10 +14,10 @@ import ( "database/sql/driver" "encoding/binary" "encoding/json" - "errors" "fmt" "io" "math" + "strconv" "time" ) @@ -34,7 +34,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } - errLog.Print(err) + mc.log(err) mc.Close() return nil, ErrInvalidConn } @@ -44,6 +44,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { // check packet sync [8 bit] if data[3] != mc.sequence { + mc.Close() if data[3] > mc.sequence { return nil, ErrPktSyncMul } @@ -56,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { if pktLen == 0 { // there was no previous packet if prevData == nil { - errLog.Print(ErrMalformPkt) + mc.log(ErrMalformPkt) mc.Close() return nil, ErrInvalidConn } @@ -70,7 +71,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { if cerr := mc.canceled.Value(); cerr != nil { return nil, cerr } - errLog.Print(err) + mc.log(err) mc.Close() return nil, ErrInvalidConn } @@ -97,34 +98,6 @@ func (mc *mysqlConn) writePacket(data []byte) error { return ErrPktTooLarge } - // Perform a stale connection check. We only perform this check for - // the first query on a connection that has been checked out of the - // connection pool: a fresh connection from the pool is more likely - // to be stale, and it has not performed any previous writes that - // could cause data corruption, so it's safe to return ErrBadConn - // if the check fails. - if mc.reset { - mc.reset = false - conn := mc.netConn - if mc.rawConn != nil { - conn = mc.rawConn - } - var err error - if mc.cfg.CheckConnLiveness { - if mc.cfg.ReadTimeout != 0 { - err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout)) - } - if err == nil { - err = connCheck(conn) - } - } - if err != nil { - errLog.Print("closing bad idle connection: ", err) - mc.Close() - return driver.ErrBadConn - } - } - for { var size int if pktLen >= maxPacketSize { @@ -161,7 +134,7 @@ func (mc *mysqlConn) writePacket(data []byte) error { // Handle error if err == nil { // n != len(data) mc.cleanup() - errLog.Print(ErrMalformPkt) + mc.log(ErrMalformPkt) } else { if cerr := mc.canceled.Value(); cerr != nil { return cerr @@ -171,7 +144,7 @@ func (mc *mysqlConn) writePacket(data []byte) error { return errBadConnNoWrite } mc.cleanup() - errLog.Print(err) + mc.log(err) } return ErrInvalidConn } @@ -239,7 +212,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro // reserved (all [00]) [10 bytes] pos += 1 + 2 + 2 + 1 + 10 - // second part of the password cipher [mininum 13 bytes], + // second part of the password cipher [minimum 13 bytes], // where len=MAX(13, length of auth-plugin-data - 8) // // The web documentation is ambiguous about the length. 
However, @@ -285,6 +258,7 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string clientLocalFiles | clientPluginAuth | clientMultiResults | + clientConnectAttrs | mc.flags&clientLongFlag if mc.cfg.ClientFoundRows { @@ -318,11 +292,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string pktLen += n + 1 } + // encode length of the connection attributes + var connAttrsLEIBuf [9]byte + connAttrsLen := len(mc.connector.encodedAttributes) + connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen)) + pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes) + // Calculate packet length and get buffer with that size - data, err := mc.buf.takeSmallBuffer(pktLen + 4) + data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -338,14 +318,18 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string data[10] = 0x00 data[11] = 0x00 - // Charset [1 byte] + // Collation ID [1 byte] + cname := mc.cfg.Collation + if cname == "" { + cname = defaultCollation + } var found bool - data[12], found = collations[mc.cfg.Collation] + data[12], found = collations[cname] if !found { // Note possibility for false negatives: // could be triggered although the collation is valid if the // collations map does not contain entries the server supports. - return errors.New("unknown collation") + return fmt.Errorf("unknown collation: %q", cname) } // Filler [23 bytes] (all 0x00) @@ -367,7 +351,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string if err := tlsConn.Handshake(); err != nil { return err } - mc.rawConn = mc.netConn mc.netConn = tlsConn mc.buf.nc = tlsConn } @@ -394,6 +377,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string data[pos] = 0x00 pos++ + // Connection Attributes + pos += copy(data[pos:], connAttrsLEI) + pos += copy(data[pos:], []byte(mc.connector.encodedAttributes)) + // Send Auth packet return mc.writePacket(data[:pos]) } @@ -404,7 +391,7 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { data, err := mc.buf.takeSmallBuffer(pktLen) if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -424,7 +411,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error { data, err := mc.buf.takeSmallBuffer(4 + 1) if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -443,7 +430,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { data, err := mc.buf.takeBuffer(pktLen + 4) if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -464,7 +451,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -495,7 +482,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { switch data[0] { case iOK: - return nil, "", mc.handleOkPacket(data) + // resultUnchanged, since auth happens before any queries or + // commands have been executed. 
+ return nil, "", mc.resultUnchanged().handleOkPacket(data) case iAuthMoreData: return data[1:], "", err @@ -518,9 +507,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { } } -// Returns error if Packet is not an 'Result OK'-Packet -func (mc *mysqlConn) readResultOK() error { - data, err := mc.readPacket() +// Returns error if Packet is not a 'Result OK'-Packet +func (mc *okHandler) readResultOK() error { + data, err := mc.conn().readPacket() if err != nil { return err } @@ -528,13 +517,17 @@ func (mc *mysqlConn) readResultOK() error { if data[0] == iOK { return mc.handleOkPacket(data) } - return mc.handleErrorPacket(data) + return mc.conn().handleErrorPacket(data) } // Result Set Header Packet // http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset -func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { - data, err := mc.readPacket() +func (mc *okHandler) readResultSetHeaderPacket() (int, error) { + // handleOkPacket replaces both values; other cases leave the values unchanged. + mc.result.affectedRows = append(mc.result.affectedRows, 0) + mc.result.insertIds = append(mc.result.insertIds, 0) + + data, err := mc.conn().readPacket() if err == nil { switch data[0] { @@ -542,19 +535,16 @@ func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { return 0, mc.handleOkPacket(data) case iERR: - return 0, mc.handleErrorPacket(data) + return 0, mc.conn().handleErrorPacket(data) case iLocalInFile: return 0, mc.handleInFileRequest(string(data[1:])) } // column count - num, _, n := readLengthEncodedInteger(data) - if n-len(data) == 0 { - return int(num), nil - } - - return 0, ErrMalformPkt + num, _, _ := readLengthEncodedInteger(data) + // ignore remaining data in the packet. see #1478. + return int(num), nil } return 0, err } @@ -607,18 +597,61 @@ func readStatus(b []byte) statusFlag { return statusFlag(b[0]) | statusFlag(b[1])<<8 } +// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't +// need to be cleared first (e.g. during authentication, or while additional +// resultsets are being fetched.) +func (mc *mysqlConn) resultUnchanged() *okHandler { + return (*okHandler)(mc) +} + +// okHandler represents the state of the connection when mysqlConn.result has +// been prepared for processing of OK packets. +// +// To correctly populate mysqlConn.result (updated by handleOkPacket()), all +// callpaths must either: +// +// 1. first clear it using clearResult(), or +// 2. confirm that they don't need to (by calling resultUnchanged()). +// +// Both return an instance of type *okHandler. +type okHandler mysqlConn + +// Exposes the underlying type's methods. +func (mc *okHandler) conn() *mysqlConn { + return (*mysqlConn)(mc) +} + +// clearResult clears the connection's stored affectedRows and insertIds +// fields. +// +// It returns a handler that can process OK responses. 
+func (mc *mysqlConn) clearResult() *okHandler { + mc.result = mysqlResult{} + return (*okHandler)(mc) +} + // Ok Packet // http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet -func (mc *mysqlConn) handleOkPacket(data []byte) error { +func (mc *okHandler) handleOkPacket(data []byte) error { var n, m int + var affectedRows, insertId uint64 // 0x00 [1 byte] // Affected rows [Length Coded Binary] - mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + affectedRows, _, n = readLengthEncodedInteger(data[1:]) // Insert id [Length Coded Binary] - mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // Update for the current statement result (only used by + // readResultSetHeaderPacket). + if len(mc.result.affectedRows) > 0 { + mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows) + } + if len(mc.result.insertIds) > 0 { + mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId) + } // server_status [2 bytes] mc.status = readStatus(data[1+n+m : 1+n+m+2]) @@ -769,7 +802,8 @@ func (rows *textRows) readRow(dest []driver.Value) error { for i := range dest { // Read bytes and convert to string - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + var buf []byte + buf, isNull, n, err = readLengthEncodedString(data[pos:]) pos += n if err != nil { @@ -781,19 +815,40 @@ func (rows *textRows) readRow(dest []driver.Value) error { continue } - if !mc.parseTime { - continue - } - - // Parse time field switch rows.rs.columns[i].fieldType { case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeDate, fieldTypeNewDate: - if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil { - return err + if mc.parseTime { + dest[i], err = parseDateTime(buf, mc.cfg.Loc) + } else { + dest[i] = buf } + + case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong: + dest[i], err = strconv.ParseInt(string(buf), 10, 64) + + case fieldTypeLongLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i], err = strconv.ParseUint(string(buf), 10, 64) + } else { + dest[i], err = strconv.ParseInt(string(buf), 10, 64) + } + + case fieldTypeFloat: + var d float64 + d, err = strconv.ParseFloat(string(buf), 32) + dest[i] = float32(d) + + case fieldTypeDouble: + dest[i], err = strconv.ParseFloat(string(buf), 64) + + default: + dest[i] = buf + } + if err != nil { + return err } } @@ -938,7 +993,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } if err != nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } @@ -1116,7 +1171,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { if v.IsZero() { b = append(b, "0000-00-00"...) } else { - b, err = appendDateTime(b, v.In(mc.cfg.Loc)) + b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate) if err != nil { return err } @@ -1137,7 +1192,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { if valuesCap != cap(paramValues) { data = append(data[:pos], paramValues...) if err = mc.buf.store(data); err != nil { - errLog.Print(err) + mc.log(err) return errBadConnNoWrite } } @@ -1149,7 +1204,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { return mc.writePacket(data) } -func (mc *mysqlConn) discardResults() error { +// For each remaining resultset in the stream, discards its rows and updates +// mc.affectedRows and mc.insertIds. 
+func (mc *okHandler) discardResults() error { for mc.status&statusMoreResultsExists != 0 { resLen, err := mc.readResultSetHeaderPacket() if err != nil { @@ -1157,11 +1214,11 @@ func (mc *mysqlConn) discardResults() error { } if resLen > 0 { // columns - if err := mc.readUntilEOF(); err != nil { + if err := mc.conn().readUntilEOF(); err != nil { return err } // rows - if err := mc.readUntilEOF(); err != nil { + if err := mc.conn().readUntilEOF(); err != nil { return err } } diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go index c6438d0..d516314 100644 --- a/vendor/github.com/go-sql-driver/mysql/result.go +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -8,15 +8,43 @@ package mysql +import "database/sql/driver" + +// Result exposes data not available through *connection.Result. +// +// This is accessible by executing statements using sql.Conn.Raw() and +// downcasting the returned result: +// +// res, err := rawConn.Exec(...) +// res.(mysql.Result).AllRowsAffected() +type Result interface { + driver.Result + // AllRowsAffected returns a slice containing the affected rows for each + // executed statement. + AllRowsAffected() []int64 + // AllLastInsertIds returns a slice containing the last inserted ID for each + // executed statement. + AllLastInsertIds() []int64 +} + type mysqlResult struct { - affectedRows int64 - insertId int64 + // One entry in both slices is created for every executed statement result. + affectedRows []int64 + insertIds []int64 } func (res *mysqlResult) LastInsertId() (int64, error) { - return res.insertId, nil + return res.insertIds[len(res.insertIds)-1], nil } func (res *mysqlResult) RowsAffected() (int64, error) { - return res.affectedRows, nil + return res.affectedRows[len(res.affectedRows)-1], nil +} + +func (res *mysqlResult) AllLastInsertIds() []int64 { + return append([]int64{}, res.insertIds...) // defensive copy +} + +func (res *mysqlResult) AllRowsAffected() []int64 { + return append([]int64{}, res.affectedRows...) // defensive copy } diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go index 888bdb5..81fa606 100644 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -123,7 +123,8 @@ func (rows *mysqlRows) Close() (err error) { err = mc.readUntilEOF() } if err == nil { - if err = mc.discardResults(); err != nil { + handleOk := mc.clearResult() + if err = handleOk.discardResults(); err != nil { return err } } @@ -160,7 +161,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) { return 0, io.EOF } rows.rs = resultSet{} - return rows.mc.readResultSetHeaderPacket() + // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to + // nextResultSet. 
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket() + if err != nil { + // Clean up about multi-results flag + rows.rs.done = true + rows.mc.status = rows.mc.status & (^statusMoreResultsExists) + } + return resLen, err } func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go index 10ece8b..0436f22 100644 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -51,7 +51,7 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) { func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { if stmt.mc.closed.Load() { - errLog.Print(ErrInvalidConn) + stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { } mc := stmt.mc - - mc.affectedRows = 0 - mc.insertId = 0 + handleOk := stmt.mc.clearResult() // Read Result - resLen, err := mc.readResultSetHeaderPacket() + resLen, err := handleOk.readResultSetHeaderPacket() if err != nil { return nil, err } @@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { } } - if err := mc.discardResults(); err != nil { + if err := handleOk.discardResults(); err != nil { return nil, err } - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, nil + copied := mc.result + return &copied, nil } func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { @@ -99,7 +95,7 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { if stmt.mc.closed.Load() { - errLog.Print(ErrInvalidConn) + stmt.mc.log(ErrInvalidConn) return nil, driver.ErrBadConn } // Send command @@ -111,7 +107,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { mc := stmt.mc // Read Result - resLen, err := mc.readResultSetHeaderPacket() + handleOk := stmt.mc.clearResult() + resLen, err := handleOk.readResultSetHeaderPacket() if err != nil { return nil, err } @@ -144,7 +141,7 @@ type converter struct{} // implementation does not. This function should be kept in sync with // database/sql/driver defaultConverter.ConvertValue() except for that // deliberate difference. -func (c converter) ConvertValue(v interface{}) (driver.Value, error) { +func (c converter) ConvertValue(v any) (driver.Value, error) { if driver.IsValue(v) { return v, nil } diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go index 15dbd8d..cda24fe 100644 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -36,7 +36,7 @@ var ( // registering it. 
// // rootCertPool := x509.NewCertPool() -// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// pem, err := os.ReadFile("/path/ca-cert.pem") // if err != nil { // log.Fatal(err) // } @@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va return nil, fmt.Errorf("invalid DATETIME packet length %d", num) } -func appendDateTime(buf []byte, t time.Time) ([]byte, error) { +func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) { + if timeTruncate > 0 { + t = t.Truncate(timeTruncate) + } + year, month, day := t.Date() hour, min, sec := t.Clock() nsec := t.Nanosecond() @@ -616,6 +620,11 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte { byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) } +func appendLengthEncodedString(b []byte, s string) []byte { + b = appendLengthEncodedInteger(b, uint64(len(s))) + return append(b, s...) +} + // reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. // If cap(buf) is not enough, reallocate new buffer. func reserveBuffer(buf []byte, appendSize int) []byte { diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a6..0000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 0000000..7ec5ac7 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. 
(Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09..a502fdc 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46..3e9a618 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4b..dc60082 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78ed..b2a0bc8 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06c..c351129 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207a..5232b48 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. 
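As a minimal usage sketch (not part of the patch, assuming the vendored google/uuid v1.6.0 API added in this hunk): Parse normalizes several non-standard encodings into the canonical form, while the newly added Validate checks a string's format without constructing a UUID value.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse accepts non-standard encodings such as the braced
	// "Microsoft style" form and raw hex, in addition to the
	// canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form.
	u, err := uuid.Parse("{6ba7b810-9dad-11d1-80b4-00c04fd430c8}")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // prints the canonical form

	// Validate (added in v1.5.0) accepts the same formats but only
	// reports whether the string is well formed; no UUID is allocated.
	if err := uuid.Validate("6ba7b8109dad11d180b400c04fd430c8"); err != nil {
		fmt.Println("invalid:", err)
	}
}
```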
func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -292,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 0000000..339a959 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 0000000..3167b64 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
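A brief sketch (outside the patch, assuming the vendored google/uuid v1.6.0 API in this file) of generating version-7 UUIDs and reading back the embedded Unix-epoch timestamp via the updated Time() method:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewV7 places a Unix-epoch millisecond timestamp in the leading 48 bits,
	// so UUIDs generated later in the same process sort after earlier ones.
	first, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	second, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}

	fmt.Println(first.Version()) // VERSION_7

	// Time() now understands versions 6 and 7; UnixTime converts the
	// encoded timestamp back to seconds/nanoseconds since the Unix epoch.
	sec, nsec := first.Time().UnixTime()
	fmt.Println(sec, nsec)

	// Within one process, getV7Time guarantees a strictly increasing
	// timestamp+sequence, so the string forms compare in generation order.
	fmt.Println(first.String() < second.String()) // true
}
```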
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 0000000..4024335 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 0000000..d31b378 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 0000000..a229538 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000..87d5574 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000..1f72cdd --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,693 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). 
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 + +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them:
+
+| old import | new import | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the standard godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter); a small sketch of direct use follows the buffered example below.
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
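+
+As a rough sketch of that direct route (not from the package documentation; it assumes the `flate.NewStatelessWriter(io.Writer) io.WriteCloser` signature linked above, and the helper name is made up), buffered stateless deflate could look like this:
+
+```go
+package main
+
+import (
+	"bufio"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// statelessCompress is a hypothetical helper, shown only for illustration.
+func statelessCompress(dst io.Writer, src io.Reader) error {
+	fw := flate.NewStatelessWriter(dst) // keeps no state between Write calls
+	// Batch small writes into 4KB chunks; with stateless deflate the size
+	// of each Write directly affects the compressed output size.
+	bw := bufio.NewWriterSize(fw, 4096)
+	if _, err := io.Copy(bw, src); err != nil {
+		fw.Close()
+		return err
+	}
+	if err := bw.Flush(); err != nil {
+		fw.Close()
+		return err
+	}
+	return fw.Close()
+}
+```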
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4, offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 0000000..ca6685e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only qualify if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 0000000..ea5a692 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 0000000..ea7324d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. 
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `nil` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `(error)` | An internal error occurred. |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples); a minimal round-trip sketch is also included at the end of this README.
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
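+
+# Example
+
+A rough, non-normative round-trip sketch using `Compress` and `Decompress` with a re-used `Scratch`, as described in the usage section above. The sample input and the error handling are only illustrative.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// Arbitrary sample input: 8 distinct symbols, so entropy coding helps.
+	in := bytes.Repeat([]byte("abcdefgh"), 1024)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		// The output lives in s.Out; copy it if the Scratch will be re-used.
+		comp = append([]byte(nil), comp...)
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Store the block uncompressed (or RLE encoded) instead.
+		fmt.Println("not compressed:", err)
+		return
+	default:
+		panic(err)
+	}
+
+	// Re-use the same Scratch for decompression. The compressed block was
+	// copied above, but clearing Out shows the recommended pattern when the
+	// previous output may still be referenced elsewhere.
+	s.Out = nil
+	got, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(got, in))
+}
+```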
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 0000000..f65eb39 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 0000000..e82fa3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 0000000..abade2d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 0000000..074018d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 0000000..535cbad --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000..aff9422
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000..b3d2629
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000..8b6e5c6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring out-of-order (OoO) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
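+
+A rough end-to-end sketch of the calls described above is shown below; the sample input, the error handling and
+setting `MaxDecodedSize` to the known original length are illustrative assumptions, not the only valid way to use the API:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	// Skewed byte distributions are where entropy coding pays off.
+	in := bytes.Repeat([]byte("aaaabbc"), 1000)
+
+	var s huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &s)
+	switch err {
+	case nil:
+		// Compressed block (table followed by data) is in comp.
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		// Expected for some inputs: store the block raw or as RLE instead.
+		log.Fatalf("not entropy coded: %v", err)
+	default:
+		log.Fatal(err)
+	}
+
+	// ReadTable consumes the table at the start of the block and
+	// returns the remaining data part for the decompressor.
+	dec, data, err := huff0.ReadTable(comp, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	dec.MaxDecodedSize = len(in) // the caller must track the decompressed size
+	out, err := dec.Decompress1X(data)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(bytes.Equal(in, out), len(in), "->", len(comp))
+}
+```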
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows previous tables to be re-used for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000..e36d974
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 0000000..0ebc9aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 0000000..84aa3d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 0000000..54bd08b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 0000000..ba7e8e6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc

+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of thir main loops.
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 0000000..c4c7ab2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 0000000..908c17d --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 0000000..77ecd68 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + srcLen int + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. 
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000..3954c51 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000..e802579 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000..4465fbe --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 0000000..40796a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 0000000..77395a6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 0000000..13c6040 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 0000000..2aa6a95 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 0000000..34d01f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 0000000..5a4412f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 0000000..92e2347 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. 
Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. 
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
+
+For decoding buffers, a setup could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default, 4 decompressors will be created.
+
+The decoder will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will retain the registered dictionaries.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Only one dictionary will be used,
+and it will likely be applied even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For more information, see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, and all running goroutines will be stopped.
+So you *must* call it when you no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case, no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+Effectively, this means the decoder will "read ahead" and prepare data so it is always available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice, this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes, and the rest are smaller inputs.
+
+Running on an AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2022, but may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and a decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
+to use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
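+
+For illustration, a minimal sketch of per-archive registration could look like the following.
+It assumes this package exposes `ZipCompressor`, `ZipDecompressor` and the `ZipMethodWinZip` method ID
+(see the linked example below for the exact API); the helper function names here are made up for the sketch.
+
+```Go
+import (
+	"archive/zip"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// writeZstdEntry (hypothetical helper) adds one zstd-compressed file to a zip.Writer.
+func writeZstdEntry(zw *zip.Writer, name string, data []byte) error {
+	// Register on this specific writer only, never globally.
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(data)
+	return err
+}
+
+// openZstdEntry (hypothetical helper) opens one entry from a zip.Reader.
+func openZstdEntry(zr *zip.Reader, name string) (io.ReadCloser, error) {
+	// Register on this specific reader only, never globally.
+	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
+	return zr.Open(name)
+}
+```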
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 0000000..25ca983 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 0000000..1952f17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 0000000..9f17ce6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err 
+ } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 0000000..2cfe925 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,889 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) 
+ return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. 
+ var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+ outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<= b.size { + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() + b.litEnc.Reuse = huff0.ReusePolicyNone + return nil + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 0000000..01a01e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 0000000..55a3885 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 0000000..0e59a24 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 0000000..6a5a298 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := 
fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 0000000..f04aaa2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. 
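The Header type with Decode/DecodeAndStrip above is the public entry point for cheap frame inspection. A rough usage sketch (illustrative; the buffer handling and error reporting are placeholders): feed the first HeaderMaxSize bytes of a stream to Header.Decode and read the parsed fields.

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// HeaderMaxSize bytes are enough for the frame header and the first block header.
	buf := make([]byte, zstd.HeaderMaxSize)
	n, _ := os.Stdin.Read(buf)

	var h zstd.Header
	if err := h.Decode(buf[:n]); err != nil {
		fmt.Fprintln(os.Stderr, "not a zstd frame:", err)
		return
	}
	fmt.Println("window:", h.WindowSize, "dict:", h.DictionaryID, "skippable:", h.Skippable)
	if h.HasFCS {
		fmt.Println("frame content size:", h.FrameContentSize)
	}
}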
+ current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. 
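The NewReader documentation above describes the streaming mode. A minimal streaming sketch (illustrative; the use of stdin/stdout is an arbitrary example): wrap an io.Reader, drain the decompressed data via io.Copy, and release resources with Close.

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Wrap the compressed stream. A nil reader plus a later Reset(r) works as well.
	dec, err := zstd.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	// Close releases goroutines and buffers; the decoder cannot be reused afterwards.
	defer dec.Close()

	// Decoder implements io.Reader and io.WriterTo, so io.Copy streams the output.
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}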
+func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
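DecodeAll, documented above, is the stateless path: a single long-lived Decoder created with a nil reader can serve many concurrent buffer-to-buffer decodes. A sketch under that assumption (the package and function names are placeholders):

package example

import "github.com/klauspost/compress/zstd"

// A shared decoder for stateless use; created once with a nil reader.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

// decompress is safe to call from multiple goroutines.
func decompress(compressed []byte) ([]byte, error) {
	// Passing nil lets DecodeAll allocate; a pre-sized dst avoids allocations when the size is known.
	return decoder.DecodeAll(compressed, nil)
}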
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 0000000..774c5f0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
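These DOption values compose as functional options to NewReader. A sketch of a decoder locked down for untrusted input (the specific limits are arbitrary example values):

package example

import "github.com/klauspost/compress/zstd"

func newLimitedDecoder() (*zstd.Decoder, error) {
	return zstd.NewReader(nil,
		zstd.WithDecoderConcurrency(1),    // synchronous decoding, no spawned goroutines
		zstd.WithDecoderMaxMemory(64<<20), // cap a single decoded output at 64 MiB
		zstd.WithDecoderMaxWindow(8<<20),  // reject frames demanding a larger window
		zstd.WithDecoderLowmem(true),      // prefer smaller allocations
	)
}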
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 0000000..8d5567f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,534 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. 
+ nUsed = seqs / 512 + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 0000000..5ca4603 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
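BuildDict above emits a dictionary blob that the encoder and decoder consume via their dictionary options. A rough round-trip sketch (the sample payloads, ID, and offsets are illustrative; {1, 4, 8} mirrors the starting repeat offsets used elsewhere in this package):

package example

import "github.com/klauspost/compress/zstd"

func buildAndUseDict(samples [][]byte, history []byte) error {
	dictBytes, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1,
		Contents: samples,         // representative payloads to derive tables from
		History:  history,         // shared content stored in the dictionary
		Offsets:  [3]int{1, 4, 8}, // starting repeat offsets
		Level:    zstd.SpeedBestCompression,
	})
	if err != nil {
		return err
	}
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dictBytes))
	if err != nil {
		return err
	}
	defer enc.Close()
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		return err
	}
	defer dec.Close()
	compressed := enc.EncodeAll(samples[0], nil)
	_, err = dec.DecodeAll(compressed, nil)
	return err
}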
+func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 0000000..87f4287 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,548 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
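Note on the enc_base.go hunk above: fastBase.matchlen defers to a matchLen routine that is not part of this hunk; its contract is simply the number of equal leading bytes of the two slices, so matchlen(s, t, src) reports how far the data at s keeps matching the data at t. A minimal sketch of that semantics follows (illustrative only; naiveMatchLen is a hypothetical name, and the vendored matchLen is an optimized equivalent of this loop, not this code).

package main

import "fmt"

// naiveMatchLen returns how many leading bytes a and b have in common.
// The vendored matchLen has the same result but typically compares
// word-at-a-time; only the semantics are shown here.
func naiveMatchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	// fastBase.matchlen(s, t, src) is int32(matchLen(src[s:], src[t:])).
	src := []byte("abcabcabx")
	// Compare the data starting at offset 3 with the data at offset 0:
	// "abcab" matches before 'x' differs from 'c', so the result is 5.
	fmt.Println(naiveMatchLen(src[3:], src[0:]))
}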
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 0000000..20d25b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1241 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. 
Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. 
+ for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 0000000..a154c18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
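+ // The match above used offset2, which now becomes the most recently
+ // used offset, so the two tracked repeat offsets swap places.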
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
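+ // Entries that have fallen outside the match window are cleared; the
+ // rest are rebased so their offsets stay valid after e.cur is rewound
+ // to e.maxMatchOff below.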
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
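+ // Beyond that much new input most table shards have been touched
+ // anyway, so per-shard tracking no longer saves work on the next Reset.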
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 0000000..f45a3da --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
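+ // maxMatchLength is the longest match these encoders will emit for a
+ // single sequence.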
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 0000000..72af7ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,619 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
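+//
+// A minimal usage sketch (w and uncompressed stand in for the caller's
+// io.Writer and []byte; error handling is omitted for brevity):
+//
+//	enc, _ := NewWriter(w)
+//	_, _ = enc.Write(uncompressed)
+//	_ = enc.Close()
+//
+// For in-memory buffers, EncodeAll compresses a slice in a single call:
+//
+//	enc, _ := NewWriter(nil)
+//	compressed := enc.EncodeAll(uncompressed, nil)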
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. 
+ // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 0000000..20671dc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. 
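// An illustrative sketch of how the EOption values defined in this file are
// typically combined when constructing an Encoder. NewWriter and io.Writer are
// assumed from the package's public API and the standard library and are not
// shown in this hunk; the helper name below is illustrative only.
func exampleEncoderOptions(dst io.Writer, payload []byte) error {
	enc, err := NewWriter(dst,
		WithEncoderLevel(SpeedBetterCompression), // the level also picks window/block defaults
		WithEncoderCRC(true),                     // append the 4-byte xxhash checksum
		WithEncoderConcurrency(2),                // 1 disables async stream compression
	)
	if err != nil {
		return err
	}
	if _, err := enc.Write(payload); err != nil {
		return err
	}
	return enc.Close() // the Encoder can be reused after Close
}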
+func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level and max 8MB. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. 
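// A worked example of the padding rule described for WithEncoderPadding above,
// mirroring the calcSkippableFrame logic in frameenc.go. The concrete numbers
// and the helper name are illustrative only.
func examplePaddingMath() int {
	const pad = 4096          // WithEncoderPadding(4096)
	const written = 10000     // bytes emitted so far
	leftOver := written % pad // 1808
	toAdd := pad - leftOver   // 2288, comfortably above the 8-byte skippable frame header
	return written + toAdd    // 12288 = 3 * 4096
}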
+func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 0000000..53e160f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,413 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. 
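// A small worked example of the Window_Descriptor arithmetic performed by
// reset() below. The descriptor byte 0x61 and the helper name are illustrative
// values, not taken from a real stream.
func exampleWindowDescriptor() uint64 {
	const wd = 0x61                          // example descriptor byte
	windowLog := 10 + (wd >> 3)              // exponent: 10 + 12 = 22
	windowBase := uint64(1) << windowLog     // 1 << 22 = 4 MiB
	windowAdd := (windowBase / 8) * (wd & 7) // mantissa 1 -> 512 KiB
	return windowBase + windowAdd            // 4718592 bytes (4.5 MiB)
}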
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. 
+ d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 0000000..667ca06 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 0000000..2f8860a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder til RLE mode.
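// A quick illustration of the decSymbol packing defined above: nbBits lives in
// the low byte, addBits in the next byte, newState in bits 16-31 and the
// baseline in the upper 32 bits. The values and the helper name are arbitrary.
func exampleDecSymbol() {
	d := newDecSymbol(5, 3, 100, 1000)
	println(d.nbBits(), d.addBits(), d.newState(), d.baselineInt()) // 5 3 100 1000
}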
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000..d04a829 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000..bcde398 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
+ +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 0000000..8adfebb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
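// A worked example of the symbol-spreading step used by buildDtable below (and
// by the assembly version above): tableStep(64) = 32 + 8 + 3 = 43. The step is
// odd, so it is coprime with the power-of-two table size; stepping modulo 64
// therefore visits every slot exactly once before returning to 0, which is why
// ending at a non-zero position is treated as a corrupted normalized counter.
// The helper name is illustrative and it ignores the low-probability
// (highThreshold) handling.
func exampleTableStepWalk() int {
	step := tableStep(64) // 43
	visited := 1          // slot 0 is the starting point
	for pos := step & 63; pos != 0; pos = (pos + step) & 63 {
		visited++
	}
	return visited // 64: the walk covers the whole table
}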
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 0000000..ab26326 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+ )
+ if weight < 1 {
+ return errors.New("weight < 1")
+ }
+ s.norm[i] = int16(weight)
+ tmpTotal = end
+ }
+ }
+ return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+ tableLog := uint8(maxEncTableLog)
+ minBitsSrc := highBit(uint32(length)) + 1
+ minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+ minBits := uint8(minBitsSymbols)
+ if minBitsSrc < minBitsSymbols {
+ minBits = uint8(minBitsSrc)
+ }
+
+ maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minEncTablelog {
+ tableLog = minEncTablelog
+ }
+ if tableLog > maxEncTableLog {
+ tableLog = maxEncTableLog
+ }
+ s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+ }
+ return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ if s.preDefined || s.reUsed {
+ // Never write predefined.
+ return out, nil
+ }
+
+ var (
+ tableLog = s.actualTableLog
+ tableSize = 1 << tableLog
+ previous0 bool
+ charnum uint16
+
+ // maximum header size plus 2 extra bytes for final output if bitCount == 0.
+ maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minEncTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ outP = len(out)
+ )
+ if cap(out) < outP+maxHeaderSize {
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
+ }
+ out = out[:outP+maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 0000000..474cb77 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 0000000..5d73c21 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 0000000..0916485 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
+ discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 0000000..24b5306 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 0000000..777290d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. 
+You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 0000000..fc40c82 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. 
+ nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 0000000..ddb63aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 0000000..17901e0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 0000000..d4221ed --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 0000000..0be16ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 0000000..6f3b0cb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 0000000..f41932b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 0000000..9a7655c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 0000000..57b9c31 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 0000000..d7fe6d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000..8adabd8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000..5b06174 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000..2fb35b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 0000000..8014174 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 0000000..ec13594 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 0000000..29c15c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 0000000..4be7cc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) 
+ } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml deleted file mode 100644 index 3e7c3d2..0000000 --- a/vendor/github.com/magiconair/properties/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index f83adc2..842e8e2 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -1,5 +1,79 @@ ## Changelog +### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 + + * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge + + Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. + + * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions + +### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 + + * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error + + Thanks to [@ellie](https://github.com/ellie) for the patch. + + * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible + + This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the + author happy until it affects real users. + + Thanks to [@maage](https://github.com/maage) for the patch. + +### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 + + * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments + + When reading comments \ are loaded correctly, but when writing they are then + replaced by \\. This leads to wrong comments when writing and reading multiple times. + + Thanks to [@doxsch](https://github.com/doxsch) for the patch. + +### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 + + * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references + + Thanks to [@sriv](https://github.com/sriv) for the patch. + +### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 + + * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference + + The change is include the key in the error message which is causing the circular + reference when parsing/loading the properties files. + + Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. + +### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 + + * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write + + This patch ensures that backslashes are escaped on write. Existing applications which + rely on the old behavior may need to be updated. + + Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. + + * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() + + Thanks to [@aliras1](https://github.com/aliras1) for the patch. 
+ + * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + + * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + +### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 + + * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request + + This patch ensures that in `LoadURL` the response body is always closed. + + Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. + ### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE.md similarity index 84% rename from vendor/github.com/magiconair/properties/LICENSE rename to vendor/github.com/magiconair/properties/LICENSE.md index b387087..79c87e3 100644 --- a/vendor/github.com/magiconair/properties/LICENSE +++ b/vendor/github.com/magiconair/properties/LICENSE.md @@ -1,15 +1,14 @@ -goproperties - properties file decoder for Go - -Copyright (c) 2013-2018 - Frank Schroeder +Copyright (c) 2013-2020, Frank Schroeder All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, + + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 2c05f29..e2edda0 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,6 +1,5 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) [![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![Codeship CI Status](https://img.shields.io/codeship/16aaf660-f615-0135-b8f0-7e33b70920c0/master.svg?label=codeship&style=flat-square)](https://app.codeship.com/projects/274177") [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) @@ -30,7 +29,7 @@ changed from `panic` to `log.Fatal` but this is configurable and custom error handling functions can be provided. See the package documentation for details. 
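A minimal sketch of the configurable error handling described above, assuming only the `ErrorHandler`, `MustLoadFile`, `UTF8`, and `GetString` names that appear in the package documentation elsewhere in this patch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/magiconair/properties"
)

func main() {
	// Replace the default log.Fatal behaviour with a custom handler.
	// The handler is expected to exit after handling the error.
	properties.ErrorHandler = func(err error) {
		fmt.Println(err)
		os.Exit(1)
	}

	// Load errors (e.g. a missing file) go through the handler above.
	p := properties.MustLoadFile("config.properties", properties.UTF8)
	fmt.Println(p.GetString("key", "default value"))
}
```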
-Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties) +Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) ## Getting Started diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go index 3ebf804..8e6aa44 100644 --- a/vendor/github.com/magiconair/properties/decode.go +++ b/vendor/github.com/magiconair/properties/decode.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -48,49 +48,49 @@ import ( // // Examples: // -// // Field is ignored. -// Field int `properties:"-"` +// // Field is ignored. +// Field int `properties:"-"` // -// // Field is assigned value of 'Field'. -// Field int +// // Field is assigned value of 'Field'. +// Field int // -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` // -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` // -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` // -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` // -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string // -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` // -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct // -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` // -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. 
-// Field map[string]string +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string // -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string `properties:"myName"` +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string `properties:"myName"` func (p *Properties) Decode(x interface{}) error { t, v := reflect.TypeOf(x), reflect.ValueOf(x) if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go index f8822da..7c79793 100644 --- a/vendor/github.com/magiconair/properties/doc.go +++ b/vendor/github.com/magiconair/properties/doc.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -13,7 +13,7 @@ // // To load a single properties file use MustLoadFile(): // -// p := properties.MustLoadFile(filename, properties.UTF8) +// p := properties.MustLoadFile(filename, properties.UTF8) // // To load multiple properties files use MustLoadFiles() // which loads the files in the given order and merges the @@ -23,25 +23,25 @@ // Filenames can contain environment variables which are expanded // before loading. // -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) // // All of the different key/value delimiters ' ', ':' and '=' are // supported as well as the comment characters '!' and '#' and // multi-line values. // -// ! this is a comment -// # and so is this +// ! this is a comment +// # and so is this // -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue // // Properties stores all comments preceding a key and provides // GetComments() and SetComments() methods to retrieve and @@ -55,62 +55,62 @@ // and malformed expressions are not allowed and cause an // error. Expansion of environment variables is supported. 
// -// # standard property -// key = value +// # standard property +// key = value // -// # property expansion: key2 = value -// key2 = ${key} +// # property expansion: key2 = value +// key2 = ${key} // -// # recursive expansion: key3 = value -// key3 = ${key2} +// # recursive expansion: key3 = value +// key3 = ${key2} // -// # circular reference (error) -// key = ${key} +// # circular reference (error) +// key = ${key} // -// # malformed expression (error) -// key = ${ke +// # malformed expression (error) +// key = ${ke // -// # refers to the users' home dir -// home = ${HOME} +// # refers to the users' home dir +// home = ${HOME} // -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} // // The default property expansion format is ${key} but can be // changed by setting different pre- and postfix values on the // Properties object. // -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" // // Properties provides convenience functions for getting typed // values with default values if the key does not exist or the // type conversion failed. // -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) // -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) // // As an alternative properties may be applied with the standard // library's flag implementation at any time. // -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() // -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) // // Properties provides several MustXXX() convenience functions // which will terminate the app if an error occurs. The behavior @@ -119,30 +119,30 @@ // of logging the error set a different ErrorHandler before // you use the Properties package. // -// properties.ErrorHandler = properties.PanicHandler +// properties.ErrorHandler = properties.PanicHandler // -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") // // You can also provide your own ErrorHandler function. The only requirement // is that the error handler function must exit after handling the error. 
// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } // -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") // // Properties can also be loaded into a struct via the `Decode` // method, e.g. // -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } // // See `Decode()` method for the full documentation. // @@ -152,5 +152,4 @@ // http://en.wikipedia.org/wiki/.properties // // http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -// package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go index 74d38dc..35d0ae9 100644 --- a/vendor/github.com/magiconair/properties/integrate.go +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,8 +10,9 @@ import "flag" // the respective key for flag.Flag.Name. // // It's use is recommended with command line arguments as in: -// flag.Parse() -// p.MustFlag(flag.CommandLine) +// +// flag.Parse() +// p.MustFlag(flag.CommandLine) func (p *Properties) MustFlag(dst *flag.FlagSet) { m := make(map[string]*flag.Flag) dst.VisitAll(func(f *flag.Flag) { diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go index 367166d..3d15a1f 100644 --- a/vendor/github.com/magiconair/properties/lex.go +++ b/vendor/github.com/magiconair/properties/lex.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // @@ -128,18 +128,6 @@ func (l *lexer) acceptRun(valid string) { l.backup() } -// acceptRunUntil consumes a run of runes up to a terminator. -func (l *lexer) acceptRunUntil(term rune) { - for term != l.next() { - } - l.backup() -} - -// hasText returns true if the current parsed text is not empty. -func (l *lexer) isNotEmpty() bool { - return l.pos > l.start -} - // lineNumber reports which line we're on, based on the position of // the previous item returned by nextItem. Doing it this way // means we don't have to worry about peek double counting. diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index c8e1b58..635368d 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -115,6 +115,7 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { if err != nil { return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) } + defer resp.Body.Close() if resp.StatusCode == 404 && l.IgnoreMissing { LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) @@ -129,14 +130,14 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { if err != nil { return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) } - defer resp.Body.Close() ct := resp.Header.Get("Content-Type") + ct = strings.Join(strings.Fields(ct), "") var enc Encoding switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": + case "", "text/plain;charset=utf-8": enc = UTF8 default: return nil, fmt.Errorf("properties: invalid content type %s", ct) diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go index cdc4a80..fccfd39 100644 --- a/vendor/github.com/magiconair/properties/parser.go +++ b/vendor/github.com/magiconair/properties/parser.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -59,14 +59,6 @@ func (p *parser) errorf(format string, args ...interface{}) { panic(fmt.Errorf(format, args...)) } -func (p *parser) expect(expected itemType) (token item) { - token = p.lex.nextItem() - if token.typ != expected { - p.unexpected(token) - } - return token -} - func (p *parser) expectOneOf(expected ...itemType) (token item) { token = p.lex.nextItem() for _, v := range expected { @@ -91,5 +83,4 @@ func (p *parser) recover(errp *error) { } *errp = e.(error) } - return } diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index cb3d1a3..fb2f7b4 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,11 +8,13 @@ package properties // BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. import ( + "bytes" "fmt" "io" "log" "os" "regexp" + "sort" "strconv" "strings" "time" @@ -69,6 +71,9 @@ type Properties struct { // Stores the keys in order of appearance. k []string + + // WriteSeparator specifies the separator of key and value while writing the properties. + WriteSeparator string } // NewProperties creates a new Properties struct with the default @@ -111,7 +116,7 @@ func (p *Properties) Get(key string) (value string, ok bool) { // circular references and malformed expressions // so we panic if we still get an error here. if err != nil { - ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + ErrorHandler(err) } return expanded, true @@ -586,6 +591,12 @@ func (p *Properties) String() string { return s } +// Sort sorts the properties keys in alphabetical order. +// This is helpfully before writing the properties. 
+func (p *Properties) Sort() { + sort.Strings(p.k) +} + // Write writes all unexpanded 'key = value' pairs to the given writer. // Write returns the number of bytes written and any write error encountered. func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { @@ -626,7 +637,7 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) if err != nil { return } @@ -635,8 +646,11 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } } } - - x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + sep := " = " + if p.WriteSeparator != "" { + sep = p.WriteSeparator + } + x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) if err != nil { return } @@ -686,22 +700,17 @@ func (p *Properties) Delete(key string) { // Merge merges properties, comments and keys from other *Properties into p func (p *Properties) Merge(other *Properties) { + for _, k := range other.k { + if _, ok := p.m[k]; !ok { + p.k = append(p.k, k) + } + } for k, v := range other.m { p.m[k] = v } for k, v := range other.c { p.c[k] = v } - -outer: - for _, otherKey := range other.k { - for _, key := range p.k { - if otherKey == key { - continue outer - } - } - p.k = append(p.k, otherKey) - } } // ---------------------------------------------------------------------------- @@ -753,7 +762,12 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s for _, k := range keys { if key == k { - return "", fmt.Errorf("circular reference") + var b bytes.Buffer + b.WriteString("circular reference in:\n") + for _, k1 := range keys { + fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) + } + return "", fmt.Errorf(b.String()) } } @@ -767,7 +781,6 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s } s = s[:start] + new_val + s[end+1:] } - return s, nil } // encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. @@ -820,6 +833,8 @@ func escape(r rune, special string) string { return "\\r" case '\t': return "\\t" + case '\\': + return "\\\\" default: if strings.ContainsRune(special, r) { return "\\" + string(r) diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go index b013a2e..dbd60b3 100644 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
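A minimal sketch combining the `Sort()` method and `WriteSeparator` field added to properties.go above; the `MustSet` setter used to populate the example is assumed from the package API and is not part of this diff:

```go
package main

import (
	"os"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.MustSet("b.key", "2") // assumed setter, not shown in this patch
	p.MustSet("a.key", "1")

	// New in this version: sort keys alphabetically before writing
	// and use a custom key/value separator instead of the default " = ".
	p.Sort()
	p.WriteSeparator = ": "

	if _, err := p.Write(os.Stdout, properties.UTF8); err != nil {
		panic(err)
	}
}
```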
diff --git a/vendor/github.com/mfridman/interpolate/LICENSE.txt b/vendor/github.com/mfridman/interpolate/LICENSE.txt new file mode 100644 index 0000000..765b73b --- /dev/null +++ b/vendor/github.com/mfridman/interpolate/LICENSE.txt @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2014-2017 Buildkite Pty Ltd +Copyright (c) 2023 Michael Fridman + + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mfridman/interpolate/README.md b/vendor/github.com/mfridman/interpolate/README.md new file mode 100644 index 0000000..48db562 --- /dev/null +++ b/vendor/github.com/mfridman/interpolate/README.md @@ -0,0 +1,85 @@ +# Interpolate + +[![Build Status](https://github.com/mfridman/interpolate/actions/workflows/ci.yaml/badge.svg)](https://github.com/mfridman/interpolate/actions/workflows/ci.yaml) +[![Go Reference](https://pkg.go.dev/badge/github.com/mfridman/interpolate.svg)](https://pkg.go.dev/github.com/mfridman/interpolate) +[![Go Report Card](https://goreportcard.com/badge/github.com/mfridman/interpolate)](https://goreportcard.com/report/github.com/mfridman/interpolate) + +A Go library for parameter expansion (like `${NAME}` or `$NAME`) in strings from environment +variables. An implementation of [POSIX Parameter +Expansion](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_02), +plus some other basic operations that you'd expect in a shell scripting environment [like +bash](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html). + +## Installation + +``` +go get github.com/mfridman/interpolate@latest +``` + +## Usage + +```go +package main + +import ( + "github.com/mfridman/interpolate" + "fmt" +) + +func main() { + env := interpolate.NewSliceEnv([]string{ + "NAME=James", + }) + + output, _ := interpolate.Interpolate(env, "Hello... ${NAME} welcome to the ${ANOTHER_VAR:-🏖}") + + fmt.Println(output) + // Output: Hello... James welcome to the 🏖 +} +``` + +## Supported Expansions + +- `${parameter}` or `$parameter` + + - **Use value.** If parameter is set, then it shall be substituted; otherwise, it will be blank + +- `${parameter:-[word]}` + + - **Use default values.** If parameter is unset or null, the expansion of word (or an empty string + if word is omitted) shall be substituted; otherwise, the value of parameter shall be + substituted. 
+ +- `${parameter-[word]}` + + - **Use default values when not set.** If parameter is unset, the expansion of word (or an empty + string if word is omitted) shall be substituted; otherwise, the value of parameter shall be + substituted. + +- `${parameter:[offset]}` + + - **Use the substring of parameter after offset.** A negative offset must be separated from the + colon with a space, and will select from the end of the string. If the value is out of bounds, + an empty string will be substituted. + +- `${parameter:[offset]:[length]}` + + - **Use the substring of parameter after offset of given length.** A negative offset must be + separated from the colon with a space, and will select from the end of the string. If the offset + is out of bounds, an empty string will be substituted. If the length is greater than the length + then the entire string will be returned. + +- `${parameter:?[word]}` + - **Indicate Error if Null or Unset.** If parameter is unset or null, the expansion of word (or a + message indicating it is unset if word is omitted) shall be returned as an error. + +## Prior work + +This repository is a fork of [buildkite/interpolate](https://github.com/buildkite/interpolate). I'd +like to thank the authors of that library for their work. I've forked it to make some changes that I +needed for my own use cases, and to make it easier to maintain. I've also added some tests and +documentation. + +## License + +Licensed under MIT license, in `LICENSE`. diff --git a/vendor/github.com/mfridman/interpolate/env.go b/vendor/github.com/mfridman/interpolate/env.go new file mode 100644 index 0000000..d149e86 --- /dev/null +++ b/vendor/github.com/mfridman/interpolate/env.go @@ -0,0 +1,53 @@ +package interpolate + +import ( + "runtime" + "strings" +) + +// Env is an interface for getting environment variables by name and returning a boolean indicating +// whether the variable was found. +type Env interface { + Get(key string) (string, bool) +} + +// NewSliceEnv creates an Env from a slice of environment variables in the form "key=value". +// +// This can be used with [os.Environ] to create an Env. +func NewSliceEnv(env []string) Env { + envMap := mapEnv{} + for _, l := range env { + parts := strings.SplitN(l, "=", 2) + if len(parts) == 2 { + envMap[normalizeKeyName(parts[0])] = parts[1] + } + } + return envMap +} + +// NewMapEnv creates an Env from a map of environment variables. 
+func NewMapEnv(env map[string]string) Env { + envMap := mapEnv{} + for k, v := range env { + envMap[normalizeKeyName(k)] = v + } + return envMap +} + +type mapEnv map[string]string + +func (m mapEnv) Get(key string) (string, bool) { + if m == nil { + return "", false + } + val, ok := m[normalizeKeyName(key)] + return val, ok +} + +// Windows isn't case sensitive for env +func normalizeKeyName(key string) string { + if runtime.GOOS == "windows" { + return strings.ToUpper(key) + } + return key +} diff --git a/vendor/github.com/mfridman/interpolate/interpolate.go b/vendor/github.com/mfridman/interpolate/interpolate.go new file mode 100644 index 0000000..b9478a8 --- /dev/null +++ b/vendor/github.com/mfridman/interpolate/interpolate.go @@ -0,0 +1,213 @@ +package interpolate + +import ( + "bytes" + "fmt" +) + +// Interpolate takes a set of environment and interpolates it into the provided string using shell +// script expansions +func Interpolate(env Env, str string) (string, error) { + if env == nil { + env = NewSliceEnv(nil) + } + expr, err := NewParser(str).Parse() + if err != nil { + return "", err + } + return expr.Expand(env) +} + +// Indentifiers parses the identifiers from any expansions in the provided string +func Identifiers(str string) ([]string, error) { + expr, err := NewParser(str).Parse() + if err != nil { + return nil, err + } + return expr.Identifiers(), nil +} + +// An expansion is something that takes in ENV and returns a string or an error +type Expansion interface { + Expand(env Env) (string, error) + Identifiers() []string +} + +// VariableExpansion represents either $VAR or ${VAR}, our simplest expansion +type VariableExpansion struct { + Identifier string +} + +func (e VariableExpansion) Identifiers() []string { + return []string{e.Identifier} +} + +func (e VariableExpansion) Expand(env Env) (string, error) { + val, _ := env.Get(e.Identifier) + return val, nil +} + +// EmptyValueExpansion returns either the value of an env, or a default value if it's unset or null +type EmptyValueExpansion struct { + Identifier string + Content Expression +} + +func (e EmptyValueExpansion) Identifiers() []string { + return append([]string{e.Identifier}, e.Content.Identifiers()...) +} + +func (e EmptyValueExpansion) Expand(env Env) (string, error) { + val, _ := env.Get(e.Identifier) + if val == "" { + return e.Content.Expand(env) + } + return val, nil +} + +// UnsetValueExpansion returns either the value of an env, or a default value if it's unset +type UnsetValueExpansion struct { + Identifier string + Content Expression +} + +func (e UnsetValueExpansion) Identifiers() []string { + return []string{e.Identifier} +} + +func (e UnsetValueExpansion) Expand(env Env) (string, error) { + val, ok := env.Get(e.Identifier) + if !ok { + return e.Content.Expand(env) + } + return val, nil +} + +// SubstringExpansion returns a substring (or slice) of the env +type SubstringExpansion struct { + Identifier string + Offset int + Length int + HasLength bool +} + +func (e SubstringExpansion) Identifiers() []string { + return []string{e.Identifier} +} + +func (e SubstringExpansion) Expand(env Env) (string, error) { + val, _ := env.Get(e.Identifier) + + from := e.Offset + + // Negative offsets = from end + if from < 0 { + from += len(val) + } + + // Still negative = too far from end? Truncate to start. + if from < 0 { + from = 0 + } + + // Beyond end? Truncate to end. 
+ if from > len(val) { + from = len(val) + } + + if !e.HasLength { + return val[from:], nil + } + + to := e.Length + + if to >= 0 { + // Positive length = from offset + to += from + } else { + // Negative length = from end + to += len(val) + + // Too far? Truncate to offset. + if to < from { + to = from + } + } + + // Beyond end? Truncate to end. + if to > len(val) { + to = len(val) + } + + return val[from:to], nil +} + +// RequiredExpansion returns an env value, or an error if it is unset +type RequiredExpansion struct { + Identifier string + Message Expression +} + +func (e RequiredExpansion) Identifiers() []string { + return []string{e.Identifier} +} + +func (e RequiredExpansion) Expand(env Env) (string, error) { + val, ok := env.Get(e.Identifier) + if !ok { + msg, err := e.Message.Expand(env) + if err != nil { + return "", err + } + if msg == "" { + msg = "not set" + } + return "", fmt.Errorf("$%s: %s", e.Identifier, msg) + } + return val, nil +} + +// Expression is a collection of either Text or Expansions +type Expression []ExpressionItem + +func (e Expression) Identifiers() []string { + identifiers := []string{} + for _, item := range e { + if item.Expansion != nil { + identifiers = append(identifiers, item.Expansion.Identifiers()...) + } + } + return identifiers +} + +func (e Expression) Expand(env Env) (string, error) { + buf := &bytes.Buffer{} + + for _, item := range e { + if item.Expansion != nil { + result, err := item.Expansion.Expand(env) + if err != nil { + return "", err + } + _, _ = buf.WriteString(result) + } else { + _, _ = buf.WriteString(item.Text) + } + } + + return buf.String(), nil +} + +// ExpressionItem models either an Expansion or Text. Either/Or, never both. +type ExpressionItem struct { + Text string + // -- or -- + Expansion Expansion +} + +func (i ExpressionItem) String() string { + if i.Expansion != nil { + return fmt.Sprintf("%#v", i.Expansion) + } + return fmt.Sprintf("%q", i.Text) +} diff --git a/vendor/github.com/mfridman/interpolate/parser.go b/vendor/github.com/mfridman/interpolate/parser.go new file mode 100644 index 0000000..936608e --- /dev/null +++ b/vendor/github.com/mfridman/interpolate/parser.go @@ -0,0 +1,281 @@ +package interpolate + +import ( + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// This is a recursive descent parser for our grammar. Because it can contain nested expressions +// like ${LLAMAS:-${ROCK:-true}} we can't use regular expressions. The simplest possible alternative +// is a recursive parser like this. It parses a chunk and then calls a function to parse that +// further and so on and so forth. It results in a tree of objects that represent the things we've +// parsed (an AST). This means that the logic for how expansions work lives in those objects, and +// the logic for how we go from plain text to parsed objects lives here. +// +// To keep things simple, we do our "lexing" or "scanning" just as a few functions at the end of the +// file rather than as a dedicated lexer that emits tokens. This matches the simplicity of the +// format we are parsing relatively well +// +// Below is an EBNF grammar for the language. 
The parser was built by basically turning this into +// functions and structs named the same reading the string bite by bite (peekRune and nextRune) + +/* +EscapedBackslash = "\\" EscapedDollar = ( "\$" | "$$") Identifier = letter { letters | +digit | "_" } Expansion = "$" ( Identifier | Brace ) Brace = "{" Identifier [ +Identifier BraceOperation ] "}" Text = { EscapedBackslash | EscapedDollar | all characters except +"$" } Expression = { Text | Expansion } EmptyValue = ":-" { Expression } UnsetValue = +"-" { Expression } Substring = ":" number [ ":" number ] Required = "?" { Expression } +Operation = EmptyValue | UnsetValue | Substring | Required +*/ + +const ( + eof = -1 +) + +// Parser takes a string and parses out a tree of structs that represent text and Expansions +type Parser struct { + input string // the string we are scanning + pos int // the current position +} + +// NewParser returns a new instance of a Parser +func NewParser(str string) *Parser { + return &Parser{ + input: str, + pos: 0, + } +} + +// Parse expansions out of the internal text and return them as a tree of Expressions +func (p *Parser) Parse() (Expression, error) { + return p.parseExpression() +} + +func (p *Parser) parseExpression(stop ...rune) (Expression, error) { + var expr Expression + var stopStr = string(stop) + + for { + c := p.peekRune() + if c == eof || strings.ContainsRune(stopStr, c) { + break + } + + // check for our escaped characters first, as we assume nothing subsequently is escaped + if strings.HasPrefix(p.input[p.pos:], `\\`) { + p.pos += 2 + expr = append(expr, ExpressionItem{Text: `\\`}) + continue + } else if strings.HasPrefix(p.input[p.pos:], `\$`) || strings.HasPrefix(p.input[p.pos:], `$$`) { + p.pos += 2 + expr = append(expr, ExpressionItem{Text: `$`}) + continue + } + + // Ignore bash shell expansions + if strings.HasPrefix(p.input[p.pos:], `$(`) { + p.pos += 2 + expr = append(expr, ExpressionItem{Text: `$(`}) + continue + } + + // If we run into a dollar sign and it's not the last char, it's an expansion + if c == '$' && p.pos < (len(p.input)-1) { + expansion, err := p.parseExpansion() + if err != nil { + return nil, err + } + expr = append(expr, ExpressionItem{Expansion: expansion}) + continue + } + + // nibble a character, otherwise if it's a \ or a $ we can loop + c = p.nextRune() + + // Scan as much as we can into text + text := p.scanUntil(func(r rune) bool { + return (r == '$' || r == '\\' || strings.ContainsRune(stopStr, r)) + }) + + expr = append(expr, ExpressionItem{Text: string(c) + text}) + } + + return expr, nil +} + +func (p *Parser) parseExpansion() (Expansion, error) { + if c := p.nextRune(); c != '$' { + return nil, fmt.Errorf("Expected expansion to start with $, got %c", c) + } + + // if we have an open brace, this is a brace expansion + if c := p.peekRune(); c == '{' { + return p.parseBraceExpansion() + } + + identifier, err := p.scanIdentifier() + if err != nil { + return nil, err + } + + return VariableExpansion{Identifier: identifier}, nil +} + +func (p *Parser) parseBraceExpansion() (Expansion, error) { + if c := p.nextRune(); c != '{' { + return nil, fmt.Errorf("Expected brace expansion to start with {, got %c", c) + } + + identifier, err := p.scanIdentifier() + if err != nil { + return nil, err + } + + if c := p.peekRune(); c == '}' { + _ = p.nextRune() + return VariableExpansion{Identifier: identifier}, nil + } + + var operator string + var exp Expansion + + // Parse an operator, some trickery is needed to handle : vs :- + if op1 := p.nextRune(); op1 == ':' { + 
if op2 := p.peekRune(); op2 == '-' { + _ = p.nextRune() + operator = ":-" + } else { + operator = ":" + } + } else if op1 == '?' || op1 == '-' { + operator = string(op1) + } else { + return nil, fmt.Errorf("Expected an operator, got %c", op1) + } + + switch operator { + case `:-`: + exp, err = p.parseEmptyValueExpansion(identifier) + if err != nil { + return nil, err + } + case `-`: + exp, err = p.parseUnsetValueExpansion(identifier) + if err != nil { + return nil, err + } + case `:`: + exp, err = p.parseSubstringExpansion(identifier) + if err != nil { + return nil, err + } + case `?`: + exp, err = p.parseRequiredExpansion(identifier) + if err != nil { + return nil, err + } + } + + if c := p.nextRune(); c != '}' { + return nil, fmt.Errorf("Expected brace expansion to end with }, got %c", c) + } + + return exp, nil +} + +func (p *Parser) parseEmptyValueExpansion(identifier string) (Expansion, error) { + // parse an expression (text and expansions) up until the end of the brace + expr, err := p.parseExpression('}') + if err != nil { + return nil, err + } + + return EmptyValueExpansion{Identifier: identifier, Content: expr}, nil +} + +func (p *Parser) parseUnsetValueExpansion(identifier string) (Expansion, error) { + expr, err := p.parseExpression('}') + if err != nil { + return nil, err + } + + return UnsetValueExpansion{Identifier: identifier, Content: expr}, nil +} + +func (p *Parser) parseSubstringExpansion(identifier string) (Expansion, error) { + offset := p.scanUntil(func(r rune) bool { + return r == ':' || r == '}' + }) + + offsetInt, err := strconv.Atoi(strings.TrimSpace(offset)) + if err != nil { + return nil, fmt.Errorf("Unable to parse offset: %v", err) + } + + if c := p.peekRune(); c == '}' { + return SubstringExpansion{Identifier: identifier, Offset: offsetInt}, nil + } + + _ = p.nextRune() + length := p.scanUntil(func(r rune) bool { + return r == '}' + }) + + lengthInt, err := strconv.Atoi(strings.TrimSpace(length)) + if err != nil { + return nil, fmt.Errorf("Unable to parse length: %v", err) + } + + return SubstringExpansion{Identifier: identifier, Offset: offsetInt, Length: lengthInt, HasLength: true}, nil +} + +func (p *Parser) parseRequiredExpansion(identifier string) (Expansion, error) { + expr, err := p.parseExpression('}') + if err != nil { + return nil, err + } + + return RequiredExpansion{Identifier: identifier, Message: expr}, nil +} + +func (p *Parser) scanUntil(f func(rune) bool) string { + start := p.pos + for int(p.pos) < len(p.input) { + c, size := utf8.DecodeRuneInString(p.input[p.pos:]) + if c == utf8.RuneError || f(c) { + break + } + p.pos += size + } + return p.input[start:p.pos] +} + +func (p *Parser) scanIdentifier() (string, error) { + if c := p.peekRune(); !unicode.IsLetter(c) { + return "", fmt.Errorf("Expected identifier to start with a letter, got %c", c) + } + var notIdentifierChar = func(r rune) bool { + return (!unicode.IsLetter(r) && !unicode.IsNumber(r) && r != '_') + } + return p.scanUntil(notIdentifierChar), nil +} + +func (p *Parser) nextRune() rune { + if int(p.pos) >= len(p.input) { + return eof + } + c, size := utf8.DecodeRuneInString(p.input[p.pos:]) + p.pos += size + return c +} + +func (p *Parser) peekRune() rune { + if int(p.pos) >= len(p.input) { + return eof + } + c, _ := utf8.DecodeRuneInString(p.input[p.pos:]) + return c +} diff --git a/vendor/github.com/paulmach/orb/CHANGELOG.md b/vendor/github.com/paulmach/orb/CHANGELOG.md index 5691ca1..1783940 100644 --- a/vendor/github.com/paulmach/orb/CHANGELOG.md +++ 
b/vendor/github.com/paulmach/orb/CHANGELOG.md @@ -2,6 +2,65 @@ All notable changes to this project will be documented in this file. +## [v0.11.1](https://github.com/paulmach/orb/compare/v0.11.0...v0.11.1) - 2024-01-29 + +### Fixed + +- geojson: `null` json into non-pointer Feature/FeatureCollection will set them to empty by [@paulmach](https://github.com/paulmach)in https://github.com/paulmach/orb/pull/145 + +## [v0.11.0](https://github.com/paulmach/orb/compare/v0.10.0...v0.11.0) - 2024-01-11 + +### Fixed + +- quadtree: InBoundMatching does not properly accept passed-in buffer by [@nirmal-vuppuluri](https://github.com/nirmal-vuppuluri) in https://github.com/paulmach/orb/pull/139 +- mvt: Do not swallow error cause by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/137 + +### Changed + +- simplify: Visvalingam, by default, keeps 3 points for "areas" by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/140 +- encoding/mvt: skip encoding of features will nil geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/141 +- encoding/wkt: improve unmarshalling performance by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/142 + +## [v0.10.0](https://github.com/paulmach/orb/compare/v0.9.2...v0.10.0) - 2023-07-16 + +### Added + +- add ChildrenInZoomRange method to maptile.Tile by [@peitili](https://github.com/peitili) in https://github.com/paulmach/orb/pull/133 + +## [v0.9.2](https://github.com/paulmach/orb/compare/v0.9.1...v0.9.2) - 2023-05-04 + +### Fixed + +- encoding/wkt: better handling/validation of missing parens by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/131 + +## [v0.9.1](https://github.com/paulmach/orb/compare/v0.9.0...v0.9.1) - 2023-04-26 + +### Fixed + +- Bump up mongo driver to 1.11.4 by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/129 +- encoding/wkt: split strings with regexp by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/128 + +## [v0.9.0](https://github.com/paulmach/orb/compare/v0.8.0...v0.9.0) - 2023-02-19 + +### Added + +- geojson: marshal/unmarshal BSON [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/123 + +## [v0.8.0](https://github.com/paulmach/orb/compare/v0.7.1...v0.8.0) - 2023-01-05 + +### Fixed + +- quadtree: fix bad sort due to pointer allocation issue by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/115 +- geojson: ensure geometry unmarshal errors get returned by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/117 +- encoding/mvt: remove use of crypto/md5 to compare marshalling in tests by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/118 +- encoding/wkt: fix panic for some invalid wkt data by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/119 + +### Other + +- fix typo by [@rubenpoppe](https://github.com/rubenpoppe) in https://github.com/paulmach/orb/pull/107 +- Fixed a small twister in README.md by [@Timahawk](https://github.com/Timahawk) in https://github.com/paulmach/orb/pull/108 +- update github ci to use go 1.19 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/116 + ## [v0.7.1](https://github.com/paulmach/orb/compare/v0.7.0...v0.7.1) - 2022-05-16 No changes diff --git a/vendor/github.com/paulmach/orb/README.md 
b/vendor/github.com/paulmach/orb/README.md index 8d0e515..03fe463 100644 --- a/vendor/github.com/paulmach/orb/README.md +++ b/vendor/github.com/paulmach/orb/README.md @@ -2,7 +2,7 @@ Package `orb` defines a set of types for working with 2d geo and planar/projected geometric data in Golang. There are a set of sub-packages that use these types to do interesting things. -They each provider their own README with extra info. +They each provide their own README with extra info. ## Interesting features @@ -111,6 +111,8 @@ The library supports third party "encoding/json" replacements such [github.com/json-iterator/go](https://github.com/json-iterator/go). See the [geojson](geojson) readme for more details. +The types also support BSON so they can be used directly when working with MongoDB. + ## Mapbox Vector Tiles The [encoding/mvt](encoding/mvt) sub-package implements Marshalling and @@ -138,10 +140,10 @@ layers.Simplify(simplify.DouglasPeucker(1.0)) layers.RemoveEmpty(1.0, 2.0) // encoding using the Mapbox Vector Tile protobuf encoding. -data, err := layers.Marshal() // this data is NOT gzipped. +data, err := mvt.Marshal(layers) // this data is NOT gzipped. // Sometimes MVT data is stored and transfered gzip compressed. In that case: -data, err := layers.MarshalGzipped() +data, err := mvt.MarshalGzipped(layers) ``` ## Decoding WKB/EWKB from a database query diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md index 4629c9d..dee7754 100644 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -21,7 +21,7 @@ go get github.com/pierrec/lz4/v4 There is a command line interface tool to compress and decompress LZ4 files. ``` -go install github.com/pierrec/lz4/v4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 0000000..8df0dc7 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// raw stream. This makes it a logical opposite of a normal lz4.Reader. +// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. 
+func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder. +func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) 
+ } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go index 9054998..fec8adb 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -31,11 +31,10 @@ func recoverBlock(e *error) { } } -// blockHash hashes the lower five bytes of x into a value < htSize. +// blockHash hashes the lower 6 bytes into a value < htSize. func blockHash(x uint64) uint32 { const prime6bytes = 227718039650203 - x &= 1<<40 - 1 - return uint32((x * prime6bytes) >> (64 - hashLog)) + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } func CompressBlockBound(n int) int { @@ -123,9 +122,9 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { goto lastLiterals } - // Fast scan strategy: the hash table only stores the last five-byte sequences. + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. for si < sn { - // Hash the next five bytes (sequence)... + // Hash the next 6 bytes (sequence)... match := binary.LittleEndian.Uint64(src[si:]) h := blockHash(match) h2 := blockHash(match >> 8) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index a1bfa99..138083d 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,12 +8,9 @@ const ( Block256Kb Block1Mb Block4Mb + Block8Mb = 2 * Block4Mb ) -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. -var Block8Mb = uint32(CompressBlockBound(8 << 20)) - var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s index c43e8a8..d2fe11b 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -185,7 +185,7 @@ copyMatchTry8: // A 16-at-a-time loop doesn't provide a further speedup. CMP $8, len CCMP HS, offset, $8, $0 - BLO copyMatchLoop1 + BLO copyMatchTry4 AND $7, len, lenRem SUB $8, len @@ -201,8 +201,19 @@ copyMatchLoop8: MOVD tmp2, -8(dst) B copyMatchDone +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + copyMatchLoop1: - // Byte-at-a-time copy for small offsets. + // Byte-at-a-time copy for small offsets <= 3. 
MOVBU.P 1(match), tmp2 MOVB.P tmp2, 1(dst) SUBS $1, len diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go index 2010cd7..9f568fb 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -48,11 +48,14 @@ func decodeBlock(dst, src, dict []byte) (ret int) { mLen += 4 if offset := u16(src[si:]); mLen <= offset && offset < di { i := di - offset - end := i + 18 - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } } } case lLen == 0xF: diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index 459086f..e964654 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,9 +224,7 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. - src = src[:8<<20] + data = data[:cap(data)] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go index 8d3206a..651d10c 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -1,5 +1,5 @@ // Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). 
-// (https://github.com/Cyan4973/XXH/) +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) package xxh32 import ( diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go index 46a8738..57a44e7 100644 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -57,6 +57,13 @@ func BlockSizeOption(size BlockSize) Option { } w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -72,6 +79,9 @@ func BlockChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.BlockChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -87,6 +97,9 @@ func ChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.ContentChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -104,6 +117,10 @@ func SizeOption(size uint64) Option { w.frame.Descriptor.Flags.SizeSet(size > 0) w.frame.Descriptor.ContentSize = size return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil } return lz4errors.ErrOptionNotApplicable } @@ -162,6 +179,14 @@ func CompressionLevelOption(level CompressionLevel) Option { } w.level = lz4block.CompressionLevel(level) return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -186,6 +211,9 @@ func OnBlockDoneOption(handler func(size int)) Option { case *Reader: rw.handler = handler return nil + case *CompressingReader: + rw.handler = handler + return nil } return lz4errors.ErrOptionNotApplicable } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 77699f2..4358ade 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -150,6 +150,10 @@ func (w *Writer) Flush() (err error) { case writeState: case errorState: return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } default: return nil } diff --git a/vendor/github.com/pressly/goose/v3/.gitignore b/vendor/github.com/pressly/goose/v3/.gitignore index 69d484b..2a46657 100644 --- a/vendor/github.com/pressly/goose/v3/.gitignore +++ b/vendor/github.com/pressly/goose/v3/.gitignore @@ -10,3 +10,9 @@ # Local testing .envrc *.FAIL + +dist/ +release_notes.txt + +go.work +go.work.sum diff --git a/vendor/github.com/pressly/goose/v3/.goreleaser.yaml b/vendor/github.com/pressly/goose/v3/.goreleaser.yaml new file mode 100644 index 0000000..1f34ff4 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/.goreleaser.yaml @@ -0,0 +1,36 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +# +# See https://goreleaser.com/customization/ for more information. 
+version: 2 +project_name: goose + +before: + hooks: + - go mod tidy +builds: + - env: + - CGO_ENABLED=0 + binary: goose + main: ./cmd/goose + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + ldflags: + # The v prefix is stripped by goreleaser, so we need to add it back. + # https://goreleaser.com/customization/templates/#fnref:version-prefix + - "-s -w -X main.version=v{{ .Version }}" + +archives: + - format: binary + name_template: >- + {{ .ProjectName }}_{{- tolower .Os }}_{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }} +checksum: + name_template: "checksums.txt" +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + use: github-native diff --git a/vendor/github.com/pressly/goose/v3/.goreleaser.yml b/vendor/github.com/pressly/goose/v3/.goreleaser.yml deleted file mode 100644 index 0f2efdc..0000000 --- a/vendor/github.com/pressly/goose/v3/.goreleaser.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Documentation at https://goreleaser.com -project_name: goose - -gomod: - proxy: true - -builds: - - env: - - CGO_ENABLED=0 - main: ./cmd/goose - binary: goose - goos: - - linux - - windows - - darwin - goarch: - - amd64 - - arm64 - # Custom ldflags templates. - # Default is `-s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} -X main.builtBy=goreleaser`. - ldflags: - - -s -w - -# You can disable this pipe in order to not upload any artifacts. -# Defaults to false. -release: - disable: false - -archives: - - replacements: - 386: i386 - amd64: x86_64 - name_template: "{{ tolower .Binary }}_{{ tolower .Os }}_{{ tolower .Arch }}" - format: binary - -checksum: - name_template: "checksums.txt" - -snapshot: - name_template: "{{ incpatch .Version }}-next" - -changelog: - use: github - sort: asc - # Commit messages matching the regexp listed here will be removed from - # the changelog. - filters: - exclude: - - "^docs:" - - "^test:" -# TODO(mf): add docker support? diff --git a/vendor/github.com/pressly/goose/v3/CHANGELOG.md b/vendor/github.com/pressly/goose/v3/CHANGELOG.md new file mode 100644 index 0000000..fb3485a --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/CHANGELOG.md @@ -0,0 +1,218 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project +adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +- Update `WithDisableGlobalRegistry` behavior (#783). If set, this will ignore globally-registered + migrations instead of raising an error. Specifically, the following check is removed: + +```go +if len(global) > 0 { + return nil, errors.New("global registry disabled, but provider has registered go migrations") +} +``` + +This enables creating isolated goose provider(s) in legacy environments where global migrations may +be registered. Without updating this behavior, it would be impossible to use +`WithDisableGlobalRegistry` in combination with `WithGoMigrations`. + +## [v3.21.1] + +- Add `GetVersions` method to `goose.Provider`, returns the current (max db) version and the latest + (max filesystem) version. (#756) +- Clarify `GetLatestVersion` method MUST return `ErrVersionNotFound` if no latest migration is + found. Previously it was returning a -1 and nil error, which was inconsistent with the rest of the + API surface. + +- Add `GetLatestVersion` implementations to all existing dialects. 
This is an optimization to avoid + loading all migrations when only the latest version is needed. This uses the `max` function in SQL + to get the latest version_id irrespective of the order of applied migrations. + - Refactor existing portions of the code to use the new `GetLatestVersion` method. + +## [v3.21.0] + +- Retracted. Broken release, please use v3.21.1 instead. + +## [v3.20.0] + +- Expand the `Store` interface by adding a `GetLatestVersion` method and make the interface public. +- Add a (non-blocking) method to check if there are pending migrations to the `goose.Provider` + (#751): + +```go +func (p *Provider) HasPending(context.Context) (bool, error) {} +``` + +The underlying implementation **does not respect the `SessionLocker`** (if one is enabled) and can +be used to check for pending migrations without blocking or being blocked by other operations. + +- The methods `.Up`, `.UpByOne`, and `.UpTo` from `goose.Provider` will invoke `.HasPending` before + acquiring a lock with `SessionLocker` (if enabled). This addresses an edge case in + Kubernetes-style deployments where newer pods with long-running migrations prevent older pods - + which have all known migrations applied - from starting up due to an advisory lock. For more + details, refer to https://github.com/pressly/goose/pull/507#discussion_r1266498077 and #751. +- Move integration tests to `./internal/testing` and make it a separate Go module. This will allow + us to have a cleaner top-level go.mod file and avoid imports unrelated to the goose project. See + [integration/README.md](https://github.com/pressly/goose/blob/d0641b5bfb3bd5d38d95fe7a63d7ddf2d282234d/internal/testing/integration/README.md) + for more details. This shouldn't affect users of the goose library. + +## [v3.19.2] - 2024-03-13 + +- Remove duckdb support. The driver uses Cgo and we've decided to remove it until we can find a + better solution. If you were using duckdb with goose, please let us know by opening an issue. + +## [v3.19.1] - 2024-03-11 + +- Fix selecting dialect for `redshift` +- Add `GOOSE_MIGRATION_DIR` documentation +- Bump github.com/opencontainers/runc to `v1.1.12` (security fix) +- Update CI tests for go1.22 +- Make goose annotations case-insensitive + - All `-- +goose` annotations are now case-insensitive. This means that `-- +goose Up` and `-- ++goose up` are now equivalent. This change was made to improve the user experience and to make the + annotations more consistent. + +## [v3.19.0] - 2024-03-11 + +- Use [v3.19.1] instead. This was tagged but not released and does not contain release binaries. + +## [v3.18.0] - 2024-01-31 + +- Add environment variable substitution for SQL migrations. (#604) + + - This feature is **disabled by default**, and can be enabled by adding an annotation to the + migration file: + + ```sql + -- +goose ENVSUB ON + ``` + + - When enabled, goose will attempt to substitute environment variables in the SQL migration + queries until the end of the file, or until the annotation `-- +goose ENVSUB OFF` is found. For + example, if the environment variable `REGION` is set to `us_east_1`, the following SQL migration + will be substituted to `SELECT * FROM regions WHERE name = 'us_east_1';` + + ```sql + -- +goose ENVSUB ON + -- +goose Up + SELECT * FROM regions WHERE name = '${REGION}'; + ``` + +- Add native [Turso](https://turso.tech/) support with libsql driver. 
(#658) + +- Fixed query for list migrations in YDB (#684) + +## [v3.17.0] - 2023-12-15 + +- Standardised the MIT license (#647) +- Improve provider `Apply()` errors, add `ErrNotApplied` when attempting to rollback a migration + that has not been previously applied. (#660) +- Add `WithDisableGlobalRegistry` option to `NewProvider` to disable the global registry. (#645) +- Add `-timeout` flag to CLI to set the maximum allowed duration for queries to run. Default remains + no timeout. (#627) +- Add optional logging in `Provider` when `WithVerbose` option is supplied. (#668) + +⚠️ Potential Breaking Change ⚠️ + +- Update `goose create` to use UTC time instead of local time. (#242) + +## [v3.16.0] - 2023-11-12 + +- Added YDB support. (#592) +- Fix sqlserver query to ensure DB version. (#601) +- Allow setting / resetting the global Go migration registry. (#602) + - `SetGlobalMigrations` and `ResetGlobalMigrations` functions have been added. + - Introduce `NewGoMigration` for constructing Go migrations. +- Add initial implementation of `goose.NewProvider`. + +🎉 Read more about this new feature here: + +https://pressly.github.io/goose/blog/2023/goose-provider/ + +The motivation behind the Provider was simple - to reduce global state and make goose easier to +consume as an imported package. + +Here's a quick summary: + +- Avoid global state +- Make Provider safe to use concurrently +- Unlock (no pun intended) new features, such as database locking +- Make logging configurable +- Better error handling with proper return values +- Double down on Go migrations +- ... and more! + +## [v3.15.1] - 2023-10-10 + +- Fix regression that prevented registering Go migrations that didn't have the corresponding files + available in the filesystem. (#588) + - If Go migrations have been registered globally, but there are no .go files in the filesystem, + **always include** them. + - If Go migrations have been registered, and there are .go files in the filesystem, **only + include** those migrations. This was the original motivation behind #553. + - If there are .go files in the filesystem but not registered, **raise an error**. This is to + prevent accidentally adding valid looking Go migration files without explicitly registering + them. + +## [v3.15.0] - 2023-08-12 + +- Fix `sqlparser` to avoid skipping the last statement when it's not terminated with a semicolon + within a StatementBegin/End block. (#580) +- Add `go1.21` to the CI matrix. +- Bump minimum version of module in go.mod to `go1.19`. +- Fix version output when installing pre-built binaries (#585). + +## [v3.14.0] - 2023-07-26 + +- Filter registered Go migrations from the global map with corresponding .go files from the + filesystem. + - The code previously assumed all .go migrations would be in the same folder, so this should not + be a breaking change. + - See #553 for more details +- Improve output log message for applied up migrations. #562 +- Fix an issue where `AddMigrationNoTxContext` was registering the wrong source because it skipped + too many frames. #572 +- Improve binary version output when using go install. + +## [v3.13.4] - 2023-07-07 + +- Fix pre-built binary versioning and make small improvements to GoReleaser config. +- Fix an edge case in the `sqlparser` where the last up statement may be ignored if it's + unterminated with a semicolon and followed by a `-- +goose Down` annotation. +- Trim `Logger` interface to `Printf` and `Fatalf` methods only. 
Projects that have previously + implemented the `Logger` interface should not be affected, and can remove unused methods. + +## [v3.13.1] - 2023-07-03 + +- Add pre-built binaries with GoReleaser and update the build process. + +## [v3.13.0] - 2023-06-29 + +- Add a changelog to the project, based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). +- Update go.mod and retract all `v3.12.X` tags. They were accidentally pushed and contain a + reference to the wrong Go module. +- Fix `up` and `up -allowing-missing` behavior. +- Fix empty version in log output. +- Add new `context.Context`-aware functions and methods, for both sql and go migrations. +- Return error when no migration files found or dir is not a directory. + +[Unreleased]: https://github.com/pressly/goose/compare/v3.21.1...HEAD +[v3.21.1]: https://github.com/pressly/goose/compare/v3.20.0...v3.21.1 +[v3.21.0]: https://github.com/pressly/goose/compare/v3.20.0...v3.21.0 +[v3.20.0]: https://github.com/pressly/goose/compare/v3.19.2...v3.20.0 +[v3.19.2]: https://github.com/pressly/goose/compare/v3.19.1...v3.19.2 +[v3.19.1]: https://github.com/pressly/goose/compare/v3.19.0...v3.19.1 +[v3.19.0]: https://github.com/pressly/goose/compare/v3.18.0...v3.19.0 +[v3.18.0]: https://github.com/pressly/goose/compare/v3.17.0...v3.18.0 +[v3.17.0]: https://github.com/pressly/goose/compare/v3.16.0...v3.17.0 +[v3.16.0]: https://github.com/pressly/goose/compare/v3.15.1...v3.16.0 +[v3.15.1]: https://github.com/pressly/goose/compare/v3.15.0...v3.15.1 +[v3.15.0]: https://github.com/pressly/goose/compare/v3.14.0...v3.15.0 +[v3.14.0]: https://github.com/pressly/goose/compare/v3.13.4...v3.14.0 +[v3.13.4]: https://github.com/pressly/goose/compare/v3.13.1...v3.13.4 +[v3.13.1]: https://github.com/pressly/goose/compare/v3.13.0...v3.13.1 +[v3.13.0]: https://github.com/pressly/goose/releases/tag/v3.13.0 diff --git a/vendor/github.com/pressly/goose/v3/Dockerfile.local b/vendor/github.com/pressly/goose/v3/Dockerfile.local deleted file mode 100644 index 1c66de7..0000000 --- a/vendor/github.com/pressly/goose/v3/Dockerfile.local +++ /dev/null @@ -1,8 +0,0 @@ -FROM golang:1.17-buster@sha256:3e663ba6af8281b04975b0a34a14d538cdd7d284213f83f05aaf596b80a8c725 as builder - -COPY . /src -WORKDIR /src -RUN CGO_ENABLED=0 make dist - -FROM scratch AS exporter -COPY --from=builder /src/bin/ / \ No newline at end of file diff --git a/vendor/github.com/pressly/goose/v3/LICENSE b/vendor/github.com/pressly/goose/v3/LICENSE index c5c3ece..fd425e0 100644 --- a/vendor/github.com/pressly/goose/v3/LICENSE +++ b/vendor/github.com/pressly/goose/v3/LICENSE @@ -1,7 +1,8 @@ +MIT License + Original work Copyright (c) 2012 Liam Staskawicz Modified work Copyright (c) 2016 Vojtech Vitek - -MIT License +Modified work Copyright (c) 2021 Michael Fridman, Vojtech Vitek Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/vendor/github.com/pressly/goose/v3/Makefile b/vendor/github.com/pressly/goose/v3/Makefile index 1525a6b..e68eff0 100644 --- a/vendor/github.com/pressly/goose/v3/Makefile +++ b/vendor/github.com/pressly/goose/v3/Makefile @@ -1,3 +1,19 @@ +GO_TEST_FLAGS ?= -race -count=1 -v -timeout=5m + +# These are the default values for the test database. 
They can be overridden +DB_USER ?= dbuser +DB_PASSWORD ?= password1 +DB_NAME ?= testdb +DB_POSTGRES_PORT ?= 5433 +DB_MYSQL_PORT ?= 3307 +DB_CLICKHOUSE_PORT ?= 9001 +DB_YDB_PORT ?= 2136 +DB_TURSO_PORT ?= 8080 + +list-build-tags: + @echo "Available build tags:" + @echo " $$(rg -o --trim 'no_[a-zA-Z0-9_]+' ./cmd/goose --no-line-number --no-filename | sort | uniq | tr '\n' ' ')" + .PHONY: dist dist: @mkdir -p ./bin @@ -8,43 +24,110 @@ dist: GOOS=windows GOARCH=amd64 go build -o ./bin/goose-windows64.exe ./cmd/goose GOOS=windows GOARCH=386 go build -o ./bin/goose-windows386.exe ./cmd/goose +.PHONY: clean +clean: + @find . -type f -name '*.FAIL' -delete + +.PHONY: lint +lint: tools + @golangci-lint run ./... --fix + +.PHONY: tools +tools: + @go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + test-packages: - go test -v $$(go list ./... | grep -v -e /tests -e /bin -e /cmd -e /examples) + go test $(GO_TEST_FLAGS) $$(go list ./... | grep -v -e /tests -e /bin -e /cmd -e /examples) + +test-packages-short: + go test -test.short $(GO_TEST_FLAGS) $$(go list ./... | grep -v -e /tests -e /bin -e /cmd -e /examples) + +coverage-short: + go test ./ -test.short $(GO_TEST_FLAGS) -cover -coverprofile=coverage.out + go tool cover -html=coverage.out + +coverage: + go test ./ $(GO_TEST_FLAGS) -cover -coverprofile=coverage.out + go tool cover -html=coverage.out + +# +# Integration-related targets +# +add-gowork: + @[ -f go.work ] || go work init + @[ -f go.work.sum ] || go work use -r . + +remove-gowork: + rm -rf go.work go.work.sum -test-e2e: test-e2e-postgres test-e2e-mysql +upgrade-integration-deps: + cd ./internal/testing && go get -u ./... && go mod tidy -test-e2e-postgres: - go test -v ./tests/e2e -dialect=postgres +test-postgres-long: add-gowork test-postgres + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='(TestPostgresProviderLocking|TestPostgresSessionLocker)' -test-e2e-mysql: - go test -v ./tests/e2e -dialect=mysql +test-postgres: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run="^TestPostgres$$" -test-clickhouse: - go test -timeout=10m -count=1 -race -v ./tests/clickhouse -test.short +test-clickhouse: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='(TestClickhouse|TestClickhouseRemote)' -test-vertica: - go test -count=1 -v ./tests/vertica +test-mysql: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='TestMySQL' + +test-turso: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='TestTurso' + +test-vertica: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='TestVertica' + +test-ydb: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration -run='TestYDB' + +test-integration: add-gowork + go test $(GO_TEST_FLAGS) ./internal/testing/integration/... + +# +# Docker-related targets +# docker-cleanup: docker stop -t=0 $$(docker ps --filter="label=goose_test" -aq) -start-postgres: +docker-postgres: docker run --rm -d \ - -e POSTGRES_USER=${GOOSE_POSTGRES_DB_USER} \ - -e POSTGRES_PASSWORD=${GOOSE_POSTGRES_PASSWORD} \ - -e POSTGRES_DB=${GOOSE_POSTGRES_DBNAME} \ - -p ${GOOSE_POSTGRES_PORT}:5432 \ + -e POSTGRES_USER=$(DB_USER) \ + -e POSTGRES_PASSWORD=$(DB_PASSWORD) \ + -e POSTGRES_DB=$(DB_NAME) \ + -p $(DB_POSTGRES_PORT):5432 \ -l goose_test \ - postgres:14-alpine + postgres:14-alpine -c log_statement=all + echo "postgres://$(DB_USER):$(DB_PASSWORD)@localhost:$(DB_POSTGRES_PORT)/$(DB_NAME)?sslmode=disable" -.PHONY: clean -clean: - @find . 
-type f -name '*.FAIL' -delete +docker-mysql: + docker run --rm -d \ + -e MYSQL_ROOT_PASSWORD=rootpassword1 \ + -e MYSQL_DATABASE=$(DB_NAME) \ + -e MYSQL_USER=$(DB_USER) \ + -e MYSQL_PASSWORD=$(DB_PASSWORD) \ + -p $(DB_MYSQL_PORT):3306 \ + -l goose_test \ + mysql:8.0.31 + echo "mysql://$(DB_USER):$(DB_PASSWORD)@localhost:$(DB_MYSQL_PORT)/$(DB_NAME)?parseTime=true" -.PHONY: lint -lint: tools - @golangci-lint run ./... --fix +docker-clickhouse: + docker run --rm -d \ + -e CLICKHOUSE_DB=$(DB_NAME) \ + -e CLICKHOUSE_USER=$(DB_USER) \ + -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \ + -e CLICKHOUSE_PASSWORD=$(DB_PASSWORD) \ + -p $(DB_CLICKHOUSE_PORT):9000/tcp \ + -l goose_test \ + clickhouse/clickhouse-server:23-alpine + echo "clickhouse://$(DB_USER):$(DB_PASSWORD)@localhost:$(DB_CLICKHOUSE_PORT)/$(DB_NAME)" -.PHONY: tools -tools: - @go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1 +docker-turso: + docker run --rm -d \ + -p $(DB_TURSO_PORT):8080 \ + -l goose_test \ + ghcr.io/tursodatabase/libsql-server:v0.22.10 diff --git a/vendor/github.com/pressly/goose/v3/README.md b/vendor/github.com/pressly/goose/v3/README.md index 4387f11..ef281fb 100644 --- a/vendor/github.com/pressly/goose/v3/README.md +++ b/vendor/github.com/pressly/goose/v3/README.md @@ -1,8 +1,10 @@ -# goose [![Goose CI](https://github.com/pressly/goose/actions/workflows/ci.yml/badge.svg)](https://github.com/pressly/goose/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/pressly/goose/v3.svg)](https://pkg.go.dev/github.com/pressly/goose/v3) + -

- -

+# goose + +[![Goose CI](https://github.com/pressly/goose/actions/workflows/ci.yaml/badge.svg)](https://github.com/pressly/goose/actions/workflows/ci.yaml) +[![Go Reference](https://pkg.go.dev/badge/github.com/pressly/goose/v3.svg)](https://pkg.go.dev/github.com/pressly/goose/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/pressly/goose/v3)](https://goreportcard.com/report/github.com/pressly/goose/v3) Goose is a database migration tool. Manage your database schema by creating incremental SQL changes or Go functions. @@ -13,38 +15,45 @@ Goose supports [embedding SQL migrations](#embedded-sql-migrations), which means ### Goals of this fork `github.com/pressly/goose` is a fork of `bitbucket.org/liamstask/goose` with the following changes: + - No config files - [Default goose binary](./cmd/goose/main.go) can migrate SQL files only - Go migrations: - - We don't `go build` Go migrations functions on-the-fly - from within the goose binary - - Instead, we let you - [create your own custom goose binary](examples/go-migrations), - register your Go migration functions explicitly and run complex - migrations with your own `*sql.DB` connection - - Go migration functions let you run your code within - an SQL transaction, if you use the `*sql.Tx` argument + - We don't `go build` Go migrations functions on-the-fly + from within the goose binary + - Instead, we let you + [create your own custom goose binary](examples/go-migrations), + register your Go migration functions explicitly and run complex + migrations with your own `*sql.DB` connection + - Go migration functions let you run your code within + an SQL transaction, if you use the `*sql.Tx` argument - The goose pkg is decoupled from the binary: - - goose pkg doesn't register any SQL drivers anymore, - thus no driver `panic()` conflict within your codebase! - - goose pkg doesn't have any vendor dependencies anymore + - goose pkg doesn't register any SQL drivers anymore, + thus no driver `panic()` conflict within your codebase! + - goose pkg doesn't have any vendor dependencies anymore - We use timestamped migrations by default but recommend a hybrid approach of using timestamps in the development process and sequential versions in production. - Supports missing (out-of-order) migrations with the `-allow-missing` flag, or if using as a library supply the functional option `goose.WithAllowMissing()` to Up, UpTo or UpByOne. - Supports applying ad-hoc migrations without tracking them in the schema table. Useful for seeding a database after migrations have been applied. Use `-no-versioning` flag or the functional option `goose.WithNoVersioning()`. # Install - $ go install github.com/pressly/goose/v3/cmd/goose@latest +```shell +go install github.com/pressly/goose/v3/cmd/goose@latest +``` This will install the `goose` binary to your `$GOPATH/bin` directory. For a lite version of the binary without DB connection dependent commands, use the exclusive build tags: - $ go build -tags='no_postgres no_mysql no_sqlite3' -o goose ./cmd/goose +```shell +go build -tags='no_postgres no_mysql no_sqlite3 no_ydb' -o goose ./cmd/goose +``` For macOS users `goose` is available as a [Homebrew Formulae](https://formulae.brew.sh/formula/goose#default): - $ brew install goose +```shell +brew install goose +``` See the docs for more [installation instructions](https://pressly.github.io/goose/installation/). 
@@ -53,6 +62,15 @@ See the docs for more [installation instructions](https://pressly.github.io/goos ``` Usage: goose [OPTIONS] DRIVER DBSTRING COMMAND +or + +Set environment key +GOOSE_DRIVER=DRIVER +GOOSE_DBSTRING=DBSTRING +GOOSE_MIGRATION_DIR=MIGRATION_DIR + +Usage: goose [OPTIONS] COMMAND + Drivers: postgres mysql @@ -62,6 +80,7 @@ Drivers: tidb clickhouse vertica + ydb Examples: goose sqlite3 ./foo.db status @@ -70,35 +89,46 @@ Examples: goose sqlite3 ./foo.db create fetch_user_data go goose sqlite3 ./foo.db up - goose postgres "user=postgres password=postgres dbname=postgres sslmode=disable" status + goose postgres "user=postgres dbname=postgres sslmode=disable" status goose mysql "user:password@/dbname?parseTime=true" status goose redshift "postgres://user:password@qwerty.us-east-1.redshift.amazonaws.com:5439/db" status goose tidb "user:password@/dbname?parseTime=true" status goose mssql "sqlserver://user:password@dbname:1433?database=master" status goose clickhouse "tcp://127.0.0.1:9000" status goose vertica "vertica://user:password@localhost:5433/dbname?connection_load_balance=1" status + goose ydb "grpcs://localhost:2135/local?go_query_mode=scripting&go_fake_tx=scripting&go_query_bind=declare,numeric" status + + GOOSE_DRIVER=sqlite3 GOOSE_DBSTRING=./foo.db goose status + GOOSE_DRIVER=sqlite3 GOOSE_DBSTRING=./foo.db goose create init sql + GOOSE_DRIVER=postgres GOOSE_DBSTRING="user=postgres dbname=postgres sslmode=disable" goose status + GOOSE_DRIVER=mysql GOOSE_DBSTRING="user:password@/dbname" goose status + GOOSE_DRIVER=redshift GOOSE_DBSTRING="postgres://user:password@qwerty.us-east-1.redshift.amazonaws.com:5439/db" goose status Options: -allow-missing - applies missing (out-of-order) migrations + applies missing (out-of-order) migrations -certfile string - file path to root CA's certificates in pem format (only support on mysql) + file path to root CA's certificates in pem format (only support on mysql) -dir string - directory with migration files (default ".") - -h print help + directory with migration files (default ".", can be set via the GOOSE_MIGRATION_DIR env variable). 
+ -h print help + -no-color + disable color output (NO_COLOR env variable supported) -no-versioning - apply migration commands with no versioning, in file order, from directory pointed to - -s use sequential numbering for new migrations + apply migration commands with no versioning, in file order, from directory pointed to + -s use sequential numbering for new migrations -ssl-cert string - file path to SSL certificates in pem format (only support on mysql) + file path to SSL certificates in pem format (only support on mysql) -ssl-key string - file path to SSL key in pem format (only support on mysql) + file path to SSL key in pem format (only support on mysql) -table string - migrations table name (default "goose_db_version") - -v enable verbose mode + migrations table name (default "goose_db_version") + -timeout duration + maximum allowed duration for queries to run; e.g., 1h13m + -v enable verbose mode -version - print version + print version Commands: up Migrate the DB to the most recent version available @@ -112,6 +142,7 @@ Commands: version Print the current version of the database create NAME [sql|go] Creates new migration file with the current timestamp fix Apply sequential ordering to migrations + validate Check migration files without running them ``` ## create @@ -254,7 +285,50 @@ language plpgsql; -- +goose StatementEnd ``` +Goose supports environment variable substitution in SQL migrations through annotations. To enable +this feature, use the `-- +goose ENVSUB ON` annotation before the queries where you want +substitution applied. It stays active until the `-- +goose ENVSUB OFF` annotation is encountered. +You can use these annotations multiple times within a file. + +This feature is disabled by default for backward compatibility with existing scripts. + +For `PL/pgSQL` functions or other statements where substitution is not desired, wrap the annotations +explicitly around the relevant parts. For example, to exclude escaping the `**` characters: + +```sql +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION test_func() +RETURNS void AS $$ +-- +goose ENVSUB ON +BEGIN + RAISE NOTICE '${SOME_ENV_VAR}'; +END; +-- +goose ENVSUB OFF +$$ LANGUAGE plpgsql; +-- +goose StatementEnd +``` + +
+Supported expansions (click here to expand): + +- `${VAR}` or $VAR - expands to the value of the environment variable `VAR` +- `${VAR:-default}` - expands to the value of the environment variable `VAR`, or `default` if `VAR` + is unset or null +- `${VAR-default}` - expands to the value of the environment variable `VAR`, or `default` if `VAR` + is unset +- `${VAR?err_msg}` - expands to the value of the environment variable `VAR`, or prints `err_msg` and + error if `VAR` unset +- ~~`${VAR:?err_msg}` - expands to the value of the environment variable `VAR`, or prints `err_msg` + and error if `VAR` unset or null.~~ **THIS IS NOT SUPPORTED** + +See +[mfridman/interpolate](https://github.com/mfridman/interpolate?tab=readme-ov-file#supported-expansions) +for more details on supported expansions. + +
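As a hypothetical illustration (not from the goose documentation), the default-value form lets a
migration fall back to a literal when the variable is unset; with `SCHEMA_NAME` undefined, the
statement below runs against `public`:

```sql
-- +goose ENVSUB ON
-- +goose Up
CREATE TABLE ${SCHEMA_NAME:-public}.users (
    id BIGINT PRIMARY KEY
);
-- +goose ENVSUB OFF
```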
+ ## Embedded sql migrations + Go 1.16 introduced new feature: [compile-time embedding](https://pkg.go.dev/embed/) files into binary and corresponding [filesystem abstraction](https://pkg.go.dev/io/fs/). @@ -338,6 +412,9 @@ func Down(tx *sql.Tx) error { } ``` +Note that Go migration files must begin with a numeric value, followed by an +underscore, and must not end with `*_test.go`. + # Development This can be used to build local `goose` binaries without having the latest Go version installed locally. @@ -347,6 +424,7 @@ DOCKER_BUILDKIT=1 docker build -f Dockerfile.local --output bin . ``` # Hybrid Versioning + Please, read the [versioning problem](https://github.com/pressly/goose/issues/63#issuecomment-428681694) first. By default, if you attempt to apply missing (out-of-order) migrations `goose` will raise an error. However, If you want to apply these missing migrations pass goose the `-allow-missing` flag, or if using as a library supply the functional option `goose.WithAllowMissing()` to Up, UpTo or UpByOne. diff --git a/vendor/github.com/pressly/goose/v3/create.go b/vendor/github.com/pressly/goose/v3/create.go index 1a8bb74..c1e8400 100644 --- a/vendor/github.com/pressly/goose/v3/create.go +++ b/vendor/github.com/pressly/goose/v3/create.go @@ -2,6 +2,7 @@ package goose import ( "database/sql" + "errors" "fmt" "os" "path/filepath" @@ -25,11 +26,12 @@ func SetSequential(s bool) { // Create writes a new blank migration file. func CreateWithTemplate(db *sql.DB, dir string, tmpl *template.Template, name, migrationType string) error { - var version string + version := time.Now().UTC().Format(timestampFormat) + if sequential { // always use DirFS here because it's modifying operation - migrations, err := collectMigrationsFS(osFS{}, dir, minVersion, maxVersion) - if err != nil { + migrations, err := collectMigrationsFS(osFS{}, dir, minVersion, maxVersion, registeredGoMigrations) + if err != nil && !errors.Is(err, ErrNoMigrationFiles) { return err } @@ -43,8 +45,6 @@ func CreateWithTemplate(db *sql.DB, dir string, tmpl *template.Template, name, m } else { version = fmt.Sprintf(seqVersionTemplate, int64(1)) } - } else { - version = time.Now().Format(timestampFormat) } filename := fmt.Sprintf("%v_%v.%v", version, snakeCase(name), migrationType) @@ -99,20 +99,21 @@ SELECT 'down SQL query'; var goSQLMigrationTemplate = template.Must(template.New("goose.go-migration").Parse(`package migrations import ( + "context" "database/sql" "github.com/pressly/goose/v3" ) func init() { - goose.AddMigration(up{{.CamelName}}, down{{.CamelName}}) + goose.AddMigrationContext(up{{.CamelName}}, down{{.CamelName}}) } -func up{{.CamelName}}(tx *sql.Tx) error { +func up{{.CamelName}}(ctx context.Context, tx *sql.Tx) error { // This code is executed when the migration is applied. return nil } -func down{{.CamelName}}(tx *sql.Tx) error { +func down{{.CamelName}}(ctx context.Context, tx *sql.Tx) error { // This code is executed when the migration is rolled back. return nil } diff --git a/vendor/github.com/pressly/goose/v3/database/dialect.go b/vendor/github.com/pressly/goose/v3/database/dialect.go new file mode 100644 index 0000000..2ac197d --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/database/dialect.go @@ -0,0 +1,147 @@ +package database + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/pressly/goose/v3/internal/dialect/dialectquery" +) + +// Dialect is the type of database dialect. 
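// Values of this type (for example DialectPostgres) are passed to [NewStore], which maps
// them to the matching dialect-specific query implementation.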
+type Dialect string + +const ( + DialectClickHouse Dialect = "clickhouse" + DialectMSSQL Dialect = "mssql" + DialectMySQL Dialect = "mysql" + DialectPostgres Dialect = "postgres" + DialectRedshift Dialect = "redshift" + DialectSQLite3 Dialect = "sqlite3" + DialectTiDB Dialect = "tidb" + DialectTurso Dialect = "turso" + DialectVertica Dialect = "vertica" + DialectYdB Dialect = "ydb" +) + +// NewStore returns a new [Store] implementation for the given dialect. +func NewStore(dialect Dialect, tablename string) (Store, error) { + if tablename == "" { + return nil, errors.New("table name must not be empty") + } + if dialect == "" { + return nil, errors.New("dialect must not be empty") + } + lookup := map[Dialect]dialectquery.Querier{ + DialectClickHouse: &dialectquery.Clickhouse{}, + DialectMSSQL: &dialectquery.Sqlserver{}, + DialectMySQL: &dialectquery.Mysql{}, + DialectPostgres: &dialectquery.Postgres{}, + DialectRedshift: &dialectquery.Redshift{}, + DialectSQLite3: &dialectquery.Sqlite3{}, + DialectTiDB: &dialectquery.Tidb{}, + DialectVertica: &dialectquery.Vertica{}, + DialectYdB: &dialectquery.Ydb{}, + DialectTurso: &dialectquery.Turso{}, + } + querier, ok := lookup[dialect] + if !ok { + return nil, fmt.Errorf("unknown dialect: %q", dialect) + } + return &store{ + tablename: tablename, + querier: querier, + }, nil +} + +type store struct { + tablename string + querier dialectquery.Querier +} + +var _ Store = (*store)(nil) + +func (s *store) Tablename() string { + return s.tablename +} + +func (s *store) CreateVersionTable(ctx context.Context, db DBTxConn) error { + q := s.querier.CreateTable(s.tablename) + if _, err := db.ExecContext(ctx, q); err != nil { + return fmt.Errorf("failed to create version table %q: %w", s.tablename, err) + } + return nil +} + +func (s *store) Insert(ctx context.Context, db DBTxConn, req InsertRequest) error { + q := s.querier.InsertVersion(s.tablename) + if _, err := db.ExecContext(ctx, q, req.Version, true); err != nil { + return fmt.Errorf("failed to insert version %d: %w", req.Version, err) + } + return nil +} + +func (s *store) Delete(ctx context.Context, db DBTxConn, version int64) error { + q := s.querier.DeleteVersion(s.tablename) + if _, err := db.ExecContext(ctx, q, version); err != nil { + return fmt.Errorf("failed to delete version %d: %w", version, err) + } + return nil +} + +func (s *store) GetMigration( + ctx context.Context, + db DBTxConn, + version int64, +) (*GetMigrationResult, error) { + q := s.querier.GetMigrationByVersion(s.tablename) + var result GetMigrationResult + if err := db.QueryRowContext(ctx, q, version).Scan( + &result.Timestamp, + &result.IsApplied, + ); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: %d", ErrVersionNotFound, version) + } + return nil, fmt.Errorf("failed to get migration %d: %w", version, err) + } + return &result, nil +} + +func (s *store) GetLatestVersion(ctx context.Context, db DBTxConn) (int64, error) { + q := s.querier.GetLatestVersion(s.tablename) + var version sql.NullInt64 + if err := db.QueryRowContext(ctx, q).Scan(&version); err != nil { + return -1, fmt.Errorf("failed to get latest version: %w", err) + } + if !version.Valid { + return -1, fmt.Errorf("latest %w", ErrVersionNotFound) + } + return version.Int64, nil +} + +func (s *store) ListMigrations( + ctx context.Context, + db DBTxConn, +) ([]*ListMigrationsResult, error) { + q := s.querier.ListMigrations(s.tablename) + rows, err := db.QueryContext(ctx, q) + if err != nil { + return nil, fmt.Errorf("failed to list 
migrations: %w", err) + } + defer rows.Close() + + var migrations []*ListMigrationsResult + for rows.Next() { + var result ListMigrationsResult + if err := rows.Scan(&result.Version, &result.IsApplied); err != nil { + return nil, fmt.Errorf("failed to scan list migrations result: %w", err) + } + migrations = append(migrations, &result) + } + if err := rows.Err(); err != nil { + return nil, err + } + return migrations, nil +} diff --git a/vendor/github.com/pressly/goose/v3/database/doc.go b/vendor/github.com/pressly/goose/v3/database/doc.go new file mode 100644 index 0000000..4748c72 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/database/doc.go @@ -0,0 +1,14 @@ +// Package database defines a generic [Store] interface for goose to use when interacting with the +// database. It is meant to be generic and not tied to any specific database technology. +// +// At a high level, a [Store] is responsible for: +// - Creating a version table +// - Inserting and deleting a version +// - Getting a specific version +// - Listing all applied versions +// +// Use the [NewStore] function to create a [Store] for one of the supported dialects. +// +// For more advanced use cases, it's possible to implement a custom [Store] for a database that +// goose does not support. +package database diff --git a/vendor/github.com/pressly/goose/v3/database/sql_extended.go b/vendor/github.com/pressly/goose/v3/database/sql_extended.go new file mode 100644 index 0000000..8eaa939 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/database/sql_extended.go @@ -0,0 +1,23 @@ +package database + +import ( + "context" + "database/sql" +) + +// DBTxConn is a thin interface for common methods that is satisfied by *sql.DB, *sql.Tx and +// *sql.Conn. +// +// There is a long outstanding issue to formalize a std lib interface, but alas. See: +// https://github.com/golang/go/issues/14468 +type DBTxConn interface { + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) + QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row +} + +var ( + _ DBTxConn = (*sql.DB)(nil) + _ DBTxConn = (*sql.Tx)(nil) + _ DBTxConn = (*sql.Conn)(nil) +) diff --git a/vendor/github.com/pressly/goose/v3/database/store.go b/vendor/github.com/pressly/goose/v3/database/store.go new file mode 100644 index 0000000..0c7e44d --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/database/store.go @@ -0,0 +1,67 @@ +package database + +import ( + "context" + "errors" + "time" +) + +var ( + // ErrVersionNotFound must be returned by [GetMigration] or [GetLatestVersion] when a migration + // does not exist. + ErrVersionNotFound = errors.New("version not found") + + // ErrNotImplemented must be returned by methods that are not implemented. + ErrNotImplemented = errors.New("not implemented") +) + +// Store is an interface that defines methods for tracking and managing migrations. It is used by +// the goose package to interact with a database. By defining a Store interface, multiple +// implementations can be created to support different databases without reimplementing the +// migration logic. +// +// This package provides several dialects that implement the Store interface. While most users won't +// need to create their own Store, if you need to support a database that isn't currently supported, +// you can implement your own! +type Store interface { + // Tablename is the name of the version table. 
This table is used to record applied migrations + // and must not be an empty string. + Tablename() string + // CreateVersionTable creates the version table, which is used to track migrations. When + // creating this table, the implementation MUST also insert a row for the initial version (0). + CreateVersionTable(ctx context.Context, db DBTxConn) error + // Insert a version id into the version table. + Insert(ctx context.Context, db DBTxConn, req InsertRequest) error + // Delete a version id from the version table. + Delete(ctx context.Context, db DBTxConn, version int64) error + // GetMigration retrieves a single migration by version id. If the query succeeds, but the + // version is not found, this method must return [ErrVersionNotFound]. + GetMigration(ctx context.Context, db DBTxConn, version int64) (*GetMigrationResult, error) + // GetLatestVersion retrieves the last applied migration version. If no migrations exist, this + // method must return [ErrVersionNotFound]. + GetLatestVersion(ctx context.Context, db DBTxConn) (int64, error) + // ListMigrations retrieves all migrations sorted in descending order by id or timestamp. If + // there are no migrations, return empty slice with no error. Typically this method will return + // at least one migration, because the initial version (0) is always inserted into the version + // table when it is created. + ListMigrations(ctx context.Context, db DBTxConn) ([]*ListMigrationsResult, error) +} + +type InsertRequest struct { + Version int64 + + // TODO(mf): in the future, we maybe want to expand this struct so implementors can store + // additional information. See the following issues for more information: + // - https://github.com/pressly/goose/issues/422 + // - https://github.com/pressly/goose/issues/288 +} + +type GetMigrationResult struct { + Timestamp time.Time + IsApplied bool +} + +type ListMigrationsResult struct { + Version int64 + IsApplied bool +} diff --git a/vendor/github.com/pressly/goose/v3/db.go b/vendor/github.com/pressly/goose/v3/db.go index 32fb9fe..280f235 100644 --- a/vendor/github.com/pressly/goose/v3/db.go +++ b/vendor/github.com/pressly/goose/v3/db.go @@ -5,20 +5,39 @@ import ( "fmt" ) -// OpenDBWithDriver creates a connection to a database, and modifies goose -// internals to be compatible with the supplied driver by calling SetDialect. +// OpenDBWithDriver creates a connection to a database, and modifies goose internals to be +// compatible with the supplied driver by calling SetDialect. func OpenDBWithDriver(driver string, dbstring string) (*sql.DB, error) { if err := SetDialect(driver); err != nil { return nil, err } + // The Go ecosystem has added more and more drivers over the years. As a result, there's no + // longer a one-to-one match between the driver name and the dialect name. For instance, there's + // no "redshift" driver, but that's the internal dialect name within goose. Hence, we need to + // convert the dialect name to a supported driver name. This conversion is a best-effort + // attempt, as we can't support both lib/pq and pgx, which some users might have. + // + // We recommend users to create a [NewProvider] with the desired dialect, open a connection + // using their preferred driver, and provide the *sql.DB to goose. This approach removes the + // need for mapping dialects to drivers, rendering this function unnecessary. 
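// A minimal sketch of that recommended pattern (illustrative only; it assumes the pgx
// stdlib driver is imported as _ "github.com/jackc/pgx/v5/stdlib" and that migrations
// live in ./migrations):
//
//	db, err := sql.Open("pgx", dbstring)
//	if err != nil { /* handle error */ }
//	provider, err := goose.NewProvider(database.DialectPostgres, db, os.DirFS("migrations"))
//	if err != nil { /* handle error */ }
//	results, err := provider.Up(ctx)
//	_ = results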
+ switch driver { case "mssql": driver = "sqlserver" - case "redshift": - driver = "postgres" case "tidb": driver = "mysql" + case "turso": + driver = "libsql" + case "sqlite3": + driver = "sqlite" + case "postgres", "redshift": + driver = "pgx" + } + + switch driver { + case "postgres", "pgx", "sqlite3", "sqlite", "mysql", "sqlserver", "clickhouse", "vertica", "azuresql", "ydb", "libsql": + return sql.Open(driver, dbstring) case "clickhouse-replicated": db, err := sql.Open("clickhouse", dbstring) if err != nil { @@ -26,18 +45,13 @@ func OpenDBWithDriver(driver string, dbstring string) (*sql.DB, error) { } _, err = db.Exec("SET insert_quorum=2") if err != nil { - return nil, fmt.Errorf("set insert_quorum %w", err) + return nil, fmt.Errorf("SET insert_quorum %w", err) } _, err = db.Exec("SET select_sequential_consistency=1") if err != nil { - return nil, fmt.Errorf("set select_sequential_consistency %w", err) + return nil, fmt.Errorf("SET select_sequential_consistency %w", err) } return db, nil - } - - switch driver { - case "postgres", "pgx", "sqlite3", "sqlite", "mysql", "sqlserver", "clickhouse", "vertica": - return sql.Open(driver, dbstring) default: return nil, fmt.Errorf("unsupported driver %s", driver) } diff --git a/vendor/github.com/pressly/goose/v3/dialect.go b/vendor/github.com/pressly/goose/v3/dialect.go index 0d227e9..9a8f3f4 100644 --- a/vendor/github.com/pressly/goose/v3/dialect.go +++ b/vendor/github.com/pressly/goose/v3/dialect.go @@ -1,404 +1,63 @@ package goose import ( - "database/sql" "fmt" -) -// SQLDialect abstracts the details of specific SQL dialects -// for goose's few SQL specific statements -type SQLDialect interface { - createVersionTableSQL() string // sql string to create the db version table - insertVersionSQL() string // sql string to insert the initial version table row - deleteVersionSQL() string // sql string to delete version - migrationSQL() string // sql string to retrieve migrations - dbVersionQuery(db *sql.DB) (*sql.Rows, error) -} + "github.com/pressly/goose/v3/database" + "github.com/pressly/goose/v3/internal/dialect" +) -var dialect SQLDialect = &PostgresDialect{} +// Dialect is the type of database dialect. It is an alias for [database.Dialect]. +type Dialect = database.Dialect + +const ( + DialectClickHouse Dialect = database.DialectClickHouse + DialectMSSQL Dialect = database.DialectMSSQL + DialectMySQL Dialect = database.DialectMySQL + DialectPostgres Dialect = database.DialectPostgres + DialectRedshift Dialect = database.DialectRedshift + DialectSQLite3 Dialect = database.DialectSQLite3 + DialectTiDB Dialect = database.DialectTiDB + DialectVertica Dialect = database.DialectVertica + DialectYdB Dialect = database.DialectYdB +) -// GetDialect gets the SQLDialect -func GetDialect() SQLDialect { - return dialect +func init() { + store, _ = dialect.NewStore(dialect.Postgres) } -// SetDialect sets the SQLDialect -func SetDialect(d string) error { - switch d { +var store dialect.Store + +// SetDialect sets the dialect to use for the goose package. 
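// An illustrative example of the package-level flow this function supports (a sketch,
// not an excerpt from the docs):
//
//	if err := goose.SetDialect("postgres"); err != nil { /* handle error */ }
//	if err := goose.Up(db, "migrations"); err != nil { /* handle error */ }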
+func SetDialect(s string) error { + var d dialect.Dialect + switch s { case "postgres", "pgx": - dialect = &PostgresDialect{} + d = dialect.Postgres case "mysql": - dialect = &MySQLDialect{} + d = dialect.Mysql case "sqlite3", "sqlite": - dialect = &Sqlite3Dialect{} - case "mssql": - dialect = &SqlServerDialect{} + d = dialect.Sqlite3 + case "mssql", "azuresql", "sqlserver": + d = dialect.Sqlserver case "redshift": - dialect = &RedshiftDialect{} + d = dialect.Redshift case "tidb": - dialect = &TiDBDialect{} + d = dialect.Tidb case "clickhouse": - dialect = &ClickHouseDialect{} + d = dialect.Clickhouse case "clickhouse-replicated": - dialect = &ClickHouseReplicatedDialect{ - ClusterName: "{cluster}", - } + d = dialect.ClickhouseReplicated case "vertica": - dialect = &VerticaDialect{} + d = dialect.Vertica + case "ydb": + d = dialect.Ydb + case "turso": + d = dialect.Turso default: - return fmt.Errorf("%q: unknown dialect", d) - } - - return nil -} - -//////////////////////////// -// Postgres -//////////////////////////// - -// PostgresDialect struct. -type PostgresDialect struct{} - -func (pg PostgresDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id serial NOT NULL, - version_id bigint NOT NULL, - is_applied boolean NOT NULL, - tstamp timestamp NULL default now(), - PRIMARY KEY(id) - );`, TableName()) -} - -func (pg PostgresDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES ($1, $2);", TableName()) -} - -func (pg PostgresDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m PostgresDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=$1 ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (pg PostgresDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=$1;", TableName()) -} - -//////////////////////////// -// MySQL -//////////////////////////// - -// MySQLDialect struct. -type MySQLDialect struct{} - -func (m MySQLDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id serial NOT NULL, - version_id bigint NOT NULL, - is_applied boolean NOT NULL, - tstamp timestamp NULL default now(), - PRIMARY KEY(id) - );`, TableName()) -} - -func (m MySQLDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES (?, ?);", TableName()) -} - -func (m MySQLDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m MySQLDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (m MySQLDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=?;", TableName()) -} - -//////////////////////////// -// MSSQL -//////////////////////////// - -// SqlServerDialect struct. 
-type SqlServerDialect struct{} - -func (m SqlServerDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id INT NOT NULL IDENTITY(1,1) PRIMARY KEY, - version_id BIGINT NOT NULL, - is_applied BIT NOT NULL, - tstamp DATETIME NULL DEFAULT CURRENT_TIMESTAMP - );`, TableName()) -} - -func (m SqlServerDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES (@p1, @p2);", TableName()) -} - -func (m SqlServerDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied FROM %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err + return fmt.Errorf("%q: unknown dialect", s) } - - return rows, err -} - -func (m SqlServerDialect) migrationSQL() string { - const tpl = ` -WITH Migrations AS -( - SELECT tstamp, is_applied, - ROW_NUMBER() OVER (ORDER BY tstamp) AS 'RowNumber' - FROM %s - WHERE version_id=@p1 -) -SELECT tstamp, is_applied -FROM Migrations -WHERE RowNumber BETWEEN 1 AND 2 -ORDER BY tstamp DESC -` - return fmt.Sprintf(tpl, TableName()) -} - -func (m SqlServerDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=@p1;", TableName()) -} - -//////////////////////////// -// sqlite3 -//////////////////////////// - -// Sqlite3Dialect struct. -type Sqlite3Dialect struct{} - -func (m Sqlite3Dialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - version_id INTEGER NOT NULL, - is_applied INTEGER NOT NULL, - tstamp TIMESTAMP DEFAULT (datetime('now')) - );`, TableName()) -} - -func (m Sqlite3Dialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES (?, ?);", TableName()) -} - -func (m Sqlite3Dialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m Sqlite3Dialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (m Sqlite3Dialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=?;", TableName()) -} - -//////////////////////////// -// Redshift -//////////////////////////// - -// RedshiftDialect struct. 
-type RedshiftDialect struct{} - -func (rs RedshiftDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id integer NOT NULL identity(1, 1), - version_id bigint NOT NULL, - is_applied boolean NOT NULL, - tstamp timestamp NULL default sysdate, - PRIMARY KEY(id) - );`, TableName()) -} - -func (rs RedshiftDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES ($1, $2);", TableName()) -} - -func (rs RedshiftDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m RedshiftDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=$1 ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (rs RedshiftDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=$1;", TableName()) -} - -//////////////////////////// -// TiDB -//////////////////////////// - -// TiDBDialect struct. -type TiDBDialect struct{} - -func (m TiDBDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE, - version_id bigint NOT NULL, - is_applied boolean NOT NULL, - tstamp timestamp NULL default now(), - PRIMARY KEY(id) - );`, TableName()) -} - -func (m TiDBDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES (?, ?);", TableName()) -} - -func (m TiDBDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m TiDBDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (m TiDBDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=?;", TableName()) -} - -//////////////////////////// -// ClickHouse -//////////////////////////// - -// ClickHouseDialect struct. -type ClickHouseDialect struct{} - -func (m ClickHouseDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( - version_id Int64, - is_applied UInt8, - date Date default now(), - tstamp DateTime default now() - ) - ENGINE = MergeTree() - ORDER BY (date)`, TableName()) -} - -func (m ClickHouseDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied FROM %s ORDER BY version_id DESC", TableName())) - if err != nil { - return nil, err - } - return rows, err -} - -func (m ClickHouseDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)", TableName()) -} - -func (m ClickHouseDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id = $1 ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (m ClickHouseDialect) deleteVersionSQL() string { - return fmt.Sprintf("ALTER TABLE %s DELETE WHERE version_id = $1 SETTINGS mutations_sync = 2", TableName()) -} - -// ClickHouseReplicatedDialect struct. 
-type ClickHouseReplicatedDialect struct { - ClusterName string -} - -func (m ClickHouseReplicatedDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ON CLUSTER '%s' ( - version_id Int64, - is_applied UInt8, - date Date default now(), - tstamp DateTime default now() - ) - ENGINE = ReplicatedMergeTree() - ORDER BY (date)`, TableName(), m.ClusterName) -} - -func (m ClickHouseReplicatedDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied FROM %s ORDER BY version_id DESC", TableName())) - if err != nil { - return nil, err - } - return rows, err -} - -func (m ClickHouseReplicatedDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)", TableName()) -} - -func (m ClickHouseReplicatedDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id = $1 ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (m ClickHouseReplicatedDialect) deleteVersionSQL() string { - return fmt.Sprintf("ALTER TABLE %s DELETE WHERE version_id = $1 SETTINGS mutations_sync = 2", TableName()) -} - -//////////////////////////// -// Vertica -//////////////////////////// - -// VerticaDialect struct. -type VerticaDialect struct{} - -func (v VerticaDialect) createVersionTableSQL() string { - return fmt.Sprintf(`CREATE TABLE %s ( - id identity(1,1) NOT NULL, - version_id bigint NOT NULL, - is_applied boolean NOT NULL, - tstamp timestamp NULL default now(), - PRIMARY KEY(id) - );`, TableName()) -} - -func (v VerticaDialect) insertVersionSQL() string { - return fmt.Sprintf("INSERT INTO %s (version_id, is_applied) VALUES (?, ?);", TableName()) -} - -func (v VerticaDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) { - rows, err := db.Query(fmt.Sprintf("SELECT version_id, is_applied from %s ORDER BY id DESC", TableName())) - if err != nil { - return nil, err - } - - return rows, err -} - -func (m VerticaDialect) migrationSQL() string { - return fmt.Sprintf("SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1", TableName()) -} - -func (v VerticaDialect) deleteVersionSQL() string { - return fmt.Sprintf("DELETE FROM %s WHERE version_id=?;", TableName()) + var err error + store, err = dialect.NewStore(d) + return err } diff --git a/vendor/github.com/pressly/goose/v3/down.go b/vendor/github.com/pressly/goose/v3/down.go index c58c214..7d6c21d 100644 --- a/vendor/github.com/pressly/goose/v3/down.go +++ b/vendor/github.com/pressly/goose/v3/down.go @@ -1,12 +1,19 @@ package goose import ( + "context" "database/sql" "fmt" ) // Down rolls back a single migration from the current version. func Down(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return DownContext(ctx, db, dir, opts...) +} + +// DownContext rolls back a single migration from the current version. +func DownContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -21,21 +28,27 @@ func Down(db *sql.DB, dir string, opts ...OptionsFunc) error { } currentVersion := migrations[len(migrations)-1].Version // Migrate only the latest migration down. 
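
The down.go hunk that continues below turns Down and DownTo into thin wrappers over new context-aware variants. A hedged sketch of calling the new entry point directly, where the *sql.DB, the timeout, and the "migrations" directory are illustrative assumptions:

import (
    "context"
    "database/sql"
    "time"

    "github.com/pressly/goose/v3"
)

// rollbackLatest rolls back the most recent migration with a deadline.
// Down(db, dir) now simply delegates to DownContext with context.Background().
func rollbackLatest(db *sql.DB) error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()
    return goose.DownContext(ctx, db, "migrations")
}
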
- return downToNoVersioning(db, migrations, currentVersion-1) + return downToNoVersioning(ctx, db, migrations, currentVersion-1) } - currentVersion, err := GetDBVersion(db) + currentVersion, err := GetDBVersionContext(ctx, db) if err != nil { return err } current, err := migrations.Current(currentVersion) if err != nil { - return fmt.Errorf("no migration %v", currentVersion) + return fmt.Errorf("migration %v: %w", currentVersion, err) } - return current.Down(db) + return current.DownContext(ctx, db) } // DownTo rolls back migrations to a specific version. func DownTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { + ctx := context.Background() + return DownToContext(ctx, db, dir, version, opts...) +} + +// DownToContext rolls back migrations to a specific version. +func DownToContext(ctx context.Context, db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -45,11 +58,11 @@ func DownTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { return err } if option.noVersioning { - return downToNoVersioning(db, migrations, version) + return downToNoVersioning(ctx, db, migrations, version) } for { - currentVersion, err := GetDBVersion(db) + currentVersion, err := GetDBVersionContext(ctx, db) if err != nil { return err } @@ -69,7 +82,7 @@ func DownTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { return nil } - if err = current.Down(db); err != nil { + if err = current.DownContext(ctx, db); err != nil { return err } } @@ -77,7 +90,7 @@ func DownTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { // downToNoVersioning applies down migrations down to, but not including, the // target version. -func downToNoVersioning(db *sql.DB, migrations Migrations, version int64) error { +func downToNoVersioning(ctx context.Context, db *sql.DB, migrations Migrations, version int64) error { var finalVersion int64 for i := len(migrations) - 1; i >= 0; i-- { if version >= migrations[i].Version { @@ -85,7 +98,7 @@ func downToNoVersioning(db *sql.DB, migrations Migrations, version int64) error break } migrations[i].noVersioning = true - if err := migrations[i].Down(db); err != nil { + if err := migrations[i].DownContext(ctx, db); err != nil { return err } } diff --git a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00001_create_users_table.sql b/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00001_create_users_table.sql deleted file mode 100644 index efce49f..0000000 --- a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00001_create_users_table.sql +++ /dev/null @@ -1,14 +0,0 @@ --- +goose Up -CREATE TABLE users ( - id int NOT NULL PRIMARY KEY, - username text, - name text, - surname text -); - -INSERT INTO users VALUES -(0, 'root', '', ''), -(1, 'vojtechvitek', 'Vojtech', 'Vitek'); - --- +goose Down -DROP TABLE users; diff --git a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00002_rename_root.sql b/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00002_rename_root.sql deleted file mode 100644 index 203d54e..0000000 --- a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00002_rename_root.sql +++ /dev/null @@ -1,9 +0,0 @@ --- +goose Up --- +goose StatementBegin -UPDATE users SET username='admin' WHERE username='root'; --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin -UPDATE users SET username='root' WHERE username='admin'; --- +goose StatementEnd diff --git 
a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00003_no_transaction.sql b/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00003_no_transaction.sql deleted file mode 100644 index b890c31..0000000 --- a/vendor/github.com/pressly/goose/v3/examples/sql-migrations/00003_no_transaction.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +goose NO TRANSACTION --- +goose Up -CREATE TABLE post ( - id int NOT NULL, - title text, - body text, - PRIMARY KEY(id) -); - --- +goose Down -DROP TABLE post; diff --git a/vendor/github.com/pressly/goose/v3/fix.go b/vendor/github.com/pressly/goose/v3/fix.go index 7bc7ed5..a498358 100644 --- a/vendor/github.com/pressly/goose/v3/fix.go +++ b/vendor/github.com/pressly/goose/v3/fix.go @@ -11,7 +11,7 @@ const seqVersionTemplate = "%05v" func Fix(dir string) error { // always use osFS here because it's modifying operation - migrations, err := collectMigrationsFS(osFS{}, dir, minVersion, maxVersion) + migrations, err := collectMigrationsFS(osFS{}, dir, minVersion, maxVersion, registeredGoMigrations) if err != nil { return err } diff --git a/vendor/github.com/pressly/goose/v3/globals.go b/vendor/github.com/pressly/goose/v3/globals.go new file mode 100644 index 0000000..535f0ff --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/globals.go @@ -0,0 +1,104 @@ +package goose + +import ( + "errors" + "fmt" + "path/filepath" +) + +var ( + registeredGoMigrations = make(map[int64]*Migration) +) + +// ResetGlobalMigrations resets the global Go migrations registry. +// +// Not safe for concurrent use. +func ResetGlobalMigrations() { + registeredGoMigrations = make(map[int64]*Migration) +} + +// SetGlobalMigrations registers Go migrations globally. It returns an error if a migration with the +// same version has already been registered. Go migrations must be constructed using the +// [NewGoMigration] function. +// +// Not safe for concurrent use. +func SetGlobalMigrations(migrations ...*Migration) error { + for _, m := range migrations { + if _, ok := registeredGoMigrations[m.Version]; ok { + return fmt.Errorf("go migration with version %d already registered", m.Version) + } + if err := checkGoMigration(m); err != nil { + return fmt.Errorf("invalid go migration: %w", err) + } + registeredGoMigrations[m.Version] = m + } + return nil +} + +func checkGoMigration(m *Migration) error { + if !m.construct { + return errors.New("must use NewGoMigration to construct migrations") + } + if !m.Registered { + return errors.New("must be registered") + } + if m.Type != TypeGo { + return fmt.Errorf("type must be %q", TypeGo) + } + if m.Version < 1 { + return errors.New("version must be greater than zero") + } + if m.Source != "" { + if filepath.Ext(m.Source) != ".go" { + return fmt.Errorf("source must have .go extension: %q", m.Source) + } + // If the source is set, expect it to be a path with a numeric component that matches the + // version. This field is not intended to be used for descriptive purposes. 
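
SetGlobalMigrations above only accepts migrations built with NewGoMigration, and checkGoMigration (which continues below) enforces that, along with the one-of rules for the up and down functions. A minimal sketch of registering a Go migration, assuming the NewGoMigration(version, up, down *GoFunc) constructor available since goose v3.16; the version number and table names are illustrative:

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

var addUsersTable = goose.NewGoMigration(
    20240820120000, // illustrative version
    &goose.GoFunc{RunTx: func(ctx context.Context, tx *sql.Tx) error {
        _, err := tx.ExecContext(ctx, `CREATE TABLE users (id bigint PRIMARY KEY)`)
        return err
    }},
    &goose.GoFunc{RunTx: func(ctx context.Context, tx *sql.Tx) error {
        _, err := tx.ExecContext(ctx, `DROP TABLE users`)
        return err
    }},
)

func registerMigrations() error {
    // Returns an error if the version is already registered or the migration
    // was not constructed with NewGoMigration.
    return goose.SetGlobalMigrations(addUsersTable)
}
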
+ version, err := NumericComponent(m.Source) + if err != nil { + return fmt.Errorf("invalid source: %w", err) + } + if version != m.Version { + return fmt.Errorf("version:%d does not match numeric component in source %q", m.Version, m.Source) + } + } + if err := checkGoFunc(m.goUp); err != nil { + return fmt.Errorf("up function: %w", err) + } + if err := checkGoFunc(m.goDown); err != nil { + return fmt.Errorf("down function: %w", err) + } + if m.UpFnContext != nil && m.UpFnNoTxContext != nil { + return errors.New("must specify exactly one of UpFnContext or UpFnNoTxContext") + } + if m.UpFn != nil && m.UpFnNoTx != nil { + return errors.New("must specify exactly one of UpFn or UpFnNoTx") + } + if m.DownFnContext != nil && m.DownFnNoTxContext != nil { + return errors.New("must specify exactly one of DownFnContext or DownFnNoTxContext") + } + if m.DownFn != nil && m.DownFnNoTx != nil { + return errors.New("must specify exactly one of DownFn or DownFnNoTx") + } + return nil +} + +func checkGoFunc(f *GoFunc) error { + if f.RunTx != nil && f.RunDB != nil { + return errors.New("must specify exactly one of RunTx or RunDB") + } + switch f.Mode { + case TransactionEnabled, TransactionDisabled: + // No functions, but mode is set. This is not an error. It means the user wants to + // record a version with the given mode but not run any functions. + default: + return fmt.Errorf("invalid mode: %d", f.Mode) + } + if f.RunDB != nil && f.Mode != TransactionDisabled { + return fmt.Errorf("transaction mode must be disabled or unspecified when RunDB is set") + } + if f.RunTx != nil && f.Mode != TransactionEnabled { + return fmt.Errorf("transaction mode must be enabled or unspecified when RunTx is set") + } + return nil +} diff --git a/vendor/github.com/pressly/goose/v3/goose.go b/vendor/github.com/pressly/goose/v3/goose.go index 0dbfd67..6a3fa8f 100644 --- a/vendor/github.com/pressly/goose/v3/goose.go +++ b/vendor/github.com/pressly/goose/v3/goose.go @@ -1,14 +1,15 @@ package goose import ( + "context" "database/sql" "fmt" "io/fs" "strconv" ) -// Deprecated: VERSION will no longer be supported in v4. -const VERSION = "v3.2.0" +// Deprecated: VERSION will no longer be supported in the next major release. +const VERSION = "v3.18.0" var ( minVersion = int64(0) @@ -38,23 +39,39 @@ func SetBaseFS(fsys fs.FS) { } // Run runs a goose command. +// +// Deprecated: Use RunContext. func Run(command string, db *sql.DB, dir string, args ...string) error { - return run(command, db, dir, args) + ctx := context.Background() + return RunContext(ctx, command, db, dir, args...) } -// Run runs a goose command with options. +// RunContext runs a goose command. +func RunContext(ctx context.Context, command string, db *sql.DB, dir string, args ...string) error { + return run(ctx, command, db, dir, args) +} + +// RunWithOptions runs a goose command with options. +// +// Deprecated: Use RunWithOptionsContext. func RunWithOptions(command string, db *sql.DB, dir string, args []string, options ...OptionsFunc) error { - return run(command, db, dir, args, options...) + ctx := context.Background() + return RunWithOptionsContext(ctx, command, db, dir, args, options...) +} + +// RunWithOptionsContext runs a goose command with options. +func RunWithOptionsContext(ctx context.Context, command string, db *sql.DB, dir string, args []string, options ...OptionsFunc) error { + return run(ctx, command, db, dir, args, options...) 
} -func run(command string, db *sql.DB, dir string, args []string, options ...OptionsFunc) error { +func run(ctx context.Context, command string, db *sql.DB, dir string, args []string, options ...OptionsFunc) error { switch command { case "up": - if err := Up(db, dir, options...); err != nil { + if err := UpContext(ctx, db, dir, options...); err != nil { return err } case "up-by-one": - if err := UpByOne(db, dir, options...); err != nil { + if err := UpByOneContext(ctx, db, dir, options...); err != nil { return err } case "up-to": @@ -66,7 +83,7 @@ func run(command string, db *sql.DB, dir string, args []string, options ...Optio if err != nil { return fmt.Errorf("version must be a number (got '%s')", args[0]) } - if err := UpTo(db, dir, version, options...); err != nil { + if err := UpToContext(ctx, db, dir, version, options...); err != nil { return err } case "create": @@ -82,7 +99,7 @@ func run(command string, db *sql.DB, dir string, args []string, options ...Optio return err } case "down": - if err := Down(db, dir, options...); err != nil { + if err := DownContext(ctx, db, dir, options...); err != nil { return err } case "down-to": @@ -94,7 +111,7 @@ func run(command string, db *sql.DB, dir string, args []string, options ...Optio if err != nil { return fmt.Errorf("version must be a number (got '%s')", args[0]) } - if err := DownTo(db, dir, version, options...); err != nil { + if err := DownToContext(ctx, db, dir, version, options...); err != nil { return err } case "fix": @@ -102,19 +119,19 @@ func run(command string, db *sql.DB, dir string, args []string, options ...Optio return err } case "redo": - if err := Redo(db, dir, options...); err != nil { + if err := RedoContext(ctx, db, dir, options...); err != nil { return err } case "reset": - if err := Reset(db, dir, options...); err != nil { + if err := ResetContext(ctx, db, dir, options...); err != nil { return err } case "status": - if err := Status(db, dir, options...); err != nil { + if err := StatusContext(ctx, db, dir, options...); err != nil { return err } case "version": - if err := Version(db, dir, options...); err != nil { + if err := VersionContext(ctx, db, dir, options...); err != nil { return err } default: diff --git a/vendor/github.com/pressly/goose/v3/install.sh b/vendor/github.com/pressly/goose/v3/install.sh index 5a94abe..de82bc3 100644 --- a/vendor/github.com/pressly/goose/v3/install.sh +++ b/vendor/github.com/pressly/goose/v3/install.sh @@ -3,13 +3,17 @@ # Ref: https://github.com/denoland/deno_install # TODO(everyone): Keep this script simple and easily auditable. -# TODO(mf): this should work on Linux and macOS. Not intended for Windows. +# TODO(mf): this should work on Linux and macOS. Not intended for Windows. 
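
The run dispatcher shown above is what the goose CLI drives; RunContext exposes the same command strings ("up", "up-to", "down", "status", ...) to library users. A hedged sketch, with the directory and version argument as illustrative assumptions:

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

// migrateUpTo is the library equivalent of `goose up-to <version>`: RunContext
// parses the string argument and dispatches to UpToContext.
func migrateUpTo(ctx context.Context, db *sql.DB, version string) error {
    return goose.RunContext(ctx, "up-to", db, "migrations", version)
}
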
set -e -os=$(uname -s) +os=$(uname -s | tr '[:upper:]' '[:lower:]') arch=$(uname -m) +if [ "$arch" = "aarch64" ]; then + arch="arm64" +fi + if [ $# -eq 0 ]; then goose_uri="https://github.com/pressly/goose/releases/latest/download/goose_${os}_${arch}" else diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/clickhouse.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/clickhouse.go new file mode 100644 index 0000000..b8593ec --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/clickhouse.go @@ -0,0 +1,85 @@ +package dialectquery + +import "fmt" + +type Clickhouse struct{} + +var _ Querier = (*Clickhouse)(nil) + +func (c *Clickhouse) CreateTable(tableName string) string { + q := `CREATE TABLE IF NOT EXISTS %s ( + version_id Int64, + is_applied UInt8, + date Date default now(), + tstamp DateTime default now() + ) + ENGINE = MergeTree() + ORDER BY (date)` + return fmt.Sprintf(q, tableName) +} + +func (c *Clickhouse) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)` + return fmt.Sprintf(q, tableName) +} + +func (c *Clickhouse) DeleteVersion(tableName string) string { + q := `ALTER TABLE %s DELETE WHERE version_id = $1 SETTINGS mutations_sync = 2` + return fmt.Sprintf(q, tableName) +} + +func (c *Clickhouse) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id = $1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (c *Clickhouse) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied FROM %s ORDER BY version_id DESC` + return fmt.Sprintf(q, tableName) +} + +func (c *Clickhouse) GetLatestVersion(tableName string) string { + q := `SELECT max(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} + +type ClickhouseReplicated struct{} + +var _ Querier = (*ClickhouseReplicated)(nil) + +func (c *ClickhouseReplicated) CreateTable(tableName string) string { + q := `CREATE TABLE IF NOT EXISTS %s ON CLUSTER '{cluster}' ( + version_id Int64, + is_applied UInt8, + date Date default now(), + tstamp DateTime default now() + ) + ENGINE = ReplicatedMergeTree() + ORDER BY (date)` + return fmt.Sprintf(q, tableName) +} + +func (c *ClickhouseReplicated) InsertVersion(tableName string) string { + q := `SET insert_quorum=2; INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)` + return fmt.Sprintf(q, tableName) +} + +func (c *ClickhouseReplicated) DeleteVersion(tableName string) string { + q := `ALTER TABLE %s DELETE WHERE version_id = $1 SETTINGS mutations_sync = 2` + return fmt.Sprintf(q, tableName) +} + +func (c *ClickhouseReplicated) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id = $1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (c *ClickhouseReplicated) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied FROM %s ORDER BY version_id DESC` + return fmt.Sprintf(q, tableName) +} + +func (c *ClickhouseReplicated) GetLatestVersion(tableName string) string { + q := `SELECT max(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/dialectquery.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/dialectquery.go new file mode 100644 index 0000000..5e10e46 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/dialectquery.go @@ -0,0 
+1,27 @@ +package dialectquery + +// Querier is the interface that wraps the basic methods to create a dialect specific query. +type Querier interface { + // CreateTable returns the SQL query string to create the db version table. + CreateTable(tableName string) string + + // InsertVersion returns the SQL query string to insert a new version into the db version table. + InsertVersion(tableName string) string + + // DeleteVersion returns the SQL query string to delete a version from the db version table. + DeleteVersion(tableName string) string + + // GetMigrationByVersion returns the SQL query string to get a single migration by version. + // + // The query should return the timestamp and is_applied columns. + GetMigrationByVersion(tableName string) string + + // ListMigrations returns the SQL query string to list all migrations in descending order by id. + // + // The query should return the version_id and is_applied columns. + ListMigrations(tableName string) string + + // GetLatestVersion returns the SQL query string to get the last version_id from the db version + // table. Returns a nullable int64 value. + GetLatestVersion(tableName string) string +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/mysql.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/mysql.go new file mode 100644 index 0000000..b14ef39 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/mysql.go @@ -0,0 +1,43 @@ +package dialectquery + +import "fmt" + +type Mysql struct{} + +var _ Querier = (*Mysql)(nil) + +func (m *Mysql) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id serial NOT NULL, + version_id bigint NOT NULL, + is_applied boolean NOT NULL, + tstamp timestamp NULL default now(), + PRIMARY KEY(id) + )` + return fmt.Sprintf(q, tableName) +} + +func (m *Mysql) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES (?, ?)` + return fmt.Sprintf(q, tableName) +} + +func (m *Mysql) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=?` + return fmt.Sprintf(q, tableName) +} + +func (m *Mysql) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=? 
ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (m *Mysql) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (m *Mysql) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/postgres.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/postgres.go new file mode 100644 index 0000000..0faadf5 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/postgres.go @@ -0,0 +1,43 @@ +package dialectquery + +import "fmt" + +type Postgres struct{} + +var _ Querier = (*Postgres)(nil) + +func (p *Postgres) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id serial NOT NULL, + version_id bigint NOT NULL, + is_applied boolean NOT NULL, + tstamp timestamp NULL default now(), + PRIMARY KEY(id) + )` + return fmt.Sprintf(q, tableName) +} + +func (p *Postgres) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)` + return fmt.Sprintf(q, tableName) +} + +func (p *Postgres) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=$1` + return fmt.Sprintf(q, tableName) +} + +func (p *Postgres) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=$1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (p *Postgres) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (p *Postgres) GetLatestVersion(tableName string) string { + q := `SELECT max(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/redshift.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/redshift.go new file mode 100644 index 0000000..4090394 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/redshift.go @@ -0,0 +1,43 @@ +package dialectquery + +import "fmt" + +type Redshift struct{} + +var _ Querier = (*Redshift)(nil) + +func (r *Redshift) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id integer NOT NULL identity(1, 1), + version_id bigint NOT NULL, + is_applied boolean NOT NULL, + tstamp timestamp NULL default sysdate, + PRIMARY KEY(id) + )` + return fmt.Sprintf(q, tableName) +} + +func (r *Redshift) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)` + return fmt.Sprintf(q, tableName) +} + +func (r *Redshift) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=$1` + return fmt.Sprintf(q, tableName) +} + +func (r *Redshift) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=$1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (r *Redshift) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (r *Redshift) GetLatestVersion(tableName string) string { + q := `SELECT max(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlite3.go 
b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlite3.go new file mode 100644 index 0000000..1c58a74 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlite3.go @@ -0,0 +1,42 @@ +package dialectquery + +import "fmt" + +type Sqlite3 struct{} + +var _ Querier = (*Sqlite3)(nil) + +func (s *Sqlite3) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + version_id INTEGER NOT NULL, + is_applied INTEGER NOT NULL, + tstamp TIMESTAMP DEFAULT (datetime('now')) + )` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlite3) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES (?, ?)` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlite3) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=?` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlite3) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlite3) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlite3) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlserver.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlserver.go new file mode 100644 index 0000000..4d172c2 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/sqlserver.go @@ -0,0 +1,42 @@ +package dialectquery + +import "fmt" + +type Sqlserver struct{} + +var _ Querier = (*Sqlserver)(nil) + +func (s *Sqlserver) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id INT NOT NULL IDENTITY(1,1) PRIMARY KEY, + version_id BIGINT NOT NULL, + is_applied BIT NOT NULL, + tstamp DATETIME NULL DEFAULT CURRENT_TIMESTAMP + )` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlserver) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES (@p1, @p2)` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlserver) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=@p1` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlserver) GetMigrationByVersion(tableName string) string { + q := `SELECT TOP 1 tstamp, is_applied FROM %s WHERE version_id=@p1 ORDER BY tstamp DESC` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlserver) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied FROM %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (s *Sqlserver) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/tidb.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/tidb.go new file mode 100644 index 0000000..0549e84 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/tidb.go @@ -0,0 +1,43 @@ +package dialectquery + +import "fmt" + +type Tidb struct{} + +var _ Querier = (*Tidb)(nil) + +func (t *Tidb) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE, + version_id bigint NOT NULL, + is_applied 
boolean NOT NULL, + tstamp timestamp NULL default now(), + PRIMARY KEY(id) + )` + return fmt.Sprintf(q, tableName) +} + +func (t *Tidb) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES (?, ?)` + return fmt.Sprintf(q, tableName) +} + +func (t *Tidb) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=?` + return fmt.Sprintf(q, tableName) +} + +func (t *Tidb) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=? ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (t *Tidb) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (t *Tidb) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/turso.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/turso.go new file mode 100644 index 0000000..ced0f5d --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/turso.go @@ -0,0 +1,7 @@ +package dialectquery + +type Turso struct { + Sqlite3 +} + +var _ Querier = (*Turso)(nil) diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/vertica.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/vertica.go new file mode 100644 index 0000000..f4702be --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/vertica.go @@ -0,0 +1,43 @@ +package dialectquery + +import "fmt" + +type Vertica struct{} + +var _ Querier = (*Vertica)(nil) + +func (v *Vertica) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id identity(1,1) NOT NULL, + version_id bigint NOT NULL, + is_applied boolean NOT NULL, + tstamp timestamp NULL default now(), + PRIMARY KEY(id) + )` + return fmt.Sprintf(q, tableName) +} + +func (v *Vertica) InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES (?, ?)` + return fmt.Sprintf(q, tableName) +} + +func (v *Vertica) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=?` + return fmt.Sprintf(q, tableName) +} + +func (v *Vertica) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=? 
ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (v *Vertica) ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func (v *Vertica) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/ydb.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/ydb.go new file mode 100644 index 0000000..ab5e68e --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialectquery/ydb.go @@ -0,0 +1,53 @@ +package dialectquery + +import "fmt" + +type Ydb struct{} + +var _ Querier = (*Ydb)(nil) + +func (c *Ydb) CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + version_id Uint64, + is_applied Bool, + tstamp Timestamp, + + PRIMARY KEY(version_id) + )` + return fmt.Sprintf(q, tableName) +} + +func (c *Ydb) InsertVersion(tableName string) string { + q := `INSERT INTO %s ( + version_id, + is_applied, + tstamp + ) VALUES ( + CAST($1 AS Uint64), + $2, + CurrentUtcTimestamp() + )` + return fmt.Sprintf(q, tableName) +} + +func (c *Ydb) DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id = $1` + return fmt.Sprintf(q, tableName) +} + +func (c *Ydb) GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id = $1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func (c *Ydb) ListMigrations(tableName string) string { + q := ` + SELECT version_id, is_applied, tstamp AS __discard_column_tstamp + FROM %s ORDER BY __discard_column_tstamp DESC` + return fmt.Sprintf(q, tableName) +} + +func (c *Ydb) GetLatestVersion(tableName string) string { + q := `SELECT MAX(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/dialects.go b/vendor/github.com/pressly/goose/v3/internal/dialect/dialects.go new file mode 100644 index 0000000..34e4e9b --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/dialects.go @@ -0,0 +1,18 @@ +package dialect + +// Dialect is the type of database dialect. +type Dialect string + +const ( + Postgres Dialect = "postgres" + Mysql Dialect = "mysql" + Sqlite3 Dialect = "sqlite3" + Sqlserver Dialect = "sqlserver" + Redshift Dialect = "redshift" + Tidb Dialect = "tidb" + Clickhouse Dialect = "clickhouse" + ClickhouseReplicated Dialect = "clickhouse-replicated" + Vertica Dialect = "vertica" + Ydb Dialect = "ydb" + Turso Dialect = "turso" +) diff --git a/vendor/github.com/pressly/goose/v3/internal/dialect/store.go b/vendor/github.com/pressly/goose/v3/internal/dialect/store.go new file mode 100644 index 0000000..6f9edaf --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/dialect/store.go @@ -0,0 +1,164 @@ +package dialect + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/pressly/goose/v3/internal/dialect/dialectquery" +) + +// Store is the interface that wraps the basic methods for a database dialect. +// +// A dialect is a set of SQL statements that are specific to a database. +// +// By defining a store interface, we can support multiple databases +// with a single codebase. +// +// The underlying implementation does not modify the error. It is the callers +// responsibility to assert for the correct error, such as sql.ErrNoRows. 
+type Store interface { + // CreateVersionTable creates the version table within a transaction. + // This table is used to store goose migrations. + CreateVersionTable(ctx context.Context, tx *sql.Tx, tableName string) error + + // InsertVersion inserts a version id into the version table within a transaction. + InsertVersion(ctx context.Context, tx *sql.Tx, tableName string, version int64) error + // InsertVersionNoTx inserts a version id into the version table without a transaction. + InsertVersionNoTx(ctx context.Context, db *sql.DB, tableName string, version int64) error + + // DeleteVersion deletes a version id from the version table within a transaction. + DeleteVersion(ctx context.Context, tx *sql.Tx, tableName string, version int64) error + // DeleteVersionNoTx deletes a version id from the version table without a transaction. + DeleteVersionNoTx(ctx context.Context, db *sql.DB, tableName string, version int64) error + + // GetMigrationRow retrieves a single migration by version id. + // + // Returns the raw sql error if the query fails. It is the callers responsibility + // to assert for the correct error, such as sql.ErrNoRows. + GetMigration(ctx context.Context, db *sql.DB, tableName string, version int64) (*GetMigrationResult, error) + + // ListMigrations retrieves all migrations sorted in descending order by id. + // + // If there are no migrations, an empty slice is returned with no error. + ListMigrations(ctx context.Context, db *sql.DB, tableName string) ([]*ListMigrationsResult, error) +} + +// NewStore returns a new Store for the given dialect. +func NewStore(d Dialect) (Store, error) { + var querier dialectquery.Querier + switch d { + case Postgres: + querier = &dialectquery.Postgres{} + case Mysql: + querier = &dialectquery.Mysql{} + case Sqlite3: + querier = &dialectquery.Sqlite3{} + case Sqlserver: + querier = &dialectquery.Sqlserver{} + case Redshift: + querier = &dialectquery.Redshift{} + case Tidb: + querier = &dialectquery.Tidb{} + case Clickhouse: + querier = &dialectquery.Clickhouse{} + case ClickhouseReplicated: + querier = &dialectquery.ClickhouseReplicated{} + case Vertica: + querier = &dialectquery.Vertica{} + case Ydb: + querier = &dialectquery.Ydb{} + case Turso: + querier = &dialectquery.Turso{} + default: + return nil, fmt.Errorf("unknown querier dialect: %v", d) + } + return &store{querier: querier}, nil +} + +type GetMigrationResult struct { + IsApplied bool + Timestamp time.Time +} + +type ListMigrationsResult struct { + VersionID int64 + IsApplied bool +} + +type store struct { + querier dialectquery.Querier +} + +var _ Store = (*store)(nil) + +func (s *store) CreateVersionTable(ctx context.Context, tx *sql.Tx, tableName string) error { + q := s.querier.CreateTable(tableName) + _, err := tx.ExecContext(ctx, q) + return err +} + +func (s *store) InsertVersion(ctx context.Context, tx *sql.Tx, tableName string, version int64) error { + q := s.querier.InsertVersion(tableName) + _, err := tx.ExecContext(ctx, q, version, true) + return err +} + +func (s *store) InsertVersionNoTx(ctx context.Context, db *sql.DB, tableName string, version int64) error { + q := s.querier.InsertVersion(tableName) + _, err := db.ExecContext(ctx, q, version, true) + return err +} + +func (s *store) DeleteVersion(ctx context.Context, tx *sql.Tx, tableName string, version int64) error { + q := s.querier.DeleteVersion(tableName) + _, err := tx.ExecContext(ctx, q, version) + return err +} + +func (s *store) DeleteVersionNoTx(ctx context.Context, db *sql.DB, tableName string, version 
int64) error { + q := s.querier.DeleteVersion(tableName) + _, err := db.ExecContext(ctx, q, version) + return err +} + +func (s *store) GetMigration(ctx context.Context, db *sql.DB, tableName string, version int64) (*GetMigrationResult, error) { + q := s.querier.GetMigrationByVersion(tableName) + var timestamp time.Time + var isApplied bool + err := db.QueryRowContext(ctx, q, version).Scan(&timestamp, &isApplied) + if err != nil { + return nil, err + } + return &GetMigrationResult{ + IsApplied: isApplied, + Timestamp: timestamp, + }, nil +} + +func (s *store) ListMigrations(ctx context.Context, db *sql.DB, tableName string) ([]*ListMigrationsResult, error) { + q := s.querier.ListMigrations(tableName) + rows, err := db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer rows.Close() + + var migrations []*ListMigrationsResult + for rows.Next() { + var version int64 + var isApplied bool + if err := rows.Scan(&version, &isApplied); err != nil { + return nil, err + } + migrations = append(migrations, &ListMigrationsResult{ + VersionID: version, + IsApplied: isApplied, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return migrations, nil +} diff --git a/vendor/github.com/pressly/goose/v3/internal/gooseutil/resolve.go b/vendor/github.com/pressly/goose/v3/internal/gooseutil/resolve.go new file mode 100644 index 0000000..a70245b --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/gooseutil/resolve.go @@ -0,0 +1,124 @@ +// Package gooseutil provides utility functions we want to keep internal to the package. It's +// intended to be a collection of well-tested helper functions. +package gooseutil + +import ( + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +// UpVersions returns a list of migrations to apply based on the versions in the filesystem and the +// versions in the database. The target version can be used to specify a target version. In most +// cases this will be math.MaxInt64. +// +// The allowMissing flag can be used to allow missing migrations as part of the list of migrations +// to apply. Otherwise, an error will be returned if there are missing migrations in the database. +func UpVersions( + fsysVersions []int64, + dbVersions []int64, + target int64, + allowMissing bool, +) ([]int64, error) { + // Sort the list of versions in the filesystem. This should already be sorted, but we do this + // just in case. + sortAscending(fsysVersions) + + // dbAppliedVersions is a map of all applied migrations in the database. + dbAppliedVersions := make(map[int64]bool, len(dbVersions)) + var dbMaxVersion int64 + for _, v := range dbVersions { + dbAppliedVersions[v] = true + if v > dbMaxVersion { + dbMaxVersion = v + } + } + + // Get a list of migrations that are missing from the database. A missing migration is one that + // has a version less than the max version in the database and has not been applied. + // + // In most cases the target version is math.MaxInt64, but it can be used to specify a target + // version. In which case we respect the target version and only surface migrations up to and + // including that target. + var missing []int64 + for _, v := range fsysVersions { + if dbAppliedVersions[v] { + continue + } + if v < dbMaxVersion && v <= target { + missing = append(missing, v) + } + } + + // feat(mf): It is very possible someone may want to apply ONLY new migrations and skip missing + // migrations entirely. At the moment this is not supported, but leaving this comment because + // that's where that logic would be handled.
+ // + // For example, if database has 1,4 already applied and 2,3,5 are new, we would apply only 5 and + // skip 2,3. Not sure if this is a common use case, but it's possible someone may want to do + // this. + if len(missing) > 0 && !allowMissing { + return nil, newMissingError(missing, dbMaxVersion, target) + } + + var out []int64 + + // 1. Add missing migrations to the list of migrations to apply, if any. + out = append(out, missing...) + + // 2. Add new migrations to the list of migrations to apply, if any. + for _, v := range fsysVersions { + if dbAppliedVersions[v] { + continue + } + if v > dbMaxVersion && v <= target { + out = append(out, v) + } + } + // 3. Sort the list of migrations to apply. + sortAscending(out) + + return out, nil +} + +func newMissingError( + missing []int64, + dbMaxVersion int64, + target int64, +) error { + sortAscending(missing) + + collected := make([]string, 0, len(missing)) + for _, v := range missing { + collected = append(collected, strconv.FormatInt(v, 10)) + } + + msg := "migration" + if len(collected) > 1 { + msg += "s" + } + + var versionsMsg string + if len(collected) > 1 { + versionsMsg = "versions " + strings.Join(collected, ",") + } else { + versionsMsg = "version " + collected[0] + } + + desiredMsg := fmt.Sprintf("database version (%d)", dbMaxVersion) + if target != math.MaxInt64 { + desiredMsg += fmt.Sprintf(", with target version (%d)", target) + } + + return fmt.Errorf("detected %d missing (out-of-order) %s lower than %s: %s", + len(missing), msg, desiredMsg, versionsMsg, + ) +} + +func sortAscending(versions []int64) { + sort.Slice(versions, func(i, j int) bool { + return versions[i] < versions[j] + }) +} diff --git a/vendor/github.com/pressly/goose/v3/internal/sqlparser/parse.go b/vendor/github.com/pressly/goose/v3/internal/sqlparser/parse.go new file mode 100644 index 0000000..b42fdde --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/internal/sqlparser/parse.go @@ -0,0 +1,59 @@ +package sqlparser + +import ( + "fmt" + "io/fs" + + "go.uber.org/multierr" + "golang.org/x/sync/errgroup" +) + +type ParsedSQL struct { + UseTx bool + Up, Down []string +} + +func ParseAllFromFS(fsys fs.FS, filename string, debug bool) (*ParsedSQL, error) { + parsedSQL := new(ParsedSQL) + // TODO(mf): parse is called twice, once for up and once for down. This is inefficient. It + // should be possible to parse both directions in one pass. Also, UseTx is set once (but + // returned twice), which is unnecessary and potentially error-prone if the two calls to + // parseSQL disagree based on direction. 
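
A worked example of the UpVersions rules above; gooseutil is an internal package, so this only compiles inside the goose module and is shown purely to illustrate the resolution behaviour (the version numbers are illustrative):

import (
    "fmt"
    "math"

    "github.com/pressly/goose/v3/internal/gooseutil"
)

func exampleResolve() {
    got, err := gooseutil.UpVersions(
        []int64{1, 2, 3, 5}, // versions on disk
        []int64{1, 3},       // versions already applied
        math.MaxInt64,       // no target cap
        true,                // allowMissing: apply out-of-order versions too
    )
    fmt.Println(got, err) // [2 5] <nil>: missing 2 is applied first, then new 5
    // With allowMissing=false the same input fails with a "detected 1 missing
    // (out-of-order) migration lower than database version (3)" error.
}

The sqlparser hunk resumes below with the errgroup-based body of ParseAllFromFS.
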
+ var g errgroup.Group + g.Go(func() error { + up, useTx, err := parse(fsys, filename, DirectionUp, debug) + if err != nil { + return err + } + parsedSQL.Up = up + parsedSQL.UseTx = useTx + return nil + }) + g.Go(func() error { + down, _, err := parse(fsys, filename, DirectionDown, debug) + if err != nil { + return err + } + parsedSQL.Down = down + return nil + }) + if err := g.Wait(); err != nil { + return nil, err + } + return parsedSQL, nil +} + +func parse(fsys fs.FS, filename string, direction Direction, debug bool) (_ []string, _ bool, retErr error) { + r, err := fsys.Open(filename) + if err != nil { + return nil, false, err + } + defer func() { + retErr = multierr.Append(retErr, r.Close()) + }() + stmts, useTx, err := ParseSQLMigration(r, direction, debug) + if err != nil { + return nil, false, fmt.Errorf("failed to parse %s: %w", filename, err) + } + return stmts, useTx, nil +} diff --git a/vendor/github.com/pressly/goose/v3/internal/sqlparser/parser.go b/vendor/github.com/pressly/goose/v3/internal/sqlparser/parser.go index 50fc625..f08ae3e 100644 --- a/vendor/github.com/pressly/goose/v3/internal/sqlparser/parser.go +++ b/vendor/github.com/pressly/goose/v3/internal/sqlparser/parser.go @@ -7,10 +7,35 @@ import ( "fmt" "io" "log" + "os" "strings" "sync" + + "github.com/mfridman/interpolate" +) + +type Direction string + +const ( + DirectionUp Direction = "up" + DirectionDown Direction = "down" ) +func FromBool(b bool) Direction { + if b { + return DirectionUp + } + return DirectionDown +} + +func (d Direction) String() string { + return string(d) +} + +func (d Direction) ToBool() bool { + return d == DirectionUp +} + type parserState int const ( @@ -23,15 +48,37 @@ const ( gooseStatementEndDown // 6 ) -type stateMachine parserState +type stateMachine struct { + state parserState + verbose bool +} -func (s *stateMachine) Get() parserState { - return parserState(*s) +func newStateMachine(begin parserState, verbose bool) *stateMachine { + return &stateMachine{ + state: begin, + verbose: verbose, + } } -func (s *stateMachine) Set(new parserState) { - verboseInfo("StateMachine: %v => %v", *s, new) - *s = stateMachine(new) +func (s *stateMachine) get() parserState { + return s.state +} + +func (s *stateMachine) set(new parserState) { + s.print("set %d => %d", s.state, new) + s.state = new +} + +const ( + grayColor = "\033[90m" + resetColor = "\033[00m" +) + +func (s *stateMachine) print(msg string, args ...interface{}) { + msg = "StateMachine: " + msg + if s.verbose { + log.Printf(grayColor+msg+resetColor, args...) + } } const scanBufSize = 4 * 1024 * 1024 @@ -53,7 +100,7 @@ var bufferPool = sync.Pool{ // within a statement. For these cases, we provide the explicit annotations // 'StatementBegin' and 'StatementEnd' to allow the script to // tell us to ignore semicolons. 
-func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, err error) { +func ParseSQLMigration(r io.Reader, direction Direction, debug bool) (stmts []string, useTx bool, err error) { scanBufPtr := bufferPool.Get().(*[]byte) scanBuf := *scanBufPtr defer bufferPool.Put(scanBufPtr) @@ -61,66 +108,90 @@ func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, scanner := bufio.NewScanner(r) scanner.Buffer(scanBuf, scanBufSize) - stateMachine := stateMachine(start) + stateMachine := newStateMachine(start, debug) useTx = true + useEnvsub := false var buf bytes.Buffer for scanner.Scan() { line := scanner.Text() - if verbose { + if debug { log.Println(line) } - if stateMachine.Get() == start && strings.TrimSpace(line) == "" { + if stateMachine.get() == start && strings.TrimSpace(line) == "" { continue } - // TODO(mf): validate annotations to avoid common user errors: - // https://github.com/pressly/goose/issues/163#issuecomment-501736725 - if strings.HasPrefix(line, "--") { - cmd := strings.TrimSpace(strings.TrimPrefix(line, "--")) + + // Check for annotations. + // All annotations must be in format: "-- +goose [annotation]" + if strings.HasPrefix(strings.TrimSpace(line), "--") && strings.Contains(line, "+goose") { + var cmd annotation + + cmd, err = extractAnnotation(line) + if err != nil { + return nil, false, fmt.Errorf("failed to parse annotation line %q: %w", line, err) + } switch cmd { - case "+goose Up": - switch stateMachine.Get() { + case annotationUp: + switch stateMachine.get() { case start: - stateMachine.Set(gooseUp) + stateMachine.set(gooseUp) default: - return nil, false, fmt.Errorf("duplicate '-- +goose Up' annotations; stateMachine=%v, see https://github.com/pressly/goose#sql-migrations", stateMachine) + return nil, false, fmt.Errorf("duplicate '-- +goose Up' annotations; stateMachine=%d, see https://github.com/pressly/goose#sql-migrations", stateMachine.state) } continue - case "+goose Down": - switch stateMachine.Get() { + case annotationDown: + switch stateMachine.get() { case gooseUp, gooseStatementEndUp: - stateMachine.Set(gooseDown) + // If we hit a down annotation, but the buffer is not empty, we have an unfinished SQL query from a + // previous up annotation. This is an error, because we expect the SQL query to be terminated by a semicolon + // and the buffer to have been reset. 
+ if bufferRemaining := strings.TrimSpace(buf.String()); len(bufferRemaining) > 0 { + return nil, false, missingSemicolonError(stateMachine.state, direction, bufferRemaining) + } + stateMachine.set(gooseDown) default: - return nil, false, fmt.Errorf("must start with '-- +goose Up' annotation, stateMachine=%v, see https://github.com/pressly/goose#sql-migrations", stateMachine) + return nil, false, fmt.Errorf("must start with '-- +goose Up' annotation, stateMachine=%d, see https://github.com/pressly/goose#sql-migrations", stateMachine.state) } continue - case "+goose StatementBegin": - switch stateMachine.Get() { + case annotationStatementBegin: + switch stateMachine.get() { case gooseUp, gooseStatementEndUp: - stateMachine.Set(gooseStatementBeginUp) + stateMachine.set(gooseStatementBeginUp) case gooseDown, gooseStatementEndDown: - stateMachine.Set(gooseStatementBeginDown) + stateMachine.set(gooseStatementBeginDown) default: - return nil, false, fmt.Errorf("'-- +goose StatementBegin' must be defined after '-- +goose Up' or '-- +goose Down' annotation, stateMachine=%v, see https://github.com/pressly/goose#sql-migrations", stateMachine) + return nil, false, fmt.Errorf("'-- +goose StatementBegin' must be defined after '-- +goose Up' or '-- +goose Down' annotation, stateMachine=%d, see https://github.com/pressly/goose#sql-migrations", stateMachine.state) } continue - case "+goose StatementEnd": - switch stateMachine.Get() { + case annotationStatementEnd: + switch stateMachine.get() { case gooseStatementBeginUp: - stateMachine.Set(gooseStatementEndUp) + stateMachine.set(gooseStatementEndUp) case gooseStatementBeginDown: - stateMachine.Set(gooseStatementEndDown) + stateMachine.set(gooseStatementEndDown) default: return nil, false, errors.New("'-- +goose StatementEnd' must be defined after '-- +goose StatementBegin', see https://github.com/pressly/goose#sql-migrations") } - case "+goose NO TRANSACTION": + case annotationNoTransaction: useTx = false continue + + case annotationEnvsubOn: + useEnvsub = true + continue + + case annotationEnvsubOff: + useEnvsub = false + continue + + default: + return nil, false, fmt.Errorf("unknown annotation: %q", cmd) } } // Once we've started parsing a statement the buffer is no longer empty, @@ -129,14 +200,21 @@ func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, if buf.Len() == 0 { // This check ensures leading comments and empty lines prior to a statement are ignored. if strings.HasPrefix(strings.TrimSpace(line), "--") || line == "" { - verboseInfo("StateMachine: ignore comment") + stateMachine.print("ignore comment") continue } } - switch stateMachine.Get() { + switch stateMachine.get() { case gooseStatementEndDown, gooseStatementEndUp: // Do not include the "+goose StatementEnd" annotation in the final statement. default: + if useEnvsub { + expanded, err := interpolate.Interpolate(&envWrapper{}, line) + if err != nil { + return nil, false, fmt.Errorf("variable substitution failed: %w:\n%s", err, line) + } + line = expanded + } // Write SQL line to a buffer. if _, err := buf.WriteString(line + "\n"); err != nil { return nil, false, fmt.Errorf("failed to write to buf: %w", err) @@ -147,46 +225,46 @@ func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, // 1) basic query with semicolon; 2) psql statement // // Export statement once we hit end of statement. 
- switch stateMachine.Get() { + switch stateMachine.get() { case gooseUp, gooseStatementBeginUp, gooseStatementEndUp: - if !direction /*down*/ { + if direction == DirectionDown { buf.Reset() - verboseInfo("StateMachine: ignore down") + stateMachine.print("ignore down") continue } case gooseDown, gooseStatementBeginDown, gooseStatementEndDown: - if direction /*up*/ { + if direction == DirectionUp { buf.Reset() - verboseInfo("StateMachine: ignore up") + stateMachine.print("ignore up") continue } default: - return nil, false, fmt.Errorf("failed to parse migration: unexpected state %d on line %q, see https://github.com/pressly/goose#sql-migrations", stateMachine, line) + return nil, false, fmt.Errorf("failed to parse migration: unexpected state %d on line %q, see https://github.com/pressly/goose#sql-migrations", stateMachine.state, line) } - switch stateMachine.Get() { + switch stateMachine.get() { case gooseUp: if endsWithSemicolon(line) { stmts = append(stmts, cleanupStatement(buf.String())) buf.Reset() - verboseInfo("StateMachine: store simple Up query") + stateMachine.print("store simple Up query") } case gooseDown: if endsWithSemicolon(line) { stmts = append(stmts, cleanupStatement(buf.String())) buf.Reset() - verboseInfo("StateMachine: store simple Down query") + stateMachine.print("store simple Down query") } case gooseStatementEndUp: stmts = append(stmts, cleanupStatement(buf.String())) buf.Reset() - verboseInfo("StateMachine: store Up statement") - stateMachine.Set(gooseUp) + stateMachine.print("store Up statement") + stateMachine.set(gooseUp) case gooseStatementEndDown: stmts = append(stmts, cleanupStatement(buf.String())) buf.Reset() - verboseInfo("StateMachine: store Down statement") - stateMachine.Set(gooseDown) + stateMachine.print("store Down statement") + stateMachine.set(gooseDown) } } if err := scanner.Err(); err != nil { @@ -194,7 +272,7 @@ func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, } // EOF - switch stateMachine.Get() { + switch stateMachine.get() { case start: return nil, false, errors.New("failed to parse migration: must start with '-- +goose Up' annotation, see https://github.com/pressly/goose#sql-migrations") case gooseStatementBeginUp, gooseStatementBeginDown: @@ -202,20 +280,94 @@ func ParseSQLMigration(r io.Reader, direction bool) (stmts []string, useTx bool, } if bufferRemaining := strings.TrimSpace(buf.String()); len(bufferRemaining) > 0 { - return nil, false, fmt.Errorf("failed to parse migration: state %d, direction: %v: unexpected unfinished SQL query: %q: missing semicolon?", stateMachine, direction, bufferRemaining) + return nil, false, missingSemicolonError(stateMachine.state, direction, bufferRemaining) } return stmts, useTx, nil } -// cleanupStatement attempts to find the last semicolon and trims -// the remaining chars from the input string. This is useful for cleaning -// up a statement containing trailing comments or empty lines. 
-func cleanupStatement(input string) string { - if n := strings.LastIndex(input, ";"); n > 0 { - return input[:n+1] +type annotation string + +const ( + annotationUp annotation = "Up" + annotationDown annotation = "Down" + annotationStatementBegin annotation = "StatementBegin" + annotationStatementEnd annotation = "StatementEnd" + annotationNoTransaction annotation = "NO TRANSACTION" + annotationEnvsubOn annotation = "ENVSUB ON" + annotationEnvsubOff annotation = "ENVSUB OFF" +) + +var supportedAnnotations = map[annotation]struct{}{ + annotationUp: {}, + annotationDown: {}, + annotationStatementBegin: {}, + annotationStatementEnd: {}, + annotationNoTransaction: {}, + annotationEnvsubOn: {}, + annotationEnvsubOff: {}, +} + +var ( + errEmptyAnnotation = errors.New("empty annotation") + errInvalidAnnotation = errors.New("invalid annotation") +) + +// extractAnnotation extracts the annotation from the line. +// All annotations must be in format: "-- +goose [annotation]" +// Allowed annotations: Up, Down, StatementBegin, StatementEnd, NO TRANSACTION, ENVSUB ON, ENVSUB OFF +func extractAnnotation(line string) (annotation, error) { + // If line contains leading whitespace - return error. + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + return "", fmt.Errorf("%q contains leading whitespace: %w", line, errInvalidAnnotation) } - return input + + // Extract the annotation from the line, by removing the leading "--" + cmd := strings.ReplaceAll(line, "--", "") + + // Extract the annotation from the line, by removing the leading "+goose" + cmd = strings.Replace(cmd, "+goose", "", 1) + + if strings.Contains(cmd, "+goose") { + return "", fmt.Errorf("%q contains multiple '+goose' annotations: %w", cmd, errInvalidAnnotation) + } + + // Remove leading and trailing whitespace from the annotation command. + cmd = strings.TrimSpace(cmd) + + if cmd == "" { + return "", errEmptyAnnotation + } + + a := annotation(cmd) + + for s := range supportedAnnotations { + if strings.EqualFold(string(s), string(a)) { + return s, nil + } + } + + return "", fmt.Errorf("%q not supported: %w", cmd, errInvalidAnnotation) +} + +func missingSemicolonError(state parserState, direction Direction, s string) error { + return fmt.Errorf("failed to parse migration: state %d, direction: %v: unexpected unfinished SQL query: %q: missing semicolon?", + state, + direction, + s, + ) +} + +type envWrapper struct{} + +var _ interpolate.Env = (*envWrapper)(nil) + +func (e *envWrapper) Get(key string) (string, bool) { + return os.LookupEnv(key) +} + +func cleanupStatement(input string) string { + return strings.TrimSpace(input) } // Checks the line to see if the line has a statement-ending semicolon @@ -240,20 +392,3 @@ func endsWithSemicolon(line string) bool { return strings.HasSuffix(prev, ";") } - -var verbose bool - -func SetVersbose(b bool) { - verbose = b -} - -const ( - grayColor = "\033[90m" - resetColor = "\033[00m" -) - -func verboseInfo(s string, args ...interface{}) { - if verbose { - log.Printf(grayColor+s+resetColor, args...) 
- } -} diff --git a/vendor/github.com/pressly/goose/v3/lock/postgres.go b/vendor/github.com/pressly/goose/v3/lock/postgres.go new file mode 100644 index 0000000..97b7bae --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/lock/postgres.go @@ -0,0 +1,118 @@ +package lock + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/sethvargo/go-retry" +) + +// NewPostgresSessionLocker returns a SessionLocker that utilizes PostgreSQL's exclusive +// session-level advisory lock mechanism. +// +// This function creates a SessionLocker that can be used to acquire and release a lock for +// synchronization purposes. The lock acquisition is retried until it is successfully acquired or +// until the failure threshold is reached. The default lock duration is set to 5 minutes, and the +// default unlock duration is set to 1 minute. +// +// If you have long running migrations, you may want to increase the lock duration. +// +// See [SessionLockerOption] for options that can be used to configure the SessionLocker. +func NewPostgresSessionLocker(opts ...SessionLockerOption) (SessionLocker, error) { + cfg := sessionLockerConfig{ + lockID: DefaultLockID, + lockProbe: probe{ + periodSeconds: 5 * time.Second, + failureThreshold: 60, + }, + unlockProbe: probe{ + periodSeconds: 2 * time.Second, + failureThreshold: 30, + }, + } + for _, opt := range opts { + if err := opt.apply(&cfg); err != nil { + return nil, err + } + } + return &postgresSessionLocker{ + lockID: cfg.lockID, + retryLock: retry.WithMaxRetries( + cfg.lockProbe.failureThreshold, + retry.NewConstant(cfg.lockProbe.periodSeconds), + ), + retryUnlock: retry.WithMaxRetries( + cfg.unlockProbe.failureThreshold, + retry.NewConstant(cfg.unlockProbe.periodSeconds), + ), + }, nil +} + +type postgresSessionLocker struct { + lockID int64 + retryLock retry.Backoff + retryUnlock retry.Backoff +} + +var _ SessionLocker = (*postgresSessionLocker)(nil) + +func (l *postgresSessionLocker) SessionLock(ctx context.Context, conn *sql.Conn) error { + return retry.Do(ctx, l.retryLock, func(ctx context.Context) error { + row := conn.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", l.lockID) + var locked bool + if err := row.Scan(&locked); err != nil { + return fmt.Errorf("failed to execute pg_try_advisory_lock: %w", err) + } + if locked { + // A session-level advisory lock was acquired. + return nil + } + // A session-level advisory lock could not be acquired. This is likely because another + // process has already acquired the lock. We will continue retrying until the lock is + // acquired or the maximum number of retries is reached. + return retry.RetryableError(errors.New("failed to acquire lock")) + }) +} + +func (l *postgresSessionLocker) SessionUnlock(ctx context.Context, conn *sql.Conn) error { + return retry.Do(ctx, l.retryUnlock, func(ctx context.Context) error { + var unlocked bool + row := conn.QueryRowContext(ctx, "SELECT pg_advisory_unlock($1)", l.lockID) + if err := row.Scan(&unlocked); err != nil { + return fmt.Errorf("failed to execute pg_advisory_unlock: %w", err) + } + if unlocked { + // A session-level advisory lock was released. + return nil + } + /* + docs(md): provide users with some documentation on how they can unlock the session + manually. + + This is probably not an issue for 99.99% of users since pg_advisory_unlock_all() will + release all session level advisory locks held by the current session. It is implicitly + invoked at session end, even if the client disconnects ungracefully. 
+ + Here is output from a session that has a lock held: + + SELECT pid,granted,((classid::bigint<<32)|objid::bigint)AS goose_lock_id FROM pg_locks + WHERE locktype='advisory'; + + | pid | granted | goose_lock_id | + |-----|---------|---------------------| + | 191 | t | 5887940537704921958 | + + A forceful way to unlock the session is to terminate the backend with SIGTERM: + + SELECT pg_terminate_backend(191); + + Subsequent commands on the same connection will fail with: + + Query 1 ERROR: FATAL: terminating connection due to administrator command + */ + return retry.RetryableError(errors.New("failed to unlock session")) + }) +} diff --git a/vendor/github.com/pressly/goose/v3/lock/session_locker.go b/vendor/github.com/pressly/goose/v3/lock/session_locker.go new file mode 100644 index 0000000..b741878 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/lock/session_locker.go @@ -0,0 +1,23 @@ +// Package lock defines the Locker interface and implements the locking logic. +package lock + +import ( + "context" + "database/sql" + "errors" +) + +var ( + // ErrLockNotImplemented is returned when the database does not support locking. + ErrLockNotImplemented = errors.New("lock not implemented") + // ErrUnlockNotImplemented is returned when the database does not support unlocking. + ErrUnlockNotImplemented = errors.New("unlock not implemented") +) + +// SessionLocker is the interface to lock and unlock the database for the duration of a session. The +// session is defined as the duration of a single connection and both methods must be called on the +// same connection. +type SessionLocker interface { + SessionLock(ctx context.Context, conn *sql.Conn) error + SessionUnlock(ctx context.Context, conn *sql.Conn) error +} diff --git a/vendor/github.com/pressly/goose/v3/lock/session_locker_options.go b/vendor/github.com/pressly/goose/v3/lock/session_locker_options.go new file mode 100644 index 0000000..4f1efe8 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/lock/session_locker_options.go @@ -0,0 +1,98 @@ +package lock + +import ( + "errors" + "time" +) + +const ( + // DefaultLockID is the id used to lock the database for migrations. It is a crc64 hash of the + // string "goose". This is used to ensure that the lock is unique to goose. + // + // crc64.Checksum([]byte("goose"), crc64.MakeTable(crc64.ECMA)) + DefaultLockID int64 = 5887940537704921958 +) + +// SessionLockerOption is used to configure a SessionLocker. +type SessionLockerOption interface { + apply(*sessionLockerConfig) error +} + +// WithLockID sets the lock ID to use when locking the database. +// +// If WithLockID is not called, the DefaultLockID is used. +func WithLockID(lockID int64) SessionLockerOption { + return sessionLockerConfigFunc(func(c *sessionLockerConfig) error { + c.lockID = lockID + return nil + }) +} + +// WithLockTimeout sets the max duration to wait for the lock to be acquired. The total duration +// will be the period times the failure threshold. +// +// By default, the lock timeout is 300s (5min), where the lock is retried every 5 seconds (period) +// up to 60 times (failure threshold). +// +// The minimum period is 1 second, and the minimum failure threshold is 1. 
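
For illustration, a minimal sketch of how a caller might wire these session-locker options together, assuming the lock package added in this patch; the lock ID and probe values below are arbitrary examples, not recommended settings:

// Sketch: build a Postgres session locker with a custom lock ID and probes.
// Values are illustrative; the package defaults apply when options are omitted.
package main

import (
	"log"

	"github.com/pressly/goose/v3/lock"
)

func newLocker() lock.SessionLocker {
	locker, err := lock.NewPostgresSessionLocker(
		lock.WithLockID(1234),         // arbitrary example ID; DefaultLockID is used otherwise
		lock.WithLockTimeout(5, 60),   // probe every 5s, up to 60 times (~5 min total)
		lock.WithUnlockTimeout(2, 30), // probe every 2s, up to 30 times (~1 min total)
	)
	if err != nil {
		log.Fatal(err)
	}
	return locker
}
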
+func WithLockTimeout(period, failureThreshold uint64) SessionLockerOption { + return sessionLockerConfigFunc(func(c *sessionLockerConfig) error { + if period < 1 { + return errors.New("period must be greater than 0, minimum is 1") + } + if failureThreshold < 1 { + return errors.New("failure threshold must be greater than 0, minimum is 1") + } + c.lockProbe = probe{ + periodSeconds: time.Duration(period) * time.Second, + failureThreshold: failureThreshold, + } + return nil + }) +} + +// WithUnlockTimeout sets the max duration to wait for the lock to be released. The total duration +// will be the period times the failure threshold. +// +// By default, the lock timeout is 60s, where the lock is retried every 2 seconds (period) up to 30 +// times (failure threshold). +// +// The minimum period is 1 second, and the minimum failure threshold is 1. +func WithUnlockTimeout(period, failureThreshold uint64) SessionLockerOption { + return sessionLockerConfigFunc(func(c *sessionLockerConfig) error { + if period < 1 { + return errors.New("period must be greater than 0, minimum is 1") + } + if failureThreshold < 1 { + return errors.New("failure threshold must be greater than 0, minimum is 1") + } + c.unlockProbe = probe{ + periodSeconds: time.Duration(period) * time.Second, + failureThreshold: failureThreshold, + } + return nil + }) +} + +type sessionLockerConfig struct { + lockID int64 + lockProbe probe + unlockProbe probe +} + +// probe is used to configure how often and how many times to retry a lock or unlock operation. The +// total timeout will be the period times the failure threshold. +type probe struct { + // How often (in seconds) to perform the probe. + periodSeconds time.Duration + // Number of times to retry the probe. + failureThreshold uint64 +} + +var _ SessionLockerOption = (sessionLockerConfigFunc)(nil) + +type sessionLockerConfigFunc func(*sessionLockerConfig) error + +func (f sessionLockerConfigFunc) apply(cfg *sessionLockerConfig) error { + return f(cfg) +} diff --git a/vendor/github.com/pressly/goose/v3/log.go b/vendor/github.com/pressly/goose/v3/log.go index 9d59ec8..fc2367b 100644 --- a/vendor/github.com/pressly/goose/v3/log.go +++ b/vendor/github.com/pressly/goose/v3/log.go @@ -8,10 +8,7 @@ var log Logger = &stdLogger{} // Logger is standard logger interface type Logger interface { - Fatal(v ...interface{}) Fatalf(format string, v ...interface{}) - Print(v ...interface{}) - Println(v ...interface{}) Printf(format string, v ...interface{}) } @@ -23,10 +20,7 @@ func SetLogger(l Logger) { // stdLogger is a default logger that outputs to a stdlib's log.std logger. type stdLogger struct{} -func (*stdLogger) Fatal(v ...interface{}) { std.Fatal(v...) } func (*stdLogger) Fatalf(format string, v ...interface{}) { std.Fatalf(format, v...) } -func (*stdLogger) Print(v ...interface{}) { std.Print(v...) } -func (*stdLogger) Println(v ...interface{}) { std.Println(v...) } func (*stdLogger) Printf(format string, v ...interface{}) { std.Printf(format, v...) } // NopLogger returns a logger that discards all logged output. 
@@ -38,8 +32,5 @@ type nopLogger struct{} var _ Logger = (*nopLogger)(nil) -func (*nopLogger) Fatal(v ...interface{}) {} func (*nopLogger) Fatalf(format string, v ...interface{}) {} -func (*nopLogger) Print(v ...interface{}) {} -func (*nopLogger) Println(v ...interface{}) {} func (*nopLogger) Printf(format string, v ...interface{}) {} diff --git a/vendor/github.com/pressly/goose/v3/migrate.go b/vendor/github.com/pressly/goose/v3/migrate.go index c92f7a8..22769ff 100644 --- a/vendor/github.com/pressly/goose/v3/migrate.go +++ b/vendor/github.com/pressly/goose/v3/migrate.go @@ -1,26 +1,27 @@ package goose import ( + "context" "database/sql" "errors" "fmt" "io/fs" "math" "path" - "runtime" "sort" + "strings" "time" ) var ( + // ErrNoMigrationFiles when no migration files have been found. + ErrNoMigrationFiles = errors.New("no migration files found") // ErrNoCurrentVersion when a current migration version is not found. ErrNoCurrentVersion = errors.New("no current version found") // ErrNoNextVersion when the next migration version is not found. ErrNoNextVersion = errors.New("no next version found") // MaxVersion is the maximum allowed version. MaxVersion int64 = math.MaxInt64 - - registeredGoMigrations = map[int64]*Migration{} ) // Migrations slice. @@ -84,7 +85,7 @@ func (ms Migrations) versioned() (Migrations, error) { // assume that the user will never have more than 19700101000000 migrations for _, m := range ms { - // parse version as timestmap + // parse version as timestamp versionTime, err := time.Parse(timestampFormat, fmt.Sprintf("%d", m.Version)) if versionTime.Before(time.Unix(0, 0)) || err != nil { @@ -101,7 +102,7 @@ func (ms Migrations) timestamped() (Migrations, error) { // assume that the user will never have more than 19700101000000 migrations for _, m := range ms { - // parse version as timestmap + // parse version as timestamp versionTime, err := time.Parse(timestampFormat, fmt.Sprintf("%d", m.Version)) if err != nil { // probably not a timestamp @@ -123,79 +124,19 @@ func (ms Migrations) String() string { return str } -// GoMigration is a Go migration func that is run within a transaction. -type GoMigration func(tx *sql.Tx) error - -// GoMigrationNoTx is a Go migration func that is run outside a transaction. -type GoMigrationNoTx func(db *sql.DB) error - -// AddMigration adds Go migrations. -func AddMigration(up, down GoMigration) { - _, filename, _, _ := runtime.Caller(1) - AddNamedMigration(filename, up, down) -} - -// AddNamedMigration adds named Go migrations. -func AddNamedMigration(filename string, up, down GoMigration) { - if err := register(filename, true, up, down, nil, nil); err != nil { - panic(err) - } -} - -// AddMigrationNoTx adds Go migrations that will be run outside transaction. -func AddMigrationNoTx(up, down GoMigrationNoTx) { - _, filename, _, _ := runtime.Caller(1) - AddNamedMigrationNoTx(filename, up, down) -} - -// AddNamedMigrationNoTx adds named Go migrations that will be run outside transaction. -func AddNamedMigrationNoTx(filename string, up, down GoMigrationNoTx) { - if err := register(filename, false, nil, nil, up, down); err != nil { - panic(err) - } -} - -func register( - filename string, - useTx bool, - up, down GoMigration, - upNoTx, downNoTx GoMigrationNoTx, -) error { - // Sanity check caller did not mix tx and non-tx based functions. 
- if (up != nil || down != nil) && (upNoTx != nil || downNoTx != nil) { - return fmt.Errorf("cannot mix tx and non-tx based go migrations functions") - } - v, _ := NumericComponent(filename) - if existing, ok := registeredGoMigrations[v]; ok { - return fmt.Errorf("failed to add migration %q: version %d conflicts with %q", - filename, - v, - existing.Source, - ) - } - // Add to global as a registered migration. - registeredGoMigrations[v] = &Migration{ - Version: v, - Next: -1, - Previous: -1, - Registered: true, - Source: filename, - UseTx: useTx, - UpFn: up, - DownFn: down, - UpFnNoTx: upNoTx, - DownFnNoTx: downNoTx, - } - return nil -} - -func collectMigrationsFS(fsys fs.FS, dirpath string, current, target int64) (Migrations, error) { - if _, err := fs.Stat(fsys, dirpath); errors.Is(err, fs.ErrNotExist) { - return nil, fmt.Errorf("%s directory does not exist", dirpath) +func collectMigrationsFS( + fsys fs.FS, + dirpath string, + current, target int64, + registered map[int64]*Migration, +) (Migrations, error) { + if _, err := fs.Stat(fsys, dirpath); err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("%s directory does not exist", dirpath) + } + return nil, err } - var migrations Migrations - // SQL migration files. sqlMigrationFiles, err := fs.Glob(fsys, path.Join(dirpath, "*.sql")) if err != nil { @@ -207,53 +148,30 @@ func collectMigrationsFS(fsys fs.FS, dirpath string, current, target int64) (Mig return nil, fmt.Errorf("could not parse SQL migration file %q: %w", file, err) } if versionFilter(v, current, target) { - migration := &Migration{Version: v, Next: -1, Previous: -1, Source: file} - migrations = append(migrations, migration) + migrations = append(migrations, &Migration{ + Version: v, + Next: -1, + Previous: -1, + Source: file, + }) } } - - // Go migrations registered via goose.AddMigration(). - for _, migration := range registeredGoMigrations { - v, err := NumericComponent(migration.Source) - if err != nil { - return nil, fmt.Errorf("could not parse go migration file %q: %w", migration.Source, err) - } - if versionFilter(v, current, target) { - migrations = append(migrations, migration) - } - } - - // Go migration files - goMigrationFiles, err := fs.Glob(fsys, path.Join(dirpath, "*.go")) + // Go migration files. + goMigrations, err := collectGoMigrations(fsys, dirpath, registered, current, target) if err != nil { return nil, err } - for _, file := range goMigrationFiles { - v, err := NumericComponent(file) - if err != nil { - continue // Skip any files that don't have version prefix. - } - - // Skip migrations already existing migrations registered via goose.AddMigration(). - if _, ok := registeredGoMigrations[v]; ok { - continue - } - - if versionFilter(v, current, target) { - migration := &Migration{Version: v, Next: -1, Previous: -1, Source: file, Registered: false} - migrations = append(migrations, migration) - } + migrations = append(migrations, goMigrations...) + if len(migrations) == 0 { + return nil, ErrNoMigrationFiles } - - migrations = sortAndConnectMigrations(migrations) - - return migrations, nil + return sortAndConnectMigrations(migrations), nil } // CollectMigrations returns all the valid looking migration scripts in the // migrations folder and go func registry, and key them by version. 
func CollectMigrations(dirpath string, current, target int64) (Migrations, error) { - return collectMigrationsFS(baseFS, dirpath, current, target) + return collectMigrationsFS(baseFS, dirpath, current, target, registeredGoMigrations) } func sortAndConnectMigrations(migrations Migrations) Migrations { @@ -274,98 +192,182 @@ func sortAndConnectMigrations(migrations Migrations) Migrations { } func versionFilter(v, current, target int64) bool { - if target > current { return v > current && v <= target } - if target < current { return v <= current && v > target } - return false } // EnsureDBVersion retrieves the current version for this DB. // Create and initialize the DB version table if it doesn't exist. func EnsureDBVersion(db *sql.DB) (int64, error) { - rows, err := GetDialect().dbVersionQuery(db) + ctx := context.Background() + return EnsureDBVersionContext(ctx, db) +} + +// EnsureDBVersionContext retrieves the current version for this DB. +// Create and initialize the DB version table if it doesn't exist. +func EnsureDBVersionContext(ctx context.Context, db *sql.DB) (int64, error) { + dbMigrations, err := store.ListMigrations(ctx, db, TableName()) if err != nil { - return 0, createVersionTable(db) + return 0, createVersionTable(ctx, db) } - defer rows.Close() - // The most recent record for each migration specifies // whether it has been applied or rolled back. // The first version we find that has been applied is the current version. - - toSkip := make([]int64, 0) - - for rows.Next() { - var row MigrationRecord - if err = rows.Scan(&row.VersionID, &row.IsApplied); err != nil { - return 0, fmt.Errorf("failed to scan row: %w", err) - } - - // have we already marked this version to be skipped? - skip := false - for _, v := range toSkip { - if v == row.VersionID { - skip = true - break - } - } - - if skip { + // + // TODO(mf): for historic reasons, we continue to use the is_applied column, + // but at some point we need to deprecate this logic and ideally remove + // this column. + // + // For context, see: + // https://github.com/pressly/goose/pull/131#pullrequestreview-178409168 + // + // The dbMigrations list is expected to be ordered by descending ID. But + // in the future we should be able to query the last record only. + skipLookup := make(map[int64]struct{}) + for _, m := range dbMigrations { + // Have we already marked this version to be skipped? + if _, ok := skipLookup[m.VersionID]; ok { continue } - - // if version has been applied we're done - if row.IsApplied { - return row.VersionID, nil + // If version has been applied we are done. + if m.IsApplied { + return m.VersionID, nil } - - // latest version of migration has not been applied. - toSkip = append(toSkip, row.VersionID) - } - if err := rows.Err(); err != nil { - return 0, fmt.Errorf("failed to get next row: %w", err) + // Latest version of migration has not been applied. + skipLookup[m.VersionID] = struct{}{} } - return 0, ErrNoNextVersion } -// Create the db version table -// and insert the initial 0 value into it -func createVersionTable(db *sql.DB) error { - txn, err := db.Begin() +// createVersionTable creates the db version table and inserts the +// initial 0 value into it. 
+func createVersionTable(ctx context.Context, db *sql.DB) error { + txn, err := db.BeginTx(ctx, nil) if err != nil { return err } - - d := GetDialect() - - if _, err := txn.Exec(d.createVersionTableSQL()); err != nil { + if err := store.CreateVersionTable(ctx, txn, TableName()); err != nil { _ = txn.Rollback() return err } - - version := 0 - applied := true - if _, err := txn.Exec(d.insertVersionSQL(), version, applied); err != nil { + if err := store.InsertVersion(ctx, txn, TableName(), 0); err != nil { _ = txn.Rollback() return err } - return txn.Commit() } // GetDBVersion is an alias for EnsureDBVersion, but returns -1 in error. func GetDBVersion(db *sql.DB) (int64, error) { - version, err := EnsureDBVersion(db) + ctx := context.Background() + return GetDBVersionContext(ctx, db) +} + +// GetDBVersionContext is an alias for EnsureDBVersion, but returns -1 in error. +func GetDBVersionContext(ctx context.Context, db *sql.DB) (int64, error) { + version, err := EnsureDBVersionContext(ctx, db) if err != nil { return -1, err } return version, nil } + +// collectGoMigrations collects Go migrations from the filesystem and merges them with registered +// migrations. +// +// If Go migrations have been registered globally, with [goose.AddNamedMigration...], but there are +// no corresponding .go files in the filesystem, add them to the migrations slice. +// +// If Go migrations have been registered, and there are .go files in the filesystem dirpath, ONLY +// include those in the migrations slices. +// +// Lastly, if there are .go files in the filesystem but they have not been registered, raise an +// error. This is to prevent users from accidentally adding valid looking Go files to the migrations +// folder without registering them. +func collectGoMigrations( + fsys fs.FS, + dirpath string, + registeredGoMigrations map[int64]*Migration, + current, target int64, +) (Migrations, error) { + // Sanity check registered migrations have the correct version prefix. + for _, m := range registeredGoMigrations { + if _, err := NumericComponent(m.Source); err != nil { + return nil, fmt.Errorf("could not parse go migration file %s: %w", m.Source, err) + } + } + goFiles, err := fs.Glob(fsys, path.Join(dirpath, "*.go")) + if err != nil { + return nil, err + } + // If there are no Go files in the filesystem and no registered Go migrations, return early. + if len(goFiles) == 0 && len(registeredGoMigrations) == 0 { + return nil, nil + } + type source struct { + fullpath string + version int64 + } + // Find all Go files that have a version prefix and are within the requested range. + var sources []source + for _, fullpath := range goFiles { + v, err := NumericComponent(fullpath) + if err != nil { + continue // Skip any files that don't have version prefix. + } + if strings.HasSuffix(fullpath, "_test.go") { + continue // Skip Go test files. + } + if versionFilter(v, current, target) { + sources = append(sources, source{ + fullpath: fullpath, + version: v, + }) + } + } + var ( + migrations Migrations + ) + if len(sources) > 0 { + for _, s := range sources { + migration, ok := registeredGoMigrations[s.version] + if ok { + migrations = append(migrations, migration) + } else { + // TODO(mf): something that bothers me about this implementation is it will be + // lazily evaluated and the error will only be raised if the user tries to run the + // migration. It would be better to raise an error much earlier in the process. 
+ migrations = append(migrations, &Migration{ + Version: s.version, + Next: -1, + Previous: -1, + Source: s.fullpath, + Registered: false, + }) + } + } + } else { + // Some users may register Go migrations manually via AddNamedMigration_ functions but not + // provide the corresponding .go files in the filesystem. In this case, we include them + // wholesale in the migrations slice. + // + // This is a valid use case because users may want to build a custom binary that only embeds + // the SQL migration files and some other mechanism for registering Go migrations. + for _, migration := range registeredGoMigrations { + v, err := NumericComponent(migration.Source) + if err != nil { + return nil, fmt.Errorf("could not parse go migration file %s: %w", migration.Source, err) + } + if versionFilter(v, current, target) { + migrations = append(migrations, migration) + } + } + } + return migrations, nil +} diff --git a/vendor/github.com/pressly/goose/v3/migration.go b/vendor/github.com/pressly/goose/v3/migration.go index 495eb75..d1fbd7d 100644 --- a/vendor/github.com/pressly/goose/v3/migration.go +++ b/vendor/github.com/pressly/goose/v3/migration.go @@ -1,6 +1,7 @@ package goose import ( + "context" "database/sql" "errors" "fmt" @@ -12,33 +13,185 @@ import ( "github.com/pressly/goose/v3/internal/sqlparser" ) +// NewGoMigration creates a new Go migration. +// +// Both up and down functions may be nil, in which case the migration will be recorded in the +// versions table but no functions will be run. This is useful for recording (up) or deleting (down) +// a version without running any functions. See [GoFunc] for more details. +func NewGoMigration(version int64, up, down *GoFunc) *Migration { + m := &Migration{ + Type: TypeGo, + Registered: true, + Version: version, + Next: -1, Previous: -1, + goUp: &GoFunc{Mode: TransactionEnabled}, + goDown: &GoFunc{Mode: TransactionEnabled}, + construct: true, + } + updateMode := func(f *GoFunc) *GoFunc { + // infer mode from function + if f.Mode == 0 { + if f.RunTx != nil && f.RunDB == nil { + f.Mode = TransactionEnabled + } + if f.RunTx == nil && f.RunDB != nil { + f.Mode = TransactionDisabled + } + // Always default to TransactionEnabled if both functions are nil. This is the most + // common use case. + if f.RunDB == nil && f.RunTx == nil { + f.Mode = TransactionEnabled + } + } + return f + } + // To maintain backwards compatibility, we set ALL legacy functions. In a future major version, + // we will remove these fields in favor of [GoFunc]. + // + // Note, this function does not do any validation. Validation is lazily done when the migration + // is registered. + if up != nil { + m.goUp = updateMode(up) + + if up.RunDB != nil { + m.UpFnNoTxContext = up.RunDB // func(context.Context, *sql.DB) error + m.UpFnNoTx = withoutContext(up.RunDB) // func(*sql.DB) error + } + if up.RunTx != nil { + m.UseTx = true + m.UpFnContext = up.RunTx // func(context.Context, *sql.Tx) error + m.UpFn = withoutContext(up.RunTx) // func(*sql.Tx) error + } + } + if down != nil { + m.goDown = updateMode(down) + + if down.RunDB != nil { + m.DownFnNoTxContext = down.RunDB // func(context.Context, *sql.DB) error + m.DownFnNoTx = withoutContext(down.RunDB) // func(*sql.DB) error + } + if down.RunTx != nil { + m.UseTx = true + m.DownFnContext = down.RunTx // func(context.Context, *sql.Tx) error + m.DownFn = withoutContext(down.RunTx) // func(*sql.Tx) error + } + } + return m +} + +// Migration struct represents either a SQL or Go migration. 
+// +// Avoid constructing migrations manually, use [NewGoMigration] function. +type Migration struct { + Type MigrationType + Version int64 + // Source is the path to the .sql script or .go file. It may be empty for Go migrations that + // have been registered globally and don't have a source file. + Source string + + UpFnContext, DownFnContext GoMigrationContext + UpFnNoTxContext, DownFnNoTxContext GoMigrationNoTxContext + + // These fields will be removed in a future major version. They are here for backwards + // compatibility and are an implementation detail. + Registered bool + UseTx bool + Next int64 // next version, or -1 if none + Previous int64 // previous version, -1 if none + + // We still save the non-context versions in the struct in case someone is using them. Goose + // does not use these internally anymore in favor of the context-aware versions. These fields + // will be removed in a future major version. + + UpFn GoMigration // Deprecated: use UpFnContext instead. + DownFn GoMigration // Deprecated: use DownFnContext instead. + UpFnNoTx GoMigrationNoTx // Deprecated: use UpFnNoTxContext instead. + DownFnNoTx GoMigrationNoTx // Deprecated: use DownFnNoTxContext instead. + + noVersioning bool + + // These fields are used internally by goose and users are not expected to set them. Instead, + // use [NewGoMigration] to create a new go migration. + construct bool + goUp, goDown *GoFunc + + sql sqlMigration +} + +type sqlMigration struct { + // The Parsed field is used to track whether the SQL migration has been parsed. It serves as an + // optimization to avoid parsing migrations that may never be needed. Typically, migrations are + // incremental, and users often run only the most recent ones, making parsing of prior + // migrations unnecessary in most cases. + Parsed bool + + // Parsed must be set to true before the following fields are used. + UseTx bool + Up []string + Down []string +} + +// GoFunc represents a Go migration function. +type GoFunc struct { + // Exactly one of these must be set, or both must be nil. + RunTx func(ctx context.Context, tx *sql.Tx) error + // -- OR -- + RunDB func(ctx context.Context, db *sql.DB) error + + // Mode is the transaction mode for the migration. When one of the run functions is set, the + // mode will be inferred from the function and the field is ignored. Users do not need to set + // this field when supplying a run function. + // + // If both run functions are nil, the mode defaults to TransactionEnabled. The use case for nil + // functions is to record a version in the version table without invoking a Go migration + // function. + // + // The only time this field is required is if BOTH run functions are nil AND you want to + // override the default transaction mode. + Mode TransactionMode +} + +// TransactionMode represents the possible transaction modes for a migration. +type TransactionMode int + +const ( + TransactionEnabled TransactionMode = iota + 1 + TransactionDisabled +) + +func (m TransactionMode) String() string { + switch m { + case TransactionEnabled: + return "transaction_enabled" + case TransactionDisabled: + return "transaction_disabled" + default: + return fmt.Sprintf("unknown transaction mode (%d)", m) + } +} + // MigrationRecord struct. +// +// Deprecated: unused and will be removed in a future major version. type MigrationRecord struct { VersionID int64 TStamp time.Time IsApplied bool // was this a result of up() or down() } -// Migration struct. 
-type Migration struct { - Version int64 - Next int64 // next version, or -1 if none - Previous int64 // previous version, -1 if none - Source string // path to .sql script or go file - Registered bool - UseTx bool - UpFn, DownFn GoMigration - UpFnNoTx, DownFnNoTx GoMigrationNoTx - noVersioning bool -} - func (m *Migration) String() string { return fmt.Sprint(m.Source) } // Up runs an up migration. func (m *Migration) Up(db *sql.DB) error { - if err := m.run(db, true); err != nil { + ctx := context.Background() + return m.UpContext(ctx, db) +} + +// UpContext runs an up migration. +func (m *Migration) UpContext(ctx context.Context, db *sql.DB) error { + if err := m.run(ctx, db, true); err != nil { return err } return nil @@ -46,13 +199,19 @@ func (m *Migration) Up(db *sql.DB) error { // Down runs a down migration. func (m *Migration) Down(db *sql.DB) error { - if err := m.run(db, false); err != nil { + ctx := context.Background() + return m.DownContext(ctx, db) +} + +// DownContext runs a down migration. +func (m *Migration) DownContext(ctx context.Context, db *sql.DB) error { + if err := m.run(ctx, db, false); err != nil { return err } return nil } -func (m *Migration) run(db *sql.DB, direction bool) error { +func (m *Migration) run(ctx context.Context, db *sql.DB, direction bool) error { switch filepath.Ext(m.Source) { case ".sql": f, err := baseFS.Open(m.Source) @@ -61,14 +220,13 @@ func (m *Migration) run(db *sql.DB, direction bool) error { } defer f.Close() - sqlparser.SetVersbose(verbose) - statements, useTx, err := sqlparser.ParseSQLMigration(f, direction) + statements, useTx, err := sqlparser.ParseSQLMigration(f, sqlparser.FromBool(direction), verbose) if err != nil { return fmt.Errorf("ERROR %v: failed to parse SQL migration file: %w", filepath.Base(m.Source), err) } start := time.Now() - if err := runSQLMigration(db, statements, useTx, m.Version, direction, m.noVersioning); err != nil { + if err := runSQLMigration(ctx, db, statements, useTx, m.Version, direction, m.noVersioning); err != nil { return fmt.Errorf("ERROR %v: failed to run SQL migration: %w", filepath.Base(m.Source), err) } finish := truncateDuration(time.Since(start)) @@ -87,12 +245,13 @@ func (m *Migration) run(db *sql.DB, direction bool) error { var empty bool if m.UseTx { // Run go-based migration inside a tx. - fn := m.DownFn + fn := m.DownFnContext if direction { - fn = m.UpFn + fn = m.UpFnContext } empty = (fn == nil) if err := runGoMigration( + ctx, db, fn, m.Version, @@ -103,12 +262,13 @@ func (m *Migration) run(db *sql.DB, direction bool) error { } } else { // Run go-based migration outside a tx. - fn := m.DownFnNoTx + fn := m.DownFnNoTxContext if direction { - fn = m.UpFnNoTx + fn = m.UpFnNoTxContext } empty = (fn == nil) if err := runGoMigrationNoTx( + ctx, db, fn, m.Version, @@ -129,27 +289,29 @@ func (m *Migration) run(db *sql.DB, direction bool) error { } func runGoMigrationNoTx( + ctx context.Context, db *sql.DB, - fn GoMigrationNoTx, + fn GoMigrationNoTxContext, version int64, direction bool, recordVersion bool, ) error { if fn != nil { // Run go migration function. 
- if err := fn(db); err != nil { + if err := fn(ctx, db); err != nil { return fmt.Errorf("failed to run go migration: %w", err) } } if recordVersion { - return insertOrDeleteVersionNoTx(db, version, direction) + return insertOrDeleteVersionNoTx(ctx, db, version, direction) } return nil } func runGoMigration( + ctx context.Context, db *sql.DB, - fn GoMigration, + fn GoMigrationContext, version int64, direction bool, recordVersion bool, @@ -157,19 +319,19 @@ func runGoMigration( if fn == nil && !recordVersion { return nil } - tx, err := db.Begin() + tx, err := db.BeginTx(ctx, nil) if err != nil { return fmt.Errorf("failed to begin transaction: %w", err) } if fn != nil { // Run go migration function. - if err := fn(tx); err != nil { + if err := fn(ctx, tx); err != nil { _ = tx.Rollback() return fmt.Errorf("failed to run go migration: %w", err) } } if recordVersion { - if err := insertOrDeleteVersion(tx, version, direction); err != nil { + if err := insertOrDeleteVersion(ctx, tx, version, direction); err != nil { _ = tx.Rollback() return fmt.Errorf("failed to update version: %w", err) } @@ -180,45 +342,41 @@ func runGoMigration( return nil } -func insertOrDeleteVersion(tx *sql.Tx, version int64, direction bool) error { +func insertOrDeleteVersion(ctx context.Context, tx *sql.Tx, version int64, direction bool) error { if direction { - _, err := tx.Exec(GetDialect().insertVersionSQL(), version, direction) - return err + return store.InsertVersion(ctx, tx, TableName(), version) } - _, err := tx.Exec(GetDialect().deleteVersionSQL(), version) - return err + return store.DeleteVersion(ctx, tx, TableName(), version) } -func insertOrDeleteVersionNoTx(db *sql.DB, version int64, direction bool) error { +func insertOrDeleteVersionNoTx(ctx context.Context, db *sql.DB, version int64, direction bool) error { if direction { - _, err := db.Exec(GetDialect().insertVersionSQL(), version, direction) - return err + return store.InsertVersionNoTx(ctx, db, TableName(), version) } - _, err := db.Exec(GetDialect().deleteVersionSQL(), version) - return err + return store.DeleteVersionNoTx(ctx, db, TableName(), version) } -// NumericComponent looks for migration scripts with names in the form: -// XXX_descriptivename.ext where XXX specifies the version number -// and ext specifies the type of migration -func NumericComponent(name string) (int64, error) { - base := filepath.Base(name) - +// NumericComponent parses the version from the migration file name. +// +// XXX_descriptivename.ext where XXX specifies the version number and ext specifies the type of +// migration, either .sql or .go. 
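
For illustration, a brief sketch of the naming convention this parser accepts; the file names below are made-up examples:

// Sketch: extracting the numeric version prefix from migration file names.
package main

import (
	"fmt"

	"github.com/pressly/goose/v3"
)

func main() {
	v, err := goose.NumericComponent("20240820120000_add_users_table.sql")
	fmt.Println(v, err) // 20240820120000 <nil>

	// A file without a version prefix is rejected.
	_, err = goose.NumericComponent("notes.sql")
	fmt.Println(err) // no filename separator '_' found
}
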
+func NumericComponent(filename string) (int64, error) { + base := filepath.Base(filename) if ext := filepath.Ext(base); ext != ".go" && ext != ".sql" { - return 0, errors.New("not a recognized migration file type") + return 0, errors.New("migration file does not have .sql or .go file extension") } - idx := strings.Index(base, "_") if idx < 0 { return 0, errors.New("no filename separator '_' found") } - - n, e := strconv.ParseInt(base[:idx], 10, 64) - if e == nil && n <= 0 { - return 0, errors.New("migration IDs must be greater than zero") + n, err := strconv.ParseInt(base[:idx], 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse version from migration file: %s: %w", base, err) } - - return n, e + if n < 1 { + return 0, errors.New("migration version must be greater than zero") + } + return n, nil } func truncateDuration(d time.Duration) time.Duration { @@ -233,3 +391,8 @@ func truncateDuration(d time.Duration) time.Duration { } return d } + +// ref returns a string that identifies the migration. This is used for logging and error messages. +func (m *Migration) ref() string { + return fmt.Sprintf("(type:%s,version:%d)", m.Type, m.Version) +} diff --git a/vendor/github.com/pressly/goose/v3/migration_sql.go b/vendor/github.com/pressly/goose/v3/migration_sql.go index 70c2e07..1c6d4d0 100644 --- a/vendor/github.com/pressly/goose/v3/migration_sql.go +++ b/vendor/github.com/pressly/goose/v3/migration_sql.go @@ -1,10 +1,10 @@ package goose import ( + "context" "database/sql" "fmt" "regexp" - "time" ) // Run a migration specified in raw SQL. @@ -13,22 +13,30 @@ import ( // starting with "-- +goose" to specify whether the section should // be applied during an Up or Down migration // -// All statements following an Up or Down directive are grouped together -// until another direction directive is found. -func runSQLMigration(db *sql.DB, statements []string, useTx bool, v int64, direction bool, noVersioning bool) error { +// All statements following an Up or Down annotation are grouped together +// until another direction annotation is found. +func runSQLMigration( + ctx context.Context, + db *sql.DB, + statements []string, + useTx bool, + v int64, + direction bool, + noVersioning bool, +) error { if useTx { // TRANSACTION. 
verboseInfo("Begin transaction") - tx, err := db.Begin() + tx, err := db.BeginTx(ctx, nil) if err != nil { return fmt.Errorf("failed to begin transaction: %w", err) } for _, query := range statements { verboseInfo("Executing statement: %s\n", clearStatement(query)) - if err = execQuery(tx.Exec, query); err != nil { + if _, err := tx.ExecContext(ctx, query); err != nil { verboseInfo("Rollback transaction") _ = tx.Rollback() return fmt.Errorf("failed to execute SQL query %q: %w", clearStatement(query), err) @@ -37,13 +45,13 @@ func runSQLMigration(db *sql.DB, statements []string, useTx bool, v int64, direc if !noVersioning { if direction { - if err := execQuery(tx.Exec, GetDialect().insertVersionSQL(), v, direction); err != nil { + if err := store.InsertVersion(ctx, tx, TableName(), v); err != nil { verboseInfo("Rollback transaction") _ = tx.Rollback() return fmt.Errorf("failed to insert new goose version: %w", err) } } else { - if err := execQuery(tx.Exec, GetDialect().deleteVersionSQL(), v); err != nil { + if err := store.DeleteVersion(ctx, tx, TableName(), v); err != nil { verboseInfo("Rollback transaction") _ = tx.Rollback() return fmt.Errorf("failed to delete goose version: %w", err) @@ -62,17 +70,17 @@ func runSQLMigration(db *sql.DB, statements []string, useTx bool, v int64, direc // NO TRANSACTION. for _, query := range statements { verboseInfo("Executing statement: %s", clearStatement(query)) - if err := execQuery(db.Exec, query); err != nil { + if _, err := db.ExecContext(ctx, query); err != nil { return fmt.Errorf("failed to execute SQL query %q: %w", clearStatement(query), err) } } if !noVersioning { if direction { - if err := execQuery(db.Exec, GetDialect().insertVersionSQL(), v, direction); err != nil { + if err := store.InsertVersionNoTx(ctx, db, TableName(), v); err != nil { return fmt.Errorf("failed to insert new goose version: %w", err) } } else { - if err := execQuery(db.Exec, GetDialect().deleteVersionSQL(), v); err != nil { + if err := store.DeleteVersionNoTx(ctx, db, TableName(), v); err != nil { return fmt.Errorf("failed to delete goose version: %w", err) } } @@ -81,32 +89,6 @@ func runSQLMigration(db *sql.DB, statements []string, useTx bool, v int64, direc return nil } -func execQuery(fn func(string, ...interface{}) (sql.Result, error), query string, args ...interface{}) error { - if !verbose { - _, err := fn(query, args...) - return err - } - - ch := make(chan error) - - go func() { - _, err := fn(query, args...) 
- ch <- err - }() - - t := time.Now() - ticker := time.NewTicker(time.Minute) - defer ticker.Stop() - for { - select { - case err := <-ch: - return err - case <-ticker.C: - verboseInfo("Executing statement still in progress for %v", time.Since(t).Round(time.Second)) - } - } -} - const ( grayColor = "\033[90m" resetColor = "\033[00m" diff --git a/vendor/github.com/pressly/goose/v3/osfs.go b/vendor/github.com/pressly/goose/v3/osfs.go index e64f770..420f95f 100644 --- a/vendor/github.com/pressly/goose/v3/osfs.go +++ b/vendor/github.com/pressly/goose/v3/osfs.go @@ -18,3 +18,11 @@ func (osFS) Stat(name string) (fs.FileInfo, error) { return os.Stat(filepath.Fro func (osFS) ReadFile(name string) ([]byte, error) { return os.ReadFile(filepath.FromSlash(name)) } func (osFS) Glob(pattern string) ([]string, error) { return filepath.Glob(filepath.FromSlash(pattern)) } + +type noopFS struct{} + +var _ fs.FS = noopFS{} + +func (f noopFS) Open(name string) (fs.File, error) { + return nil, os.ErrNotExist +} diff --git a/vendor/github.com/pressly/goose/v3/provider.go b/vendor/github.com/pressly/goose/v3/provider.go new file mode 100644 index 0000000..4da7873 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/provider.go @@ -0,0 +1,641 @@ +package goose + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io/fs" + "math" + "strconv" + "strings" + "sync" + + "github.com/pressly/goose/v3/database" + "github.com/pressly/goose/v3/internal/gooseutil" + "github.com/pressly/goose/v3/internal/sqlparser" + "go.uber.org/multierr" +) + +// Provider is a goose migration provider. +type Provider struct { + // mu protects all accesses to the provider and must be held when calling operations on the + // database. + mu sync.Mutex + + db *sql.DB + store database.Store + versionTableOnce sync.Once + + fsys fs.FS + cfg config + + // migrations are ordered by version in ascending order. This list will never be empty and + // contains all migrations known to the provider. + migrations []*Migration +} + +// NewProvider returns a new goose provider. +// +// The caller is responsible for matching the database dialect with the database/sql driver. For +// example, if the database dialect is "postgres", the database/sql driver could be +// github.com/lib/pq or github.com/jackc/pgx. Each dialect has a corresponding [database.Dialect] +// constant backed by a default [database.Store] implementation. For more advanced use cases, such +// as using a custom table name or supplying a custom store implementation, see [WithStore]. +// +// fsys is the filesystem used to read migration files, but may be nil. Most users will want to use +// [os.DirFS], os.DirFS("path/to/migrations"), to read migrations from the local filesystem. +// However, it is possible to use a different "filesystem", such as [embed.FS] or filter out +// migrations using [fs.Sub]. +// +// See [ProviderOption] for more information on configuring the provider. +// +// Unless otherwise specified, all methods on Provider are safe for concurrent use. 
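
For illustration, a minimal sketch of constructing a provider and applying pending migrations; the dialect constant, directory name, and database handle are assumptions of the example rather than requirements of the API:

// Sketch: read SQL migrations from ./migrations and apply everything pending.
package main

import (
	"context"
	"database/sql"
	"log"
	"os"

	"github.com/pressly/goose/v3"
)

func migrateUp(ctx context.Context, db *sql.DB) error {
	provider, err := goose.NewProvider(goose.DialectPostgres, db, os.DirFS("migrations"))
	if err != nil {
		return err
	}
	defer provider.Close()

	results, err := provider.Up(ctx)
	if err != nil {
		return err
	}
	for _, r := range results {
		log.Printf("applied %s", r.Source.Path)
	}
	return nil
}
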
+func NewProvider(dialect Dialect, db *sql.DB, fsys fs.FS, opts ...ProviderOption) (*Provider, error) { + if db == nil { + return nil, errors.New("db must not be nil") + } + if fsys == nil { + fsys = noopFS{} + } + cfg := config{ + registered: make(map[int64]*Migration), + excludePaths: make(map[string]bool), + excludeVersions: make(map[int64]bool), + logger: &stdLogger{}, + } + for _, opt := range opts { + if err := opt.apply(&cfg); err != nil { + return nil, err + } + } + // Allow users to specify a custom store implementation, but only if they don't specify a + // dialect. If they specify a dialect, we'll use the default store implementation. + if dialect == "" && cfg.store == nil { + return nil, errors.New("dialect must not be empty") + } + if dialect != "" && cfg.store != nil { + return nil, errors.New("dialect must be empty when using a custom store implementation") + } + var store database.Store + if dialect != "" { + var err error + store, err = database.NewStore(dialect, DefaultTablename) + if err != nil { + return nil, err + } + } else { + store = cfg.store + } + if store.Tablename() == "" { + return nil, errors.New("invalid store implementation: table name must not be empty") + } + return newProvider(db, store, fsys, cfg, registeredGoMigrations /* global */) +} + +func newProvider( + db *sql.DB, + store database.Store, + fsys fs.FS, + cfg config, + global map[int64]*Migration, +) (*Provider, error) { + // Collect migrations from the filesystem and merge with registered migrations. + // + // Note, we don't parse SQL migrations here. They are parsed lazily when required. + + // feat(mf): we could add a flag to parse SQL migrations eagerly. This would allow us to return + // an error if there are any SQL parsing errors. This adds a bit overhead to startup though, so + // we should make it optional. + filesystemSources, err := collectFilesystemSources(fsys, false, cfg.excludePaths, cfg.excludeVersions) + if err != nil { + return nil, err + } + versionToGoMigration := make(map[int64]*Migration) + // Add user-registered Go migrations from the provider. + for version, m := range cfg.registered { + versionToGoMigration[version] = m + } + // Skip adding global Go migrations if explicitly disabled. + if cfg.disableGlobalRegistry { + // TODO(mf): let's add a warn-level log here to inform users if len(global) > 0. Would like + // to add this once we're on go1.21 and leverage the new slog package. + } else { + for version, m := range global { + if _, ok := versionToGoMigration[version]; ok { + return nil, fmt.Errorf("global go migration conflicts with provider-registered go migration with version %d", version) + } + versionToGoMigration[version] = m + } + } + // At this point we have all registered unique Go migrations (if any). We need to merge them + // with SQL migrations from the filesystem. + migrations, err := merge(filesystemSources, versionToGoMigration) + if err != nil { + return nil, err + } + if len(migrations) == 0 { + return nil, ErrNoMigrations + } + return &Provider{ + db: db, + fsys: fsys, + cfg: cfg, + store: store, + migrations: migrations, + }, nil +} + +// Status returns the status of all migrations, merging the list of migrations from the database and +// filesystem. The returned items are ordered by version, in ascending order. +func (p *Provider) Status(ctx context.Context) ([]*MigrationStatus, error) { + return p.status(ctx) +} + +// HasPending returns true if there are pending migrations to apply, otherwise, it returns false. 
If +// out-of-order migrations are disabled, yet some are detected, this method returns an error. +// +// Note, this method will not use a SessionLocker if one is configured. This allows callers to check +// for pending migrations without blocking or being blocked by other operations. +func (p *Provider) HasPending(ctx context.Context) (bool, error) { + return p.hasPending(ctx) +} + +// GetVersions returns the max database version and the target version to migrate to. +// +// Note, this method will not use a SessionLocker if one is configured. This allows callers to check +// for versions without blocking or being blocked by other operations. +func (p *Provider) GetVersions(ctx context.Context) (current, target int64, err error) { + return p.getVersions(ctx) +} + +// GetDBVersion returns the highest version recorded in the database, regardless of the order in +// which migrations were applied. For example, if migrations were applied out of order (1,4,2,3), +// this method returns 4. If no migrations have been applied, it returns 0. +func (p *Provider) GetDBVersion(ctx context.Context) (int64, error) { + if p.cfg.disableVersioning { + return -1, errors.New("getting database version not supported when versioning is disabled") + } + return p.getDBMaxVersion(ctx, nil) +} + +// ListSources returns a list of all migration sources known to the provider, sorted in ascending +// order by version. The path field may be empty for manually registered migrations, such as Go +// migrations registered using the [WithGoMigrations] option. +func (p *Provider) ListSources() []*Source { + sources := make([]*Source, 0, len(p.migrations)) + for _, m := range p.migrations { + sources = append(sources, &Source{ + Type: m.Type, + Path: m.Source, + Version: m.Version, + }) + } + return sources +} + +// Ping attempts to ping the database to verify a connection is available. +func (p *Provider) Ping(ctx context.Context) error { + return p.db.PingContext(ctx) +} + +// Close closes the database connection initially supplied to the provider. +func (p *Provider) Close() error { + return p.db.Close() +} + +// ApplyVersion applies exactly one migration for the specified version. If there is no migration +// available for the specified version, this method returns [ErrVersionNotFound]. If the migration +// has already been applied, this method returns [ErrAlreadyApplied]. +// +// The direction parameter determines the migration direction: true for up migration and false for +// down migration. +func (p *Provider) ApplyVersion(ctx context.Context, version int64, direction bool) (*MigrationResult, error) { + res, err := p.apply(ctx, version, direction) + if err != nil { + return nil, err + } + // This should never happen, we must return exactly one result. + if len(res) != 1 { + versions := make([]string, 0, len(res)) + for _, r := range res { + versions = append(versions, strconv.FormatInt(r.Source.Version, 10)) + } + return nil, fmt.Errorf( + "unexpected number of migrations applied running apply, expecting exactly one result: %v", + strings.Join(versions, ","), + ) + } + return res[0], nil +} + +// Up applies all pending migrations. If there are no new migrations to apply, this method returns +// empty list and nil error. +func (p *Provider) Up(ctx context.Context) ([]*MigrationResult, error) { + hasPending, err := p.HasPending(ctx) + if err != nil { + return nil, err + } + if !hasPending { + return nil, nil + } + return p.up(ctx, false, math.MaxInt64) +} + +// UpByOne applies the next pending migration. 
If there is no next migration to apply, this method +// returns [ErrNoNextVersion]. +func (p *Provider) UpByOne(ctx context.Context) (*MigrationResult, error) { + hasPending, err := p.HasPending(ctx) + if err != nil { + return nil, err + } + if !hasPending { + return nil, ErrNoNextVersion + } + res, err := p.up(ctx, true, math.MaxInt64) + if err != nil { + return nil, err + } + if len(res) == 0 { + return nil, ErrNoNextVersion + } + // This should never happen, we must return exactly one result. + if len(res) != 1 { + versions := make([]string, 0, len(res)) + for _, r := range res { + versions = append(versions, strconv.FormatInt(r.Source.Version, 10)) + } + return nil, fmt.Errorf( + "unexpected number of migrations applied running up-by-one, expecting exactly one result: %v", + strings.Join(versions, ","), + ) + } + return res[0], nil +} + +// UpTo applies all pending migrations up to, and including, the specified version. If there are no +// migrations to apply, this method returns empty list and nil error. +// +// For example, if there are three new migrations (9,10,11) and the current database version is 8 +// with a requested version of 10, only versions 9,10 will be applied. +func (p *Provider) UpTo(ctx context.Context, version int64) ([]*MigrationResult, error) { + hasPending, err := p.HasPending(ctx) + if err != nil { + return nil, err + } + if !hasPending { + return nil, nil + } + return p.up(ctx, false, version) +} + +// Down rolls back the most recently applied migration. If there are no migrations to rollback, this +// method returns [ErrNoNextVersion]. +// +// Note, migrations are rolled back in the order they were applied. And not in the reverse order of +// the migration version. This only applies in scenarios where migrations are allowed to be applied +// out of order. +func (p *Provider) Down(ctx context.Context) (*MigrationResult, error) { + res, err := p.down(ctx, true, 0) + if err != nil { + return nil, err + } + if len(res) == 0 { + return nil, ErrNoNextVersion + } + // This should never happen, we must return exactly one result. + if len(res) != 1 { + versions := make([]string, 0, len(res)) + for _, r := range res { + versions = append(versions, strconv.FormatInt(r.Source.Version, 10)) + } + return nil, fmt.Errorf( + "unexpected number of migrations applied running down, expecting exactly one result: %v", + strings.Join(versions, ","), + ) + } + return res[0], nil +} + +// DownTo rolls back all migrations down to, but not including, the specified version. +// +// For example, if the current database version is 11,10,9... and the requested version is 9, only +// migrations 11, 10 will be rolled back. +// +// Note, migrations are rolled back in the order they were applied. And not in the reverse order of +// the migration version. This only applies in scenarios where migrations are allowed to be applied +// out of order. 
+func (p *Provider) DownTo(ctx context.Context, version int64) ([]*MigrationResult, error) { + if version < 0 { + return nil, fmt.Errorf("invalid version: must be a valid number or zero: %d", version) + } + return p.down(ctx, false, version) +} + +// *** Internal methods *** + +func (p *Provider) up( + ctx context.Context, + byOne bool, + version int64, +) (_ []*MigrationResult, retErr error) { + if version < 1 { + return nil, errInvalidVersion + } + conn, cleanup, err := p.initialize(ctx, true) + if err != nil { + return nil, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + if len(p.migrations) == 0 { + return nil, nil + } + var apply []*Migration + if p.cfg.disableVersioning { + if byOne { + return nil, errors.New("up-by-one not supported when versioning is disabled") + } + apply = p.migrations + } else { + // optimize(mf): Listing all migrations from the database isn't great. This is only required + // to support the allow missing (out-of-order) feature. For users that don't use this + // feature, we could just query the database for the current max version and then apply + // migrations greater than that version. + dbMigrations, err := p.store.ListMigrations(ctx, conn) + if err != nil { + return nil, err + } + if len(dbMigrations) == 0 { + return nil, errMissingZeroVersion + } + versions, err := gooseutil.UpVersions( + getVersionsFromMigrations(p.migrations), // fsys versions + getVersionsFromListMigrations(dbMigrations), // db versions + version, + p.cfg.allowMissing, + ) + if err != nil { + return nil, err + } + for _, v := range versions { + m, err := p.getMigration(v) + if err != nil { + return nil, err + } + apply = append(apply, m) + } + } + return p.runMigrations(ctx, conn, apply, sqlparser.DirectionUp, byOne) +} + +func (p *Provider) down( + ctx context.Context, + byOne bool, + version int64, +) (_ []*MigrationResult, retErr error) { + conn, cleanup, err := p.initialize(ctx, true) + if err != nil { + return nil, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + if len(p.migrations) == 0 { + return nil, nil + } + if p.cfg.disableVersioning { + var downMigrations []*Migration + if byOne { + last := p.migrations[len(p.migrations)-1] + downMigrations = []*Migration{last} + } else { + downMigrations = p.migrations + } + return p.runMigrations(ctx, conn, downMigrations, sqlparser.DirectionDown, byOne) + } + dbMigrations, err := p.store.ListMigrations(ctx, conn) + if err != nil { + return nil, err + } + if len(dbMigrations) == 0 { + return nil, errMissingZeroVersion + } + // We never migrate the zero version down. 
+ if dbMigrations[0].Version == 0 { + p.printf("no migrations to run, current version: 0") + return nil, nil + } + var apply []*Migration + for _, dbMigration := range dbMigrations { + if dbMigration.Version <= version { + break + } + m, err := p.getMigration(dbMigration.Version) + if err != nil { + return nil, err + } + apply = append(apply, m) + } + return p.runMigrations(ctx, conn, apply, sqlparser.DirectionDown, byOne) +} + +func (p *Provider) apply( + ctx context.Context, + version int64, + direction bool, +) (_ []*MigrationResult, retErr error) { + if version < 1 { + return nil, errInvalidVersion + } + m, err := p.getMigration(version) + if err != nil { + return nil, err + } + conn, cleanup, err := p.initialize(ctx, true) + if err != nil { + return nil, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + result, err := p.store.GetMigration(ctx, conn, version) + if err != nil && !errors.Is(err, database.ErrVersionNotFound) { + return nil, err + } + // There are a few states here: + // 1. direction is up + // a. migration is applied, this is an error (ErrAlreadyApplied) + // b. migration is not applied, apply it + if direction && result != nil { + return nil, fmt.Errorf("version %d: %w", version, ErrAlreadyApplied) + } + // 2. direction is down + // a. migration is applied, rollback + // b. migration is not applied, this is an error (ErrNotApplied) + if !direction && result == nil { + return nil, fmt.Errorf("version %d: %w", version, ErrNotApplied) + } + d := sqlparser.DirectionDown + if direction { + d = sqlparser.DirectionUp + } + return p.runMigrations(ctx, conn, []*Migration{m}, d, true) +} + +func (p *Provider) getVersions(ctx context.Context) (current, target int64, retErr error) { + conn, cleanup, err := p.initialize(ctx, false) + if err != nil { + return -1, -1, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + target = p.migrations[len(p.migrations)-1].Version + + // If versioning is disabled, we always have pending migrations and the target version is the + // last migration. + if p.cfg.disableVersioning { + return -1, target, nil + } + + current, err = p.store.GetLatestVersion(ctx, conn) + if err != nil { + if errors.Is(err, database.ErrVersionNotFound) { + return -1, target, errMissingZeroVersion + } + return -1, target, err + } + return current, target, nil +} + +func (p *Provider) hasPending(ctx context.Context) (_ bool, retErr error) { + conn, cleanup, err := p.initialize(ctx, false) + if err != nil { + return false, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + // If versioning is disabled, we always have pending migrations. + if p.cfg.disableVersioning { + return true, nil + } + + // List all migrations from the database. Careful, optimizations here can lead to subtle bugs. + // We have 2 important cases to consider: + // + // 1. Users have enabled out-of-order migrations, in which case we need to check if any + // migrations are missing and report that there are pending migrations. Do not surface an + // error because this is a valid state. + // + // 2. Users have disabled out-of-order migrations (default), in which case we need to check if all + // migrations have been applied. We cannot check for the highest applied version because we lose the + // ability to surface an error if an out-of-order migration was introduced. 
It would be silently + // ignored and the user would not know that they have unapplied migrations. + // + // Maybe we could consider adding a flag to the provider such as IgnoreMissing, which would + // allow silently ignoring missing migrations. This would be useful for users that have built + // checks that prevent missing migrations from being introduced. + + dbMigrations, err := p.store.ListMigrations(ctx, conn) + if err != nil { + return false, err + } + apply, err := gooseutil.UpVersions( + getVersionsFromMigrations(p.migrations), // fsys versions + getVersionsFromListMigrations(dbMigrations), // db versions + math.MaxInt64, + p.cfg.allowMissing, + ) + if err != nil { + return false, err + } + return len(apply) > 0, nil +} + +func getVersionsFromMigrations(in []*Migration) []int64 { + out := make([]int64, 0, len(in)) + for _, m := range in { + out = append(out, m.Version) + } + return out + +} + +func getVersionsFromListMigrations(in []*database.ListMigrationsResult) []int64 { + out := make([]int64, 0, len(in)) + for _, m := range in { + out = append(out, m.Version) + } + return out + +} + +func (p *Provider) status(ctx context.Context) (_ []*MigrationStatus, retErr error) { + conn, cleanup, err := p.initialize(ctx, true) + if err != nil { + return nil, fmt.Errorf("failed to initialize: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + + status := make([]*MigrationStatus, 0, len(p.migrations)) + for _, m := range p.migrations { + migrationStatus := &MigrationStatus{ + Source: &Source{ + Type: m.Type, + Path: m.Source, + Version: m.Version, + }, + State: StatePending, + } + // If versioning is disabled, we can't check the database for applied migrations, so we + // assume all migrations are pending. + if !p.cfg.disableVersioning { + dbResult, err := p.store.GetMigration(ctx, conn, m.Version) + if err != nil && !errors.Is(err, database.ErrVersionNotFound) { + return nil, err + } + if dbResult != nil { + migrationStatus.State = StateApplied + migrationStatus.AppliedAt = dbResult.Timestamp + } + } + status = append(status, migrationStatus) + } + + return status, nil +} + +// getDBMaxVersion returns the highest version recorded in the database, regardless of the order in +// which migrations were applied. conn may be nil, in which case a connection is initialized. +func (p *Provider) getDBMaxVersion(ctx context.Context, conn *sql.Conn) (_ int64, retErr error) { + if conn == nil { + var cleanup func() error + var err error + conn, cleanup, err = p.initialize(ctx, true) + if err != nil { + return 0, err + } + defer func() { + retErr = multierr.Append(retErr, cleanup()) + }() + } + + latest, err := p.store.GetLatestVersion(ctx, conn) + if err != nil { + if errors.Is(err, database.ErrVersionNotFound) { + return 0, errMissingZeroVersion + } + return -1, err + } + return latest, nil +} diff --git a/vendor/github.com/pressly/goose/v3/provider_collect.go b/vendor/github.com/pressly/goose/v3/provider_collect.go new file mode 100644 index 0000000..6e23092 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/provider_collect.go @@ -0,0 +1,196 @@ +package goose + +import ( + "errors" + "fmt" + "io/fs" + "path/filepath" + "sort" + "strings" +) + +// fileSources represents a collection of migration files on the filesystem. 
+type fileSources struct { + sqlSources []Source + goSources []Source +} + +// collectFilesystemSources scans the file system for migration files that have a numeric prefix +// (greater than one) followed by an underscore and a file extension of either .go or .sql. fsys may +// be nil, in which case an empty fileSources is returned. +// +// If strict is true, then any error parsing the numeric component of the filename will result in an +// error. The file is skipped otherwise. +// +// This function DOES NOT parse SQL migrations or merge registered Go migrations. It only collects +// migration sources from the filesystem. +func collectFilesystemSources( + fsys fs.FS, + strict bool, + excludePaths map[string]bool, + excludeVersions map[int64]bool, +) (*fileSources, error) { + if fsys == nil { + return new(fileSources), nil + } + sources := new(fileSources) + versionToBaseLookup := make(map[int64]string) // map[version]filepath.Base(fullpath) + for _, pattern := range []string{ + "*.sql", + "*.go", + } { + files, err := fs.Glob(fsys, pattern) + if err != nil { + return nil, fmt.Errorf("failed to glob pattern %q: %w", pattern, err) + } + for _, fullpath := range files { + base := filepath.Base(fullpath) + if strings.HasSuffix(base, "_test.go") { + continue + } + if excludePaths[base] { + // TODO(mf): log this? + continue + } + // If the filename has a valid looking version of the form: NUMBER_.{sql,go}, then use + // that as the version. Otherwise, ignore it. This allows users to have arbitrary + // filenames, but still have versioned migrations within the same directory. For + // example, a user could have a helpers.go file which contains unexported helper + // functions for migrations. + version, err := NumericComponent(base) + if err != nil { + if strict { + return nil, fmt.Errorf("failed to parse numeric component from %q: %w", base, err) + } + continue + } + if excludeVersions[version] { + // TODO: log this? + continue + } + // Ensure there are no duplicate versions. + if existing, ok := versionToBaseLookup[version]; ok { + return nil, fmt.Errorf("found duplicate migration version %d:\n\texisting:%v\n\tcurrent:%v", + version, + existing, + base, + ) + } + switch filepath.Ext(base) { + case ".sql": + sources.sqlSources = append(sources.sqlSources, Source{ + Type: TypeSQL, + Path: fullpath, + Version: version, + }) + case ".go": + sources.goSources = append(sources.goSources, Source{ + Type: TypeGo, + Path: fullpath, + Version: version, + }) + default: + // Should never happen since we already filtered out all other file types. + return nil, fmt.Errorf("invalid file extension: %q", base) + } + // Add the version to the lookup map. + versionToBaseLookup[version] = base + } + } + return sources, nil +} + +func newSQLMigration(source Source) *Migration { + return &Migration{ + Type: source.Type, + Version: source.Version, + Source: source.Path, + construct: true, + Next: -1, Previous: -1, + sql: sqlMigration{ + Parsed: false, // SQL migrations are parsed lazily. + }, + } +} + +func merge(sources *fileSources, registerd map[int64]*Migration) ([]*Migration, error) { + var migrations []*Migration + migrationLookup := make(map[int64]*Migration) + // Add all SQL migrations to the list of migrations. + for _, source := range sources.sqlSources { + m := newSQLMigration(source) + migrations = append(migrations, m) + migrationLookup[source.Version] = m + } + // If there are no Go files in the filesystem and no registered Go migrations, return early. 
+ if len(sources.goSources) == 0 && len(registerd) == 0 { + return migrations, nil + } + // Return an error if the given sources contain a versioned Go migration that has not been + // registered. This is a sanity check to ensure users didn't accidentally create a valid looking + // Go migration file on disk and forget to register it. + // + // This is almost always a user error. + var unregistered []string + for _, s := range sources.goSources { + m, ok := registerd[s.Version] + if !ok { + unregistered = append(unregistered, s.Path) + } else { + // Populate the source path for registered Go migrations that have a corresponding file + // on disk. + m.Source = s.Path + } + } + if len(unregistered) > 0 { + return nil, unregisteredError(unregistered) + } + // Add all registered Go migrations to the list of migrations, checking for duplicate versions. + // + // Important, users can register Go migrations manually via goose.Add_ functions. These + // migrations may not have a corresponding file on disk. Which is fine! We include them + // wholesale as part of migrations. This allows users to build a custom binary that only embeds + // the SQL migration files. + for version, r := range registerd { + // Ensure there are no duplicate versions. + if existing, ok := migrationLookup[version]; ok { + fullpath := r.Source + if fullpath == "" { + fullpath = "no source path" + } + return nil, fmt.Errorf("found duplicate migration version %d:\n\texisting:%v\n\tcurrent:%v", + version, + existing.Source, + fullpath, + ) + } + migrations = append(migrations, r) + migrationLookup[version] = r + } + // Sort migrations by version in ascending order. + sort.Slice(migrations, func(i, j int) bool { + return migrations[i].Version < migrations[j].Version + }) + return migrations, nil +} + +func unregisteredError(unregistered []string) error { + const ( + hintURL = "https://github.com/pressly/goose/tree/master/examples/go-migrations" + ) + f := "file" + if len(unregistered) > 1 { + f += "s" + } + var b strings.Builder + + b.WriteString(fmt.Sprintf("error: detected %d unregistered Go %s:\n", len(unregistered), f)) + for _, name := range unregistered { + b.WriteString("\t" + name + "\n") + } + hint := fmt.Sprintf("hint: go functions must be registered and built into a custom binary see:\n%s", hintURL) + b.WriteString(hint) + b.WriteString("\n") + + return errors.New(b.String()) +} diff --git a/vendor/github.com/pressly/goose/v3/provider_errors.go b/vendor/github.com/pressly/goose/v3/provider_errors.go new file mode 100644 index 0000000..79a2cda --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/provider_errors.go @@ -0,0 +1,44 @@ +package goose + +import ( + "errors" + "fmt" +) + +var ( + // ErrVersionNotFound is returned when a specific migration version is not located. This can + // occur if a .sql file or a Go migration function for the specified version is missing. + ErrVersionNotFound = errors.New("version not found") + + // ErrNoMigrations is returned by [NewProvider] when no migrations are found. + ErrNoMigrations = errors.New("no migrations found") + + // ErrAlreadyApplied indicates that the migration cannot be applied because it has already been + // executed. This error is returned by [Provider.Apply]. + ErrAlreadyApplied = errors.New("migration already applied") + + // ErrNotApplied indicates that the rollback cannot be performed because the migration has not + // yet been applied. This error is returned by [Provider.Apply]. 
+	ErrNotApplied = errors.New("migration not applied")
+
+	// errInvalidVersion is returned when a migration version is invalid.
+	errInvalidVersion = errors.New("version must be greater than 0")
+)
+
+// PartialError is returned when a migration fails, but some migrations already got applied.
+type PartialError struct {
+	// Applied are migrations that were applied successfully before the error occurred. May be
+	// empty.
+	Applied []*MigrationResult
+	// Failed contains the result of the migration that failed. Cannot be nil.
+	Failed *MigrationResult
+	// Err is the error that occurred while running the migration and caused the failure.
+	Err error
+}
+
+func (e *PartialError) Error() string {
+	return fmt.Sprintf(
+		"partial migration error (type:%s,version:%d): %v",
+		e.Failed.Source.Type, e.Failed.Source.Version, e.Err,
+	)
+}
diff --git a/vendor/github.com/pressly/goose/v3/provider_options.go b/vendor/github.com/pressly/goose/v3/provider_options.go
new file mode 100644
index 0000000..15ee990
--- /dev/null
+++ b/vendor/github.com/pressly/goose/v3/provider_options.go
@@ -0,0 +1,198 @@
+package goose
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/pressly/goose/v3/database"
+	"github.com/pressly/goose/v3/lock"
+)
+
+const (
+	// DefaultTablename is the default name of the database table used to track history of applied
+	// migrations.
+	DefaultTablename = "goose_db_version"
+)
+
+// ProviderOption is a configuration option for a goose provider.
+type ProviderOption interface {
+	apply(*config) error
+}
+
+// WithStore configures the provider with a custom [database.Store] implementation.
+//
+// By default, the provider uses the [database.NewStore] function to create a store backed by the
+// given dialect. However, this option allows users to provide their own implementation or call
+// [database.NewStore] with custom options, such as setting the table name.
+//
+// Example:
+//
+//	// Create a store with a custom table name.
+//	store, err := database.NewStore(database.DialectPostgres, "my_custom_table_name")
+//	if err != nil {
+//		return err
+//	}
+//	// Create a provider with the custom store.
+//	provider, err := goose.NewProvider("", db, nil, goose.WithStore(store))
+//	if err != nil {
+//		return err
+//	}
+func WithStore(store database.Store) ProviderOption {
+	return configFunc(func(c *config) error {
+		if c.store != nil {
+			return fmt.Errorf("store already set: %T", c.store)
+		}
+		if store == nil {
+			return errors.New("store must not be nil")
+		}
+		if store.Tablename() == "" {
+			return errors.New("store implementation must set the table name")
+		}
+		c.store = store
+		return nil
+	})
+}
+
+// WithVerbose enables verbose logging.
+func WithVerbose(b bool) ProviderOption {
+	return configFunc(func(c *config) error {
+		c.verbose = b
+		return nil
+	})
+}
+
+// WithSessionLocker enables locking using the provided SessionLocker.
+//
+// If WithSessionLocker is not called, locking is disabled.
+func WithSessionLocker(locker lock.SessionLocker) ProviderOption {
+	return configFunc(func(c *config) error {
+		if c.lockEnabled {
+			return errors.New("lock already enabled")
+		}
+		if c.sessionLocker != nil {
+			return errors.New("session locker already set")
+		}
+		if locker == nil {
+			return errors.New("session locker must not be nil")
+		}
+		c.lockEnabled = true
+		c.sessionLocker = locker
+		return nil
+	})
+}
+
+// WithExcludeNames excludes the given file name from the list of migrations. If called multiple
+// times, the list of excludes is merged.
+func WithExcludeNames(excludes []string) ProviderOption { + return configFunc(func(c *config) error { + for _, name := range excludes { + if _, ok := c.excludePaths[name]; ok { + return fmt.Errorf("duplicate exclude file name: %s", name) + } + c.excludePaths[name] = true + } + return nil + }) +} + +// WithExcludeVersions excludes the given versions from the list of migrations. If called multiple +// times, the list of excludes is merged. +func WithExcludeVersions(versions []int64) ProviderOption { + return configFunc(func(c *config) error { + for _, version := range versions { + if version < 1 { + return errInvalidVersion + } + if _, ok := c.excludeVersions[version]; ok { + return fmt.Errorf("duplicate excludes version: %d", version) + } + c.excludeVersions[version] = true + } + return nil + }) +} + +// WithGoMigrations registers Go migrations with the provider. If a Go migration with the same +// version has already been registered, an error will be returned. +// +// Go migrations must be constructed using the [NewGoMigration] function. +func WithGoMigrations(migrations ...*Migration) ProviderOption { + return configFunc(func(c *config) error { + for _, m := range migrations { + if _, ok := c.registered[m.Version]; ok { + return fmt.Errorf("go migration with version %d already registered", m.Version) + } + if err := checkGoMigration(m); err != nil { + return fmt.Errorf("invalid go migration: %w", err) + } + c.registered[m.Version] = m + } + return nil + }) +} + +// WithDisableGlobalRegistry prevents the provider from registering Go migrations from the global +// registry. By default, goose will register all Go migrations including those registered globally. +func WithDisableGlobalRegistry(b bool) ProviderOption { + return configFunc(func(c *config) error { + c.disableGlobalRegistry = b + return nil + }) +} + +// WithAllowOutofOrder allows the provider to apply missing (out-of-order) migrations. By default, +// goose will raise an error if it encounters a missing migration. +// +// For example: migrations 1,3 are applied and then version 2,6 are introduced. If this option is +// true, then goose will apply 2 (missing) and 6 (new) instead of raising an error. The final order +// of applied migrations will be: 1,3,2,6. Out-of-order migrations are always applied first, +// followed by new migrations. +func WithAllowOutofOrder(b bool) ProviderOption { + return configFunc(func(c *config) error { + c.allowMissing = b + return nil + }) +} + +// WithDisableVersioning disables versioning. Disabling versioning allows applying migrations +// without tracking the versions in the database schema table. Useful for tests, seeding a database +// or running ad-hoc queries. By default, goose will track all versions in the database schema +// table. +func WithDisableVersioning(b bool) ProviderOption { + return configFunc(func(c *config) error { + c.disableVersioning = b + return nil + }) +} + +type config struct { + store database.Store + + verbose bool + excludePaths map[string]bool + excludeVersions map[int64]bool + + // Go migrations registered by the user. These will be merged/resolved against the globally + // registered migrations. + registered map[int64]*Migration + + // Locking options + lockEnabled bool + sessionLocker lock.SessionLocker + + // Feature + disableVersioning bool + allowMissing bool + disableGlobalRegistry bool + + // Let's not expose the Logger just yet. Ideally we consolidate on the std lib slog package + // added in go1.21 and then expose that (if that's even necessary). 
For now, just use the std + // lib log package. + logger Logger +} + +type configFunc func(*config) error + +func (f configFunc) apply(cfg *config) error { + return f(cfg) +} diff --git a/vendor/github.com/pressly/goose/v3/provider_run.go b/vendor/github.com/pressly/goose/v3/provider_run.go new file mode 100644 index 0000000..58f354a --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/provider_run.go @@ -0,0 +1,445 @@ +package goose + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io/fs" + "runtime/debug" + "strings" + "time" + + "github.com/pressly/goose/v3/database" + "github.com/pressly/goose/v3/internal/sqlparser" + "github.com/sethvargo/go-retry" + "go.uber.org/multierr" +) + +var ( + errMissingZeroVersion = errors.New("missing zero version migration") +) + +func (p *Provider) prepareMigration(fsys fs.FS, m *Migration, direction bool) error { + switch m.Type { + case TypeGo: + if m.goUp.Mode == 0 { + return errors.New("go up migration mode is not set") + } + if m.goDown.Mode == 0 { + return errors.New("go down migration mode is not set") + } + var useTx bool + if direction { + useTx = m.goUp.Mode == TransactionEnabled + } else { + useTx = m.goDown.Mode == TransactionEnabled + } + // bug(mf): this is a potential deadlock scenario. We're running Go migrations with *sql.DB, + // but are locking the database with *sql.Conn. If the caller sets max open connections to + // 1, then this will deadlock because the Go migration will try to acquire a connection from + // the pool, but the pool is exhausted because the lock is held. + // + // A potential solution is to expose a third Go register function *sql.Conn. Or continue to + // use *sql.DB and document that the user SHOULD NOT SET max open connections to 1. This is + // a bit of an edge case. For now, we guard against this scenario by checking the max open + // connections and returning an error. + if p.cfg.lockEnabled && p.cfg.sessionLocker != nil && p.db.Stats().MaxOpenConnections == 1 { + if !useTx { + return errors.New("potential deadlock detected: cannot run Go migration without a transaction when max open connections set to 1") + } + } + return nil + case TypeSQL: + if m.sql.Parsed { + return nil + } + parsed, err := sqlparser.ParseAllFromFS(fsys, m.Source, false) + if err != nil { + return err + } + m.sql.Parsed = true + m.sql.UseTx = parsed.UseTx + m.sql.Up, m.sql.Down = parsed.Up, parsed.Down + return nil + } + return fmt.Errorf("invalid migration type: %+v", m) +} + +// printf is a helper function that prints the given message if verbose is enabled. It also prepends +// the "goose: " prefix to the message. +func (p *Provider) printf(msg string, args ...interface{}) { + if p.cfg.verbose { + if !strings.HasPrefix(msg, "goose:") { + msg = "goose: " + msg + } + p.cfg.logger.Printf(msg, args...) + } +} + +// runMigrations runs migrations sequentially in the given direction. If the migrations list is +// empty, return nil without error. +func (p *Provider) runMigrations( + ctx context.Context, + conn *sql.Conn, + migrations []*Migration, + direction sqlparser.Direction, + byOne bool, +) ([]*MigrationResult, error) { + if len(migrations) == 0 { + if !p.cfg.disableVersioning { + // No need to print this message if versioning is disabled because there are no + // migrations being tracked in the goose version table. 
+ maxVersion, err := p.getDBMaxVersion(ctx, conn) + if err != nil { + return nil, err + } + p.printf("no migrations to run, current version: %d", maxVersion) + } + return nil, nil + } + apply := migrations + if byOne { + apply = migrations[:1] + } + + // SQL migrations are lazily parsed in both directions. This is done before attempting to run + // any migrations to catch errors early and prevent leaving the database in an incomplete state. + + for _, m := range apply { + if err := p.prepareMigration(p.fsys, m, direction.ToBool()); err != nil { + return nil, fmt.Errorf("failed to prepare migration %s: %w", m.ref(), err) + } + } + + // feat(mf): If we decide to add support for advisory locks at the transaction level, this may + // be a good place to acquire the lock. However, we need to be sure that ALL migrations are safe + // to run in a transaction. + + // feat(mf): this is where we can (optionally) group multiple migrations to be run in a single + // transaction. The default is to apply each migration sequentially on its own. See the + // following issues for more details: + // - https://github.com/pressly/goose/issues/485 + // - https://github.com/pressly/goose/issues/222 + // + // Be careful, we can't use a single transaction for all migrations because some may be marked + // as not using a transaction. + + var results []*MigrationResult + for _, m := range apply { + result := &MigrationResult{ + Source: &Source{ + Type: m.Type, + Path: m.Source, + Version: m.Version, + }, + Direction: direction.String(), + Empty: isEmpty(m, direction.ToBool()), + } + start := time.Now() + if err := p.runIndividually(ctx, conn, m, direction.ToBool()); err != nil { + // TODO(mf): we should also return the pending migrations here, the remaining items in + // the apply slice. + result.Error = err + result.Duration = time.Since(start) + return nil, &PartialError{ + Applied: results, + Failed: result, + Err: err, + } + } + result.Duration = time.Since(start) + results = append(results, result) + p.printf("%s", result) + } + if !p.cfg.disableVersioning && !byOne { + maxVersion, err := p.getDBMaxVersion(ctx, conn) + if err != nil { + return nil, err + } + p.printf("successfully migrated database, current version: %d", maxVersion) + } + return results, nil +} + +func (p *Provider) runIndividually( + ctx context.Context, + conn *sql.Conn, + m *Migration, + direction bool, +) error { + useTx, err := useTx(m, direction) + if err != nil { + return err + } + if useTx { + return beginTx(ctx, conn, func(tx *sql.Tx) error { + if err := runMigration(ctx, tx, m, direction); err != nil { + return err + } + return p.maybeInsertOrDelete(ctx, tx, m.Version, direction) + }) + } + switch m.Type { + case TypeGo: + // Note, we are using *sql.DB instead of *sql.Conn because it's the Go migration contract. + // This may be a deadlock scenario if max open connections is set to 1 AND a lock is + // acquired on the database. In this case, the migration will block forever unable to + // acquire a connection from the pool. + // + // For now, we guard against this scenario by checking the max open connections and + // returning an error in the prepareMigration function. 
+ if err := runMigration(ctx, p.db, m, direction); err != nil { + return err + } + return p.maybeInsertOrDelete(ctx, p.db, m.Version, direction) + case TypeSQL: + if err := runMigration(ctx, conn, m, direction); err != nil { + return err + } + return p.maybeInsertOrDelete(ctx, conn, m.Version, direction) + } + return fmt.Errorf("failed to run individual migration: neither sql or go: %v", m) +} + +func (p *Provider) maybeInsertOrDelete( + ctx context.Context, + db database.DBTxConn, + version int64, + direction bool, +) error { + // If versioning is disabled, we don't need to insert or delete the migration version. + if p.cfg.disableVersioning { + return nil + } + if direction { + return p.store.Insert(ctx, db, database.InsertRequest{Version: version}) + } + return p.store.Delete(ctx, db, version) +} + +// beginTx begins a transaction and runs the given function. If the function returns an error, the +// transaction is rolled back. Otherwise, the transaction is committed. +func beginTx(ctx context.Context, conn *sql.Conn, fn func(tx *sql.Tx) error) (retErr error) { + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + if retErr != nil { + retErr = multierr.Append(retErr, tx.Rollback()) + } + }() + if err := fn(tx); err != nil { + return err + } + return tx.Commit() +} + +func (p *Provider) initialize(ctx context.Context, useSessionLocker bool) (*sql.Conn, func() error, error) { + p.mu.Lock() + conn, err := p.db.Conn(ctx) + if err != nil { + p.mu.Unlock() + return nil, nil, err + } + // cleanup is a function that cleans up the connection, and optionally, the session lock. + cleanup := func() error { + p.mu.Unlock() + return conn.Close() + } + if useSessionLocker && p.cfg.sessionLocker != nil && p.cfg.lockEnabled { + l := p.cfg.sessionLocker + if err := l.SessionLock(ctx, conn); err != nil { + return nil, nil, multierr.Append(err, cleanup()) + } + // A lock was acquired, so we need to unlock the session when we're done. This is done by + // returning a cleanup function that unlocks the session and closes the connection. + cleanup = func() error { + p.mu.Unlock() + // Use a detached context to unlock the session. This is because the context passed to + // SessionLock may have been canceled, and we don't want to cancel the unlock. + return multierr.Append(l.SessionUnlock(context.WithoutCancel(ctx), conn), conn.Close()) + } + } + // If versioning is enabled, ensure the version table exists. For ad-hoc migrations, we don't + // need the version table because no versions are being tracked. + if !p.cfg.disableVersioning { + if err := p.ensureVersionTable(ctx, conn); err != nil { + return nil, nil, multierr.Append(err, cleanup()) + } + } + return conn, cleanup, nil +} + +func (p *Provider) ensureVersionTable( + ctx context.Context, + conn *sql.Conn, +) (retErr error) { + // There are 2 optimizations here: + // - 1. We create the version table once per Provider instance. + // - 2. We retry the operation a few times in case the table is being created concurrently. + // + // Regarding item 2, certain goose operations, like HasPending, don't respect a SessionLocker. + // So, when goose is run for the first time in a multi-instance environment, it's possible that + // multiple instances will try to create the version table at the same time. This is why we + // retry this operation a few times. Best case, the table is created by one instance and all the + // other instances see that change immediately. 
Worst case, all instances try to create the + // table at the same time, but only one will succeed and the others will retry. + p.versionTableOnce.Do(func() { + retErr = p.tryEnsureVersionTable(ctx, conn) + }) + return retErr +} + +func (p *Provider) tryEnsureVersionTable(ctx context.Context, conn *sql.Conn) error { + b := retry.NewConstant(1 * time.Second) + b = retry.WithMaxRetries(3, b) + return retry.Do(ctx, b, func(ctx context.Context) error { + if e, ok := p.store.(interface { + TableExists(context.Context, database.DBTxConn, string) (bool, error) + }); ok { + exists, err := e.TableExists(ctx, conn, p.store.Tablename()) + if err != nil { + return fmt.Errorf("failed to check if version table exists: %w", err) + } + if exists { + return nil + } + } else { + // This chicken-and-egg behavior is the fallback for all existing implementations of the + // Store interface. We check if the version table exists by querying for the initial + // version, but the table may not exist yet. It's important this runs outside of a + // transaction to avoid failing the transaction. + if res, err := p.store.GetMigration(ctx, conn, 0); err == nil && res != nil { + return nil + } + } + if err := beginTx(ctx, conn, func(tx *sql.Tx) error { + if err := p.store.CreateVersionTable(ctx, tx); err != nil { + return err + } + return p.store.Insert(ctx, tx, database.InsertRequest{Version: 0}) + }); err != nil { + // Mark the error as retryable so we can try again. It's possible that another instance + // is creating the table at the same time and the checks above will succeed on the next + // iteration. + return retry.RetryableError(fmt.Errorf("failed to create version table: %w", err)) + } + return nil + }) +} + +// getMigration returns the migration for the given version. If no migration is found, then +// ErrVersionNotFound is returned. +func (p *Provider) getMigration(version int64) (*Migration, error) { + for _, m := range p.migrations { + if m.Version == version { + return m, nil + } + } + return nil, ErrVersionNotFound +} + +// useTx is a helper function that returns true if the migration should be run in a transaction. It +// must only be called after the migration has been parsed and initialized. +func useTx(m *Migration, direction bool) (bool, error) { + switch m.Type { + case TypeGo: + if m.goUp.Mode == 0 || m.goDown.Mode == 0 { + return false, fmt.Errorf("go migrations must have a mode set") + } + if direction { + return m.goUp.Mode == TransactionEnabled, nil + } + return m.goDown.Mode == TransactionEnabled, nil + case TypeSQL: + if !m.sql.Parsed { + return false, fmt.Errorf("sql migrations must be parsed") + } + return m.sql.UseTx, nil + } + return false, fmt.Errorf("use tx: invalid migration type: %q", m.Type) +} + +// isEmpty is a helper function that returns true if the migration has no functions or no statements +// to execute. It must only be called after the migration has been parsed and initialized. +func isEmpty(m *Migration, direction bool) bool { + switch m.Type { + case TypeGo: + if direction { + return m.goUp.RunTx == nil && m.goUp.RunDB == nil + } + return m.goDown.RunTx == nil && m.goDown.RunDB == nil + case TypeSQL: + if direction { + return len(m.sql.Up) == 0 + } + return len(m.sql.Down) == 0 + } + return true +} + +// runMigration is a helper function that runs the migration in the given direction. It must only be +// called after the migration has been parsed and initialized. 
+func runMigration(ctx context.Context, db database.DBTxConn, m *Migration, direction bool) error { + switch m.Type { + case TypeGo: + return runGo(ctx, db, m, direction) + case TypeSQL: + return runSQL(ctx, db, m, direction) + } + return fmt.Errorf("invalid migration type: %q", m.Type) +} + +// runGo is a helper function that runs the given Go functions in the given direction. It must only +// be called after the migration has been initialized. +func runGo(ctx context.Context, db database.DBTxConn, m *Migration, direction bool) (retErr error) { + defer func() { + if r := recover(); r != nil { + retErr = fmt.Errorf("panic: %v\n%s", r, debug.Stack()) + } + }() + + switch db := db.(type) { + case *sql.Conn: + return fmt.Errorf("go migrations are not supported with *sql.Conn") + case *sql.DB: + if direction && m.goUp.RunDB != nil { + return m.goUp.RunDB(ctx, db) + } + if !direction && m.goDown.RunDB != nil { + return m.goDown.RunDB(ctx, db) + } + return nil + case *sql.Tx: + if direction && m.goUp.RunTx != nil { + return m.goUp.RunTx(ctx, db) + } + if !direction && m.goDown.RunTx != nil { + return m.goDown.RunTx(ctx, db) + } + return nil + } + return fmt.Errorf("invalid database connection type: %T", db) +} + +// runSQL is a helper function that runs the given SQL statements in the given direction. It must +// only be called after the migration has been parsed. +func runSQL(ctx context.Context, db database.DBTxConn, m *Migration, direction bool) error { + if !m.sql.Parsed { + return fmt.Errorf("sql migrations must be parsed") + } + var statements []string + if direction { + statements = m.sql.Up + } else { + statements = m.sql.Down + } + for _, stmt := range statements { + if _, err := db.ExecContext(ctx, stmt); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/pressly/goose/v3/provider_types.go b/vendor/github.com/pressly/goose/v3/provider_types.go new file mode 100644 index 0000000..3b9ab97 --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/provider_types.go @@ -0,0 +1,91 @@ +package goose + +import ( + "fmt" + "path/filepath" + "time" +) + +// MigrationType is the type of migration. +type MigrationType string + +const ( + TypeGo MigrationType = "go" + TypeSQL MigrationType = "sql" +) + +// Source represents a single migration source. +// +// The Path field may be empty if the migration was registered manually. This is typically the case +// for Go migrations registered using the [WithGoMigration] option. +type Source struct { + Type MigrationType + Path string + Version int64 +} + +// MigrationResult is the result of a single migration operation. +type MigrationResult struct { + Source *Source + Duration time.Duration + Direction string + // Empty indicates no action was taken during the migration, but it was still versioned. For + // SQL, it means no statements; for Go, it's a nil function. + Empty bool + // Error is only set if the migration failed. + Error error +} + +// String returns a string representation of the migration result. 
+// +// Example down: +// +// EMPTY down 00006_posts_view-copy.sql (607.83µs) +// OK down 00005_posts_view.sql (646.25µs) +// +// Example up: +// +// OK up 00005_posts_view.sql (727.5µs) +// EMPTY up 00006_posts_view-copy.sql (378.33µs) +func (m *MigrationResult) String() string { + var format string + if m.Direction == "up" { + format = "%-5s %-2s %s (%s)" + } else { + format = "%-5s %-4s %s (%s)" + } + var state string + if m.Empty { + state = "EMPTY" + } else { + state = "OK" + } + return fmt.Sprintf(format, + state, + m.Direction, + filepath.Base(m.Source.Path), + truncateDuration(m.Duration), + ) +} + +// State represents the state of a migration. +type State string + +const ( + // StatePending is a migration that exists on the filesystem, but not in the database. + StatePending State = "pending" + // StateApplied is a migration that has been applied to the database and exists on the + // filesystem. + StateApplied State = "applied" + + // TODO(mf): we could also add a third state for untracked migrations. This would be useful for + // migrations that were manually applied to the database, but not versioned. Or the Source was + // deleted, but the migration still exists in the database. StateUntracked State = "untracked" +) + +// MigrationStatus represents the status of a single migration. +type MigrationStatus struct { + Source *Source + State State + AppliedAt time.Time +} diff --git a/vendor/github.com/pressly/goose/v3/redo.go b/vendor/github.com/pressly/goose/v3/redo.go index c485f9f..ed3ff67 100644 --- a/vendor/github.com/pressly/goose/v3/redo.go +++ b/vendor/github.com/pressly/goose/v3/redo.go @@ -1,11 +1,18 @@ package goose import ( + "context" "database/sql" ) // Redo rolls back the most recently applied migration, then runs it again. func Redo(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return RedoContext(ctx, db, dir, opts...) +} + +// RedoContext rolls back the most recently applied migration, then runs it again. +func RedoContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -23,7 +30,7 @@ func Redo(db *sql.DB, dir string, opts ...OptionsFunc) error { } currentVersion = migrations[len(migrations)-1].Version } else { - if currentVersion, err = GetDBVersion(db); err != nil { + if currentVersion, err = GetDBVersionContext(ctx, db); err != nil { return err } } @@ -34,10 +41,10 @@ func Redo(db *sql.DB, dir string, opts ...OptionsFunc) error { } current.noVersioning = option.noVersioning - if err := current.Down(db); err != nil { + if err := current.DownContext(ctx, db); err != nil { return err } - if err := current.Up(db); err != nil { + if err := current.UpContext(ctx, db); err != nil { return err } return nil diff --git a/vendor/github.com/pressly/goose/v3/register.go b/vendor/github.com/pressly/goose/v3/register.go new file mode 100644 index 0000000..524228c --- /dev/null +++ b/vendor/github.com/pressly/goose/v3/register.go @@ -0,0 +1,133 @@ +package goose + +import ( + "context" + "database/sql" + "fmt" + "runtime" +) + +// GoMigrationContext is a Go migration func that is run within a transaction and receives a +// context. +type GoMigrationContext func(ctx context.Context, tx *sql.Tx) error + +// AddMigrationContext adds Go migrations. +func AddMigrationContext(up, down GoMigrationContext) { + _, filename, _, _ := runtime.Caller(1) + AddNamedMigrationContext(filename, up, down) +} + +// AddNamedMigrationContext adds named Go migrations. 
+func AddNamedMigrationContext(filename string, up, down GoMigrationContext) { + if err := register( + filename, + true, + &GoFunc{RunTx: up, Mode: TransactionEnabled}, + &GoFunc{RunTx: down, Mode: TransactionEnabled}, + ); err != nil { + panic(err) + } +} + +// GoMigrationNoTxContext is a Go migration func that is run outside a transaction and receives a +// context. +type GoMigrationNoTxContext func(ctx context.Context, db *sql.DB) error + +// AddMigrationNoTxContext adds Go migrations that will be run outside transaction. +func AddMigrationNoTxContext(up, down GoMigrationNoTxContext) { + _, filename, _, _ := runtime.Caller(1) + AddNamedMigrationNoTxContext(filename, up, down) +} + +// AddNamedMigrationNoTxContext adds named Go migrations that will be run outside transaction. +func AddNamedMigrationNoTxContext(filename string, up, down GoMigrationNoTxContext) { + if err := register( + filename, + false, + &GoFunc{RunDB: up, Mode: TransactionDisabled}, + &GoFunc{RunDB: down, Mode: TransactionDisabled}, + ); err != nil { + panic(err) + } +} + +func register(filename string, useTx bool, up, down *GoFunc) error { + v, _ := NumericComponent(filename) + if existing, ok := registeredGoMigrations[v]; ok { + return fmt.Errorf("failed to add migration %q: version %d conflicts with %q", + filename, + v, + existing.Source, + ) + } + // Add to global as a registered migration. + m := NewGoMigration(v, up, down) + m.Source = filename + // We explicitly set transaction to maintain existing behavior. Both up and down may be nil, but + // we know based on the register function what the user is requesting. + m.UseTx = useTx + registeredGoMigrations[v] = m + return nil +} + +// withContext changes the signature of a function that receives one argument to receive a context +// and the argument. +func withContext[T any](fn func(T) error) func(context.Context, T) error { + if fn == nil { + return nil + } + return func(ctx context.Context, t T) error { + return fn(t) + } +} + +// withoutContext changes the signature of a function that receives a context and one argument to +// receive only the argument. When called the passed context is always context.Background(). +func withoutContext[T any](fn func(context.Context, T) error) func(T) error { + if fn == nil { + return nil + } + return func(t T) error { + return fn(context.Background(), t) + } +} + +// GoMigration is a Go migration func that is run within a transaction. +// +// Deprecated: Use GoMigrationContext. +type GoMigration func(tx *sql.Tx) error + +// GoMigrationNoTx is a Go migration func that is run outside a transaction. +// +// Deprecated: Use GoMigrationNoTxContext. +type GoMigrationNoTx func(db *sql.DB) error + +// AddMigration adds Go migrations. +// +// Deprecated: Use AddMigrationContext. +func AddMigration(up, down GoMigration) { + _, filename, _, _ := runtime.Caller(1) + AddNamedMigrationContext(filename, withContext(up), withContext(down)) +} + +// AddNamedMigration adds named Go migrations. +// +// Deprecated: Use AddNamedMigrationContext. +func AddNamedMigration(filename string, up, down GoMigration) { + AddNamedMigrationContext(filename, withContext(up), withContext(down)) +} + +// AddMigrationNoTx adds Go migrations that will be run outside transaction. +// +// Deprecated: Use AddMigrationNoTxContext. 
+func AddMigrationNoTx(up, down GoMigrationNoTx) { + _, filename, _, _ := runtime.Caller(1) + AddNamedMigrationNoTxContext(filename, withContext(up), withContext(down)) +} + +// AddNamedMigrationNoTx adds named Go migrations that will be run outside transaction. +// +// Deprecated: Use AddNamedMigrationNoTxContext. +func AddNamedMigrationNoTx(filename string, up, down GoMigrationNoTx) { + AddNamedMigrationNoTxContext(filename, withContext(up), withContext(down)) +} diff --git a/vendor/github.com/pressly/goose/v3/reset.go b/vendor/github.com/pressly/goose/v3/reset.go index 258841f..274c539 100644 --- a/vendor/github.com/pressly/goose/v3/reset.go +++ b/vendor/github.com/pressly/goose/v3/reset.go @@ -1,6 +1,7 @@ package goose import ( + "context" "database/sql" "fmt" "sort" @@ -8,6 +9,12 @@ import ( // Reset rolls back all migrations func Reset(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return ResetContext(ctx, db, dir, opts...) +} + +// ResetContext rolls back all migrations +func ResetContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -17,10 +24,10 @@ func Reset(db *sql.DB, dir string, opts ...OptionsFunc) error { return fmt.Errorf("failed to collect migrations: %w", err) } if option.noVersioning { - return DownTo(db, dir, minVersion, opts...) + return DownToContext(ctx, db, dir, minVersion, opts...) } - statuses, err := dbMigrationsStatus(db) + statuses, err := dbMigrationsStatus(ctx, db) if err != nil { return fmt.Errorf("failed to get status of migrations: %w", err) } @@ -30,7 +37,7 @@ func Reset(db *sql.DB, dir string, opts ...OptionsFunc) error { if !statuses[migration.Version] { continue } - if err = migration.Down(db); err != nil { + if err = migration.DownContext(ctx, db); err != nil { return fmt.Errorf("failed to db-down: %w", err) } } @@ -38,30 +45,20 @@ func Reset(db *sql.DB, dir string, opts ...OptionsFunc) error { return nil } -func dbMigrationsStatus(db *sql.DB) (map[int64]bool, error) { - rows, err := GetDialect().dbVersionQuery(db) +func dbMigrationsStatus(ctx context.Context, db *sql.DB) (map[int64]bool, error) { + dbMigrations, err := store.ListMigrations(ctx, db, TableName()) if err != nil { - return map[int64]bool{}, nil + return nil, err } - defer rows.Close() - // The most recent record for each migration specifies // whether it has been applied or rolled back. + results := make(map[int64]bool) - result := make(map[int64]bool) - - for rows.Next() { - var row MigrationRecord - if err = rows.Scan(&row.VersionID, &row.IsApplied); err != nil { - return nil, fmt.Errorf("failed to scan row: %w", err) - } - - if _, ok := result[row.VersionID]; ok { + for _, m := range dbMigrations { + if _, ok := results[m.VersionID]; ok { continue } - - result[row.VersionID] = row.IsApplied + results[m.VersionID] = m.IsApplied } - - return result, nil + return results, nil } diff --git a/vendor/github.com/pressly/goose/v3/status.go b/vendor/github.com/pressly/goose/v3/status.go index f53f1be..50a6596 100644 --- a/vendor/github.com/pressly/goose/v3/status.go +++ b/vendor/github.com/pressly/goose/v3/status.go @@ -1,7 +1,9 @@ package goose import ( + "context" "database/sql" + "errors" "fmt" "path/filepath" "time" @@ -9,6 +11,12 @@ import ( // Status prints the status of all migrations. func Status(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return StatusContext(ctx, db, dir, opts...) 
+} + +// StatusContext prints the status of all migrations. +func StatusContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -18,8 +26,8 @@ func Status(db *sql.DB, dir string, opts ...OptionsFunc) error { return fmt.Errorf("failed to collect migrations: %w", err) } if option.noVersioning { - log.Println(" Applied At Migration") - log.Println(" =======================================") + log.Printf(" Applied At Migration\n") + log.Printf(" =======================================\n") for _, current := range migrations { log.Printf(" %-24s -- %v\n", "no versioning", filepath.Base(current.Source)) } @@ -27,14 +35,14 @@ func Status(db *sql.DB, dir string, opts ...OptionsFunc) error { } // must ensure that the version table exists if we're running on a pristine DB - if _, err := EnsureDBVersion(db); err != nil { + if _, err := EnsureDBVersionContext(ctx, db); err != nil { return fmt.Errorf("failed to ensure DB version: %w", err) } - log.Println(" Applied At Migration") - log.Println(" =======================================") + log.Printf(" Applied At Migration\n") + log.Printf(" =======================================\n") for _, migration := range migrations { - if err := printMigrationStatus(db, migration.Version, filepath.Base(migration.Source)); err != nil { + if err := printMigrationStatus(ctx, db, migration.Version, filepath.Base(migration.Source)); err != nil { return fmt.Errorf("failed to print status: %w", err) } } @@ -42,23 +50,15 @@ func Status(db *sql.DB, dir string, opts ...OptionsFunc) error { return nil } -func printMigrationStatus(db *sql.DB, version int64, script string) error { - q := GetDialect().migrationSQL() - - var row MigrationRecord - - err := db.QueryRow(q, version).Scan(&row.TStamp, &row.IsApplied) - if err != nil && err != sql.ErrNoRows { +func printMigrationStatus(ctx context.Context, db *sql.DB, version int64, script string) error { + m, err := store.GetMigration(ctx, db, TableName(), version) + if err != nil && !errors.Is(err, sql.ErrNoRows) { return fmt.Errorf("failed to query the latest migration: %w", err) } - - var appliedAt string - if row.IsApplied { - appliedAt = row.TStamp.Format(time.ANSIC) - } else { - appliedAt = "Pending" + appliedAt := "Pending" + if m != nil && m.IsApplied { + appliedAt = m.Timestamp.Format(time.ANSIC) } - log.Printf(" %-24s -- %v\n", appliedAt, script) return nil } diff --git a/vendor/github.com/pressly/goose/v3/up.go b/vendor/github.com/pressly/goose/v3/up.go index 1d668e3..d907893 100644 --- a/vendor/github.com/pressly/goose/v3/up.go +++ b/vendor/github.com/pressly/goose/v3/up.go @@ -1,8 +1,8 @@ package goose import ( + "context" "database/sql" - "errors" "fmt" "sort" "strings" @@ -34,6 +34,11 @@ func withApplyUpByOne() OptionsFunc { // UpTo migrates up to a specific version. func UpTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { + ctx := context.Background() + return UpToContext(ctx, db, dir, version, opts...) +} + +func UpToContext(ctx context.Context, db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -52,18 +57,24 @@ func UpTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { // migration over and over. 
version = foundMigrations[0].Version } - return upToNoVersioning(db, foundMigrations, version) + return upToNoVersioning(ctx, db, foundMigrations, version) } - if _, err := EnsureDBVersion(db); err != nil { + if _, err := EnsureDBVersionContext(ctx, db); err != nil { return err } - dbMigrations, err := listAllDBVersions(db) + dbMigrations, err := listAllDBVersions(ctx, db) if err != nil { return err } + dbMaxVersion := dbMigrations[len(dbMigrations)-1].Version + // lookupAppliedInDB is a map of all applied migrations in the database. + lookupAppliedInDB := make(map[int64]bool) + for _, m := range dbMigrations { + lookupAppliedInDB[m.Version] = true + } - missingMigrations := findMissingMigrations(dbMigrations, foundMigrations) + missingMigrations := findMissingMigrations(dbMigrations, foundMigrations, dbMaxVersion) // feature(mf): It is very possible someone may want to apply ONLY new migrations // and skip missing migrations altogether. At the moment this is not supported, @@ -74,62 +85,70 @@ func UpTo(db *sql.DB, dir string, version int64, opts ...OptionsFunc) error { output := fmt.Sprintf("version %d: %s", m.Version, m.Source) collected = append(collected, output) } - return fmt.Errorf("error: found %d missing migrations:\n\t%s", - len(missingMigrations), strings.Join(collected, "\n\t")) + return fmt.Errorf("error: found %d missing migrations before current version %d:\n\t%s", + len(missingMigrations), dbMaxVersion, strings.Join(collected, "\n\t")) } - + var migrationsToApply Migrations if option.allowMissing { - return upWithMissing( - db, - missingMigrations, - foundMigrations, - dbMigrations, - option, - ) + migrationsToApply = missingMigrations + } + // filter all migrations with a version greater than the supplied version (min) and less than or + // equal to the requested version (max). Note, we do not need to filter out missing migrations + // because we are only appending "new" migrations that have a higher version than the current + // database max version, which inevitably means they are not "missing". + for _, m := range foundMigrations { + if lookupAppliedInDB[m.Version] { + continue + } + if m.Version > dbMaxVersion && m.Version <= version { + migrationsToApply = append(migrationsToApply, m) + } } var current int64 - for { - var err error - current, err = GetDBVersion(db) - if err != nil { - return err - } - next, err := foundMigrations.Next(current) - if err != nil { - if errors.Is(err, ErrNoNextVersion) { - break - } - return fmt.Errorf("failed to find next migration: %v", err) - } - if err := next.Up(db); err != nil { + for _, m := range migrationsToApply { + if err := m.UpContext(ctx, db); err != nil { return err } if option.applyUpByOne { return nil } + current = m.Version + } + + if len(migrationsToApply) == 0 { + current, err = GetDBVersionContext(ctx, db) + if err != nil { + return err + } + + log.Printf("goose: no migrations to run. current version: %d\n", current) + } else { + log.Printf("goose: successfully migrated database to version: %d\n", current) } + // At this point there are no more migrations to apply. But we need to maintain // the following behaviour: // UpByOne returns an error to signifying there are no more migrations. // Up and UpTo return nil - log.Printf("goose: no migrations to run. current version: %d\n", current) + if option.applyUpByOne { return ErrNoNextVersion } + return nil } // upToNoVersioning applies up migrations up to, and including, the // target version. 
-func upToNoVersioning(db *sql.DB, migrations Migrations, version int64) error { +func upToNoVersioning(ctx context.Context, db *sql.DB, migrations Migrations, version int64) error { var finalVersion int64 for _, current := range migrations { if current.Version > version { break } current.noVersioning = true - if err := current.Up(db); err != nil { + if err := current.UpContext(ctx, db); err != nil { return err } finalVersion = current.Version @@ -138,112 +157,43 @@ func upToNoVersioning(db *sql.DB, migrations Migrations, version int64) error { return nil } -func upWithMissing( - db *sql.DB, - missingMigrations Migrations, - foundMigrations Migrations, - dbMigrations Migrations, - option *options, -) error { - lookupApplied := make(map[int64]bool) - for _, found := range dbMigrations { - lookupApplied[found.Version] = true - } - - // Apply all missing migrations first. - for _, missing := range missingMigrations { - if err := missing.Up(db); err != nil { - return err - } - // Apply one migration and return early. - if option.applyUpByOne { - return nil - } - // TODO(mf): do we need this check? It's a bit redundant, but we may - // want to keep it as a safe-guard. Maybe we should instead have - // the underlying query (if possible) return the current version as - // part of the same transaction. - current, err := GetDBVersion(db) - if err != nil { - return err - } - if current == missing.Version { - lookupApplied[missing.Version] = true - continue - } - return fmt.Errorf("error: missing migration:%d does not match current db version:%d", - current, missing.Version) - } - - // We can no longer rely on the database version_id to be sequential because - // missing (out-of-order) migrations get applied before newer migrations. - - for _, found := range foundMigrations { - // TODO(mf): instead of relying on this lookup, consider hitting - // the database directly? - // Alternatively, we can skip a bunch migrations and start the cursor - // at a version that represents 100% applied migrations. But this is - // risky, and we should aim to keep this logic simple. - if lookupApplied[found.Version] { - continue - } - if err := found.Up(db); err != nil { - return err - } - if option.applyUpByOne { - return nil - } - } - current, err := GetDBVersion(db) - if err != nil { - return err - } - // At this point there are no more migrations to apply. But we need to maintain - // the following behaviour: - // UpByOne returns an error to signifying there are no more migrations. - // Up and UpTo return nil - log.Printf("goose: no migrations to run. current version: %d\n", current) - if option.applyUpByOne { - return ErrNoNextVersion - } - return nil -} - // Up applies all available migrations. func Up(db *sql.DB, dir string, opts ...OptionsFunc) error { - return UpTo(db, dir, maxVersion, opts...) + ctx := context.Background() + return UpContext(ctx, db, dir, opts...) +} + +// UpContext applies all available migrations. +func UpContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { + return UpToContext(ctx, db, dir, maxVersion, opts...) } // UpByOne migrates up by a single version. func UpByOne(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return UpByOneContext(ctx, db, dir, opts...) +} + +// UpByOneContext migrates up by a single version. +func UpByOneContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { opts = append(opts, withApplyUpByOne()) - return UpTo(db, dir, maxVersion, opts...) 
+ return UpToContext(ctx, db, dir, maxVersion, opts...) } // listAllDBVersions returns a list of all migrations, ordered ascending. -// TODO(mf): fairly cheap, but a nice-to-have is pagination support. -func listAllDBVersions(db *sql.DB) (Migrations, error) { - rows, err := GetDialect().dbVersionQuery(db) +func listAllDBVersions(ctx context.Context, db *sql.DB) (Migrations, error) { + dbMigrations, err := store.ListMigrations(ctx, db, TableName()) if err != nil { - return nil, createVersionTable(db) + return nil, err } - var all Migrations - for rows.Next() { - var versionID int64 - var isApplied bool - if err := rows.Scan(&versionID, &isApplied); err != nil { - return nil, err - } + all := make(Migrations, 0, len(dbMigrations)) + for _, m := range dbMigrations { all = append(all, &Migration{ - Version: versionID, + Version: m.VersionID, }) } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } + // ListMigrations returns migrations in descending order by id. + // But we want to return them in ascending order by version_id, so we re-sort. sort.SliceStable(all, func(i, j int) bool { return all[i].Version < all[j].Version }) @@ -253,15 +203,14 @@ func listAllDBVersions(db *sql.DB) (Migrations, error) { // findMissingMigrations migrations returns all missing migrations. // A migrations is considered missing if it has a version less than the // current known max version. -func findMissingMigrations(knownMigrations, newMigrations Migrations) Migrations { - max := knownMigrations[len(knownMigrations)-1].Version +func findMissingMigrations(knownMigrations, newMigrations Migrations, dbMaxVersion int64) Migrations { existing := make(map[int64]bool) for _, known := range knownMigrations { existing[known.Version] = true } var missing Migrations for _, new := range newMigrations { - if !existing[new.Version] && new.Version < max { + if !existing[new.Version] && new.Version < dbMaxVersion { missing = append(missing, new) } } diff --git a/vendor/github.com/pressly/goose/v3/version.go b/vendor/github.com/pressly/goose/v3/version.go index 47765f7..89d0dcc 100644 --- a/vendor/github.com/pressly/goose/v3/version.go +++ b/vendor/github.com/pressly/goose/v3/version.go @@ -1,12 +1,19 @@ package goose import ( + "context" "database/sql" "fmt" ) // Version prints the current version of the database. func Version(db *sql.DB, dir string, opts ...OptionsFunc) error { + ctx := context.Background() + return VersionContext(ctx, db, dir, opts...) +} + +// VersionContext prints the current version of the database. 
+func VersionContext(ctx context.Context, db *sql.DB, dir string, opts ...OptionsFunc) error { option := &options{} for _, f := range opts { f(option) @@ -24,7 +31,7 @@ func Version(db *sql.DB, dir string, opts ...OptionsFunc) error { return nil } - current, err := GetDBVersion(db) + current, err := GetDBVersionContext(ctx, db) if err != nil { return err } diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index a197699..c24864a 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -2,6 +2,7 @@ linters: enable: - godot + - misspell - revive linter-settings: @@ -10,3 +11,5 @@ linter-settings: exclude: # Ignore "See: URL" - 'See:' + misspell: + locale: US diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index e358db6..062a281 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -176,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . 
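The goose changes above introduce context-aware entry points (UpContext, UpToContext, UpByOneContext, VersionContext); the original functions are kept and now simply delegate with context.Background(). A minimal sketch of how a caller might adopt the new API — the Postgres driver, DSN, and ./migrations directory here are hypothetical, not part of this patch:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq" // hypothetical driver choice; any database/sql driver works
	"github.com/pressly/goose/v3"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Bound the whole migration run; the context is threaded through the
	// new *Context variants instead of being implied.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// Apply all pending migrations, then report the resulting version.
	if err := goose.UpContext(ctx, db, "./migrations"); err != nil {
		log.Fatal(err)
	}
	if err := goose.VersionContext(ctx, db, "./migrations"); err != nil {
		log.Fatal(err)
	}
}
```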
@@ -205,7 +207,7 @@ common-tarball: promu .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 43c3773..1224816 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory extracting the ttar file using `make fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` Next, make the required changes to the extracted files in the `fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. 
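The procfs changes that follow replace ad-hoc error strings with the package's exported sentinel errors, ErrFileRead and ErrFileParse. Where a sentinel is wrapped with %w, callers can branch on the failure class with errors.Is; where it is formatted with %s, only the message carries the marker. A small sketch of the former case, assuming a standard /proc mount:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.BuddyInfo(); err != nil {
		switch {
		case errors.Is(err, procfs.ErrFileParse):
			// The file was read but its contents were malformed.
			fmt.Println("parse error:", err)
		case errors.Is(err, procfs.ErrFileRead):
			// The file could not be read at all.
			fmt.Println("read error:", err)
		default:
			fmt.Println("other error:", err)
		}
	}
}
```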
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 68f36e8..28783e2 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index f5b7939..4a17363 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } node := strings.TrimRight(parts[1], ",") @@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 06968ca..f4f5501 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", 
firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5048ad1..9a73e26 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) + return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 0102ab0..4980c87 100644 --- 
a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -20,7 +20,8 @@ import ( // FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. type FS struct { - proc fs.FS + proc fs.FS + isReal bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) { if err != nil { return FS{}, err } - return FS{fs}, nil + + isReal, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, isReal}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 0000000..134767d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct +func isRealProc(mountPoint string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 0000000..80df79c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build freebsd || linux +// +build freebsd linux + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem. 
+func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f8070e6..f560a8d 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index b030951..14272dc 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) { return us, nil } +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + // ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
func ReadUintFromFile(path string) (uint64, error) { data, err := os.ReadFile(path) diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 391c079..5a145bb 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0096caf..59465c5 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index a95c889..fdd4b95 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
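The mdstat changes in this hunk and the next apply the same sentinel-error treatment to /proc/mdstat parsing. For context, a brief sketch of reading RAID state through the package; the /proc mount point is assumed and the printed fields are taken from the current procfs.MDStat struct:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mdStats, err := fs.MDStat()
	if err != nil {
		// Parse failures are reported with the ErrFileParse marker.
		log.Fatal(err)
	}
	for _, md := range mdStats {
		fmt.Printf("%s: %s, %d/%d disks active\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal)
	}
}
```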
@@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } else { syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) @@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = 
strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } return syncedBlocks, pct, finish, speed, nil diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index f65e174..eaf00e2 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) } return *m, nil @@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { // Each line has at least a name and value; we ignore the unit. 
fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) } v, err := strconv.ParseUint(fields[1], 0, 64) diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d50..388ebf3 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 0c482c1..9d8af6d 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -186,6 +194,8 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 + // The average time from the point the client sends RPC requests until it receives the response. + AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -231,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -264,7 +301,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -288,7 +325,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -306,7 +343,7 @@ func parseMount(ss []string) (*Mount, error) { for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -343,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -358,7 +395,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -369,7 +406,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -379,7 +416,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -389,7 +426,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: 
%v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -428,7 +465,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -457,7 +494,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { // integer fields. func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -521,7 +558,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -534,7 +571,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], @@ -546,6 +582,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } + if ns[0] != 0 { + opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) + } if len(ns) > 8 { opStats.Errors = ns[8] @@ -572,10 +611,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -583,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -600,7 +642,9 
@@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -618,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -632,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 8300dac..fdfa456 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "strconv" "strings" "github.com/prometheus/procfs/internal/util" @@ -28,9 +27,13 @@ import ( // and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 + Searched uint64 Found uint64 + New uint64 Invalid uint64 Ignore uint64 + Delete uint64 + DeleteList uint64 Insert uint64 InsertFailed uint64 Drop uint64 @@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { // Parses a ConntrackStatEntry from given array of fields. 
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) + entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } - entry.InsertFailed = insertFailed - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], } - entry.Drop = drop - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err + // Ignore missing search_restart on Linux < 2.6.35. + if numEntries == 17 { + stats.SearchRestart = *entries[16] } - entry.EarlyDrop = earlyDrop - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string. 
-func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err + return stats, nil } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 7fd57d7..4da81ea 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) } } @@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket 
line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 374b6f7..b6c77b7 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 0000000..deb7029 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. 
+type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation { + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index e36f487..360e36a 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // Expect a minimum of a protocol and one key/value pair. 
fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 06b7b8f..c770852 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -76,13 +76,14 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { s := bufio.NewScanner(r) var stats []SoftnetStat + cpuIndex := 0 for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 @@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { softnetStat.SoftnetBacklogLen = us[0] softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number. 
+ softnetStat.Index = uint32(cpuIndex) } softnetStat.Width = width stats = append(stats, softnetStat) + cpuIndex++ } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 98aa8e1..acbbc57 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, l := len(fields) if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 0000000..7443edc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
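Illustrative only, not in the patch: the error rewrites in these files wrap the sentinel errors (ErrFileParse, ErrFileRead) that are introduced in proc.go later in this patch, so callers can classify failures with errors.Is. Note that only the messages built with %w actually wrap the sentinel; the %s forms merely embed its text and will not match.

    package main

    import (
        "errors"
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        if _, err := fs.NetSoftnetStat(); err != nil {
            // Matches only where the sentinel was wrapped with %w.
            if errors.Is(err, procfs.ErrFileParse) {
                fmt.Println("softnet_stat is present but malformed:", err)
                return
            }
            fmt.Println("read error:", err)
        }
    }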
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. +func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. 
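A short sketch, not part of the patch, of consuming the new Wireless accessor defined above; interface names and signal values depend on the host:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        ifaces, err := fs.Wireless()
        if err != nil {
            panic(err)
        }
        for _, w := range ifaces {
            fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed_beacons=%d\n",
                w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
        }
    }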
+ if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + } + + dnwid, err := strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + } + + return interfaces, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index f9d9d24..932ef20 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 5cc40ae..742dff4 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,7 +15,6 @@ package procfs import ( "bufio" - "io" "os" "path/filepath" "strconv" @@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, 
error) { var netStatsTotal []NetStat for _, filePath := range statFiles { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - procNetstat, err := parseNetstat(file) + procNetstat, err := parseNetstat(filePath) if err != nil { return nil, err } @@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) { // parseNetstat parses the metrics from `/proc/net/stat/` file // and returns a NetStat structure. -func parseNetstat(r io.Reader) (NetStat, error) { - var ( - scanner = bufio.NewScanner(r) - netStat = NetStat{ - Stats: make(map[string][]uint64), - } - ) +func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + scanner := bufio.NewScanner(file) scanner.Scan() // First string is always a header for stats diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index c30223a..d1f71ca 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -15,13 +15,13 @@ package procfs import ( "bytes" + "errors" "fmt" "io" "os" "strconv" "strings" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -30,12 +30,18 @@ type Proc struct { // The process ID. PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. @@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil @@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // FileDescriptorsLen returns the number of currently open file descriptors of // a process. 
func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.isReal { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + fds, err := p.fileDescriptors() if err != nil { return 0, err @@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil } func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } // FileDescriptorsInfo retrieves information about all file descriptors of diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index ea83a75..daeed7f 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 24d4dce..5dd4938 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { fields := strings.Fields(CgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) } CgroupSummary := &CgroupSummary{ @@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { } CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) } CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) } CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) } return CgroupSummary, nil } diff --git 
a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 1bbdd4a..fa761b3 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } @@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. 
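Not part of the patch: a small sketch reading the new Ino field through the existing FDInfo accessor. It assumes the calling process has file descriptor 0 open (stdin).

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        p, err := fs.Self()
        if err != nil {
            panic(err)
        }
        info, err := p.FDInfo("0") // fd 0 assumed open (stdin)
        if err != nil {
            panic(err)
        }
        fmt.Println("pos:", info.Pos, "flags:", info.Flags, "ino:", info.Ino)
    }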
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 9df79c2..86b4b45 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) { continue } if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) } intName := parts[0][:len(parts[0])-1] // remove trailing : diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 7a13881..c86d815 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) { //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index f1bcbf3..7e75c28 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } @@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { // parsePermissions parses a token and returns any that are set. 
func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) { func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 6a43bb2..8e3ff4d 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 391b4cb..c226667 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index a68fe15..fe9dbb4 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,14 +61,14 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } - return parsePSIStats(resource, bytes.NewReader(data)) + return parsePSIStats(bytes.NewReader(data)) } // parsePSIStats parses the specified file for pressure stall information. 
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { +func parsePSIStats(r io.Reader) (PSIStats, error) { psiStats := PSIStats{} scanner := bufio.NewScanner(r) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 0e97d99..ad8785a 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } vBytes := vKBytes * 1024 - s.addValue(k, v, vKBytes, vBytes) + s.addValue(k, vBytes) return nil } -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { switch k { case "Rss": s.Rss += vUintBytes diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 6c46b71..b9d2cf6 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { // Remove trailing :. protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index b278eb2..923e550 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,7 +18,6 @@ import ( "fmt" "os" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -112,7 +111,7 @@ type ProcStat struct { // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 - proc fs.FS + proc FS } // NewStat returns the current status information of the process. @@ -139,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) @@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.proc.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 3d8c064..46307f5 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "sort" "strconv" "strings" @@ -22,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -31,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. 
VmPeak uint64 // nolint:revive @@ -76,6 +79,9 @@ type ProcStatus struct { UIDs [4]string // GIDs of the process (Real, effective, saved set, and filesystem GIDs) GIDs [4]string + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 } // NewStatus returns the current status information of the process. @@ -123,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -161,10 +169,53 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.VoluntaryCtxtSwitches = vUint case "nonvoluntary_ctxt_switches": s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + s.CpusAllowedList = calcCpusAllowedList(vString) } + } // TotalCtxtSwitches returns the total context switch. func (s ProcStatus) TotalCtxtSwitches() uint64 { return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches } + +func calcCpusAllowedList(cpuString string) []uint64 { + s := strings.Split(cpuString, ",") + + var g []uint64 + + for _, cpu := range s { + // parse cpu ranges, example: 1-3=[1,2,3] + if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { + startCPU, _ := strconv.ParseUint(l[0], 10, 64) + endCPU, _ := strconv.ParseUint(l[1], 10, 64) + + for i := startCPU; i <= endCPU; i++ { + g = append(g, i) + } + } else if len(l) == 1 { + cpu, _ := strconv.ParseUint(l[0], 10, 64) + g = append(g, cpu) + } + + } + + sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + return g +} + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index d46533e..12c5bf0 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index bc9aaf5..8611c90 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) { l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 559129c..b8fad67 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { ) if !scanner.Scan() { - return Softirqs{}, 
fmt.Errorf("softirqs empty") + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) } for scanner.Scan() { @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): 
%w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 586af48..34fc3ee 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { err error ) + // Increase default scanner buffer to handle very long `intr` lines. 
+ buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) + for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -197,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -247,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 15edc22..fa00f55 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) { swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, 
swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index f08bfc7..df2215e 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} @@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) { if err != nil { continue } - t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) } return t, nil @@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil } // Thread returns a process for a given TID of Proc. func (proc Proc) Thread(tid int) (Proc, error) { - tfs := fsi.FS(proc.path("task")) - if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } return Proc{PID: tid, fs: tfs}, nil diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cdedcae..51c49d8 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) { return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } files, err := os.ReadDir(path) diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index c745a4c..ce5fefa 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE new file mode 100644 index 0000000..29e1ab6 --- /dev/null +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 
Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/segmentio/asm/bswap/swap64.go b/vendor/github.com/segmentio/asm/bswap/swap64.go new file mode 100644 index 0000000..71be881 --- /dev/null +++ b/vendor/github.com/segmentio/asm/bswap/swap64.go @@ -0,0 +1,15 @@ +package bswap + +import _ "github.com/segmentio/asm/cpu" + +// Swap64 performs an in-place byte swap on each 64 bits elements in b. +// +// This function is useful when dealing with big-endian input; by converting it +// to little-endian, the data can then be compared using native CPU instructions +// instead of having to employ often slower byte comparison algorithms. +func Swap64(b []byte) { + if len(b)%8 != 0 { + panic("swap64 expects the input to contain full 64 bits elements") + } + swap64(b) +} diff --git a/vendor/github.com/segmentio/asm/bswap/swap64_amd64.go b/vendor/github.com/segmentio/asm/bswap/swap64_amd64.go new file mode 100644 index 0000000..ebbe64e --- /dev/null +++ b/vendor/github.com/segmentio/asm/bswap/swap64_amd64.go @@ -0,0 +1,8 @@ +// Code generated by command: go run swap64_asm.go -pkg bswap -out ../bswap/swap64_amd64.s -stubs ../bswap/swap64_amd64.go. DO NOT EDIT. + +//go:build !purego + +package bswap + +// swap64 performs an in-place byte swap on each qword of the input buffer. +func swap64(b []byte) diff --git a/vendor/github.com/segmentio/asm/bswap/swap64_amd64.s b/vendor/github.com/segmentio/asm/bswap/swap64_amd64.s new file mode 100644 index 0000000..887f5b4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/bswap/swap64_amd64.s @@ -0,0 +1,74 @@ +// Code generated by command: go run swap64_asm.go -pkg bswap -out ../bswap/swap64_amd64.s -stubs ../bswap/swap64_amd64.go. DO NOT EDIT. 
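Illustrative only, not in the patch: the exported Swap64 wrapper shown above byte-swaps whole 64-bit words in place, so big-endian input can then be read with native little-endian integer comparisons.

    package main

    import (
        "encoding/binary"
        "fmt"

        "github.com/segmentio/asm/bswap"
    )

    func main() {
        buf := make([]byte, 16)
        binary.BigEndian.PutUint64(buf[0:8], 1)
        binary.BigEndian.PutUint64(buf[8:16], 2)

        // The length must be a multiple of 8, otherwise Swap64 panics.
        bswap.Swap64(buf)

        fmt.Println(binary.LittleEndian.Uint64(buf[0:8]), binary.LittleEndian.Uint64(buf[8:16])) // 1 2
    }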
+ +//go:build !purego + +#include "textflag.h" + +// func swap64(b []byte) +// Requires: AVX, AVX2 +TEXT ·swap64(SB), NOSPLIT, $0-24 + MOVQ b_base+0(FP), AX + MOVQ b_len+8(FP), CX + MOVQ AX, DX + ADDQ CX, DX + BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB) + JCC x86_loop + VMOVDQU shuffle_mask<>+0(SB), Y0 + +avx2_loop: + MOVQ AX, CX + ADDQ $0x80, CX + CMPQ CX, DX + JAE x86_loop + VMOVDQU (AX), Y1 + VMOVDQU 32(AX), Y2 + VMOVDQU 64(AX), Y3 + VMOVDQU 96(AX), Y4 + VPSHUFB Y0, Y1, Y1 + VPSHUFB Y0, Y2, Y2 + VPSHUFB Y0, Y3, Y3 + VPSHUFB Y0, Y4, Y4 + VMOVDQU Y1, (AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + MOVQ CX, AX + JMP avx2_loop + +x86_loop: + MOVQ AX, CX + ADDQ $0x20, CX + CMPQ CX, DX + JAE slow_loop + MOVQ (AX), BX + MOVQ 8(AX), SI + MOVQ 16(AX), DI + MOVQ 24(AX), R8 + BSWAPQ BX + BSWAPQ SI + BSWAPQ DI + BSWAPQ R8 + MOVQ BX, (AX) + MOVQ SI, 8(AX) + MOVQ DI, 16(AX) + MOVQ R8, 24(AX) + MOVQ CX, AX + JMP x86_loop + +slow_loop: + CMPQ AX, DX + JAE done + MOVQ (AX), CX + BSWAPQ CX + MOVQ CX, (AX) + ADDQ $0x08, AX + JMP slow_loop + +done: + RET + +DATA shuffle_mask<>+0(SB)/8, $0x0001020304050607 +DATA shuffle_mask<>+8(SB)/8, $0x08090a0b0c0d0e0f +DATA shuffle_mask<>+16(SB)/8, $0x0001020304050607 +DATA shuffle_mask<>+24(SB)/8, $0x08090a0b0c0d0e0f +GLOBL shuffle_mask<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/bswap/swap64_default.go b/vendor/github.com/segmentio/asm/bswap/swap64_default.go new file mode 100644 index 0000000..9d66ef2 --- /dev/null +++ b/vendor/github.com/segmentio/asm/bswap/swap64_default.go @@ -0,0 +1,13 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package bswap + +import "encoding/binary" + +func swap64(b []byte) { + for i := 0; i < len(b); i += 8 { + u := binary.BigEndian.Uint64(b[i:]) + binary.LittleEndian.PutUint64(b[i:], u) + } +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go new file mode 100644 index 0000000..47c695a --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go @@ -0,0 +1,80 @@ +package arm + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SWP Feature = 1 << iota // SWP instruction support + HALF // Half-word load and store support + THUMB // ARM Thumb instruction set + BIT26 // Address space limited to 26-bits + FASTMUL // 32-bit operand, 64-bit result multiplication support + FPA // Floating point arithmetic support + VFP // Vector floating point support + EDSP // DSP Extensions support + JAVA // Java instruction set + IWMMXT // Intel Wireless MMX technology support + CRUNCH // MaverickCrunch context switching and handling + THUMBEE // Thumb EE instruction set + NEON // NEON instruction set + VFPv3 // Vector floating point version 3 support + VFPv3D16 // Vector floating point version 3 D8-D15 + TLS // Thread local storage support + VFPv4 // Vector floating point version 4 support + IDIVA // Integer divide instruction support in ARM mode + IDIVT // Integer divide instruction support in Thumb mode + VFPD32 // Vector floating point version 3 D15-D31 + LPAE // Large Physical Address Extensions + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SWP, ARM.HasSWP) + cpu.set(HALF, ARM.HasHALF) + cpu.set(THUMB, ARM.HasTHUMB) + cpu.set(BIT26, ARM.Has26BIT) + cpu.set(FASTMUL, ARM.HasFASTMUL) + cpu.set(FPA, ARM.HasFPA) + cpu.set(VFP, ARM.HasVFP) + cpu.set(EDSP, ARM.HasEDSP) + cpu.set(JAVA, ARM.HasJAVA) + cpu.set(IWMMXT, ARM.HasIWMMXT) + cpu.set(CRUNCH, ARM.HasCRUNCH) + cpu.set(THUMBEE, ARM.HasTHUMBEE) + cpu.set(NEON, ARM.HasNEON) + cpu.set(VFPv3, ARM.HasVFPv3) + cpu.set(VFPv3D16, ARM.HasVFPv3D16) + cpu.set(TLS, ARM.HasTLS) + cpu.set(VFPv4, ARM.HasVFPv4) + cpu.set(IDIVA, ARM.HasIDIVA) + cpu.set(IDIVT, ARM.HasIDIVT) + cpu.set(VFPD32, ARM.HasVFPD32) + cpu.set(LPAE, ARM.HasLPAE) + cpu.set(EVTSTRM, ARM.HasEVTSTRM) + cpu.set(AES, ARM.HasAES) + cpu.set(PMULL, ARM.HasPMULL) + cpu.set(SHA1, ARM.HasSHA1) + cpu.set(SHA2, ARM.HasSHA2) + cpu.set(CRC32, ARM.HasCRC32) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go new file mode 100644 index 0000000..0c5134c --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go @@ -0,0 +1,74 @@ +package arm64 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + FP Feature = 1 << iota // Floating-point instruction set (always available) + ASIMD // Advanced SIMD (always available) + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation + ATOMICS // Atomic memory operation instruction set + FPHP // Half precision floating-point instruction set + ASIMDHP // Advanced SIMD half precision instruction set + CPUID // CPUID identification scheme registers + ASIMDRDM // Rounding double multiply add/subtract instruction set + JSCVT // Javascript conversion from floating-point to integer + FCMA // Floating-point multiplication and addition of complex numbers + LRCPC // Release Consistent processor consistent support + DCPOP // Persistent memory support + SHA3 // SHA3 hardware implementation + SM3 // SM3 hardware implementation + SM4 // SM4 hardware implementation + ASIMDDP // Advanced SIMD double precision instruction set + SHA512 // SHA512 hardware implementation + SVE // Scalable Vector Extensions + ASIMDFHM // Advanced SIMD multiplication FP16 to FP32 +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(FP, ARM64.HasFP) + cpu.set(ASIMD, ARM64.HasASIMD) + cpu.set(EVTSTRM, ARM64.HasEVTSTRM) + cpu.set(AES, ARM64.HasAES) + cpu.set(PMULL, ARM64.HasPMULL) + cpu.set(SHA1, ARM64.HasSHA1) + cpu.set(SHA2, ARM64.HasSHA2) + cpu.set(CRC32, ARM64.HasCRC32) + cpu.set(ATOMICS, ARM64.HasATOMICS) + cpu.set(FPHP, ARM64.HasFPHP) + cpu.set(ASIMDHP, ARM64.HasASIMDHP) + cpu.set(CPUID, ARM64.HasCPUID) + cpu.set(ASIMDRDM, ARM64.HasASIMDRDM) + cpu.set(JSCVT, ARM64.HasJSCVT) + cpu.set(FCMA, ARM64.HasFCMA) + cpu.set(LRCPC, ARM64.HasLRCPC) + cpu.set(DCPOP, ARM64.HasDCPOP) + cpu.set(SHA3, ARM64.HasSHA3) + cpu.set(SM3, ARM64.HasSM3) + cpu.set(SM4, ARM64.HasSM4) + cpu.set(ASIMDDP, ARM64.HasASIMDDP) + cpu.set(SHA512, ARM64.HasSHA512) + cpu.set(SVE, ARM64.HasSVE) + cpu.set(ASIMDFHM, ARM64.HasASIMDFHM) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go new file mode 100644 index 0000000..6ddf497 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpu.go @@ -0,0 +1,22 @@ +// Pakage cpu provides APIs to detect CPU features available at runtime. +package cpu + +import ( + "github.com/segmentio/asm/cpu/arm" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +var ( + // X86 is the bitset representing the set of the x86 instruction sets are + // supported by the CPU. + X86 = x86.ABI() + + // ARM is the bitset representing which parts of the arm instruction sets + // are supported by the CPU. + ARM = arm.ABI() + + // ARM64 is the bitset representing which parts of the arm64 instruction + // sets are supported by the CPU. + ARM64 = arm64.ABI() +) diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go new file mode 100644 index 0000000..0949d3d --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go @@ -0,0 +1,32 @@ +// Package cpuid provides generic types used to represent CPU features supported +// by the architecture. 
+package cpuid + +// CPU is a bitset of feature flags representing the capabilities of various CPU +// architeectures that this package provides optimized assembly routines for. +// +// The intent is to provide a stable ABI between the Go code that generate the +// assembly, and the program that uses the library functions. +type CPU uint64 + +// Feature represents a single CPU feature. +type Feature uint64 + +const ( + // None is a Feature value that has no CPU features enabled. + None Feature = 0 + // All is a Feature value that has all CPU features enabled. + All Feature = 0xFFFFFFFFFFFFFFFF +) + +func (cpu CPU) Has(feature Feature) bool { + return (Feature(cpu) & feature) == feature +} + +func (cpu *CPU) Set(feature Feature, enabled bool) { + if enabled { + *cpu |= CPU(feature) + } else { + *cpu &= ^CPU(feature) + } +} diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go new file mode 100644 index 0000000..9e93537 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go @@ -0,0 +1,76 @@ +package x86 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . "golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SSE Feature = 1 << iota // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE41 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + AVX // AVX functions + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + CMOV // Conditional move +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SEE? + cpu.set(SSE2, X86.HasSSE2) + cpu.set(SSE3, X86.HasSSE3) + cpu.set(SSE41, X86.HasSSE41) + cpu.set(SSE42, X86.HasSSE42) + cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu? 
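A brief sketch, not part of the patch, of querying the feature bitset that x86.ABI() assembles from golang.org/x/sys/cpu:

    package main

    import (
        "fmt"

        "github.com/segmentio/asm/cpu/x86"
    )

    func main() {
        cpu := x86.ABI()
        fmt.Println("AVX2:   ", cpu.Has(x86.AVX2))
        fmt.Println("AVX512F:", cpu.Has(x86.AVX512F))
    }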
+ cpu.set(SSSE3, X86.HasSSSE3) + cpu.set(AVX, X86.HasAVX) + cpu.set(AVX2, X86.HasAVX2) + cpu.set(AVX512BF16, X86.HasAVX512BF16) + cpu.set(AVX512BITALG, X86.HasAVX512BITALG) + cpu.set(AVX512BW, X86.HasAVX512BW) + cpu.set(AVX512CD, X86.HasAVX512CD) + cpu.set(AVX512DQ, X86.HasAVX512DQ) + cpu.set(AVX512ER, X86.HasAVX512ER) + cpu.set(AVX512F, X86.HasAVX512F) + cpu.set(AVX512IFMA, X86.HasAVX512IFMA) + cpu.set(AVX512PF, X86.HasAVX512PF) + cpu.set(AVX512VBMI, X86.HasAVX512VBMI) + cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2) + cpu.set(AVX512VL, X86.HasAVX512VL) + cpu.set(AVX512VNNI, X86.HasAVX512VNNI) + cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu? + cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ) + cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV? + return cpu +} diff --git a/vendor/github.com/sethvargo/go-retry/LICENSE b/vendor/github.com/sethvargo/go-retry/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/sethvargo/go-retry/README.md b/vendor/github.com/sethvargo/go-retry/README.md new file mode 100644 index 0000000..4773182 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/README.md @@ -0,0 +1,185 @@ +# Retry + +[![GoDoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/mod/github.com/sethvargo/go-retry) + +Retry is a Go library for facilitating retry logic and backoff. It's highly +extensible with full control over how and when retries occur. You can also write +your own custom backoff functions by implementing the Backoff interface. + +## Features + +- **Extensible** - Inspired by Go's built-in HTTP package, this Go backoff and + retry library is extensible via middleware. You can write custom backoff + functions or use a provided filter. + +- **Independent** - No external dependencies besides the Go standard library, + meaning it won't bloat your project. 
+ +- **Concurrent** - Unless otherwise specified, everything is safe for concurrent + use. + +- **Context-aware** - Use native Go contexts to control cancellation. + +## Usage + +Here is an example use for connecting to a database using Go's `database/sql` +package: + +```golang +package main + +import ( + "context" + "database/sql" + "log" + "time" + + "github.com/sethvargo/go-retry" +) + +func main() { + db, err := sql.Open("mysql", "...") + if err != nil { + log.Fatal(err) + } + + ctx := context.Background() + if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error { + if err := db.PingContext(ctx); err != nil { + // This marks the error as retryable + return retry.RetryableError(err) + } + return nil + }); err != nil { + log.Fatal(err) + } +} +``` + +## Backoffs + +In addition to your own custom algorithms, there are built-in algorithms for +backoff in the library. + +### Constant + +A very rudimentary backoff, just returns a constant value. Here is an example: + +```text +1s -> 1s -> 1s -> 1s -> 1s -> 1s +``` + +Usage: + +```golang +NewConstant(1 * time.Second) +``` + +### Exponential + +Arguably the most common backoff, the next value is double the previous value. +Here is an example: + +```text +1s -> 2s -> 4s -> 8s -> 16s -> 32s -> 64s +``` + +Usage: + +```golang +NewExponential(1 * time.Second) +``` + +### Fibonacci + +The Fibonacci backoff uses the Fibonacci sequence to calculate the backoff. The +next value is the sum of the current value and the previous value. This means +retries happen quickly at first, but then gradually slow down, which is ideal for +network-type issues. Here is an example: + +```text +1s -> 1s -> 2s -> 3s -> 5s -> 8s -> 13s +``` + +Usage: + +```golang +NewFibonacci(1 * time.Second) +``` + +## Modifiers (Middleware) + +The built-in backoff algorithms never terminate and have no caps or limits - you +control their behavior with middleware. There's built-in middleware, but you can +also write custom middleware. + +### Jitter + +To reduce the chances of a thundering herd, add random jitter to the returned +value. + +```golang +b := NewFibonacci(1 * time.Second) + +// Return the next value, +/- 500ms +b = WithJitter(500*time.Millisecond, b) + +// Return the next value, +/- 5% of the result +b = WithJitterPercent(5, b) +``` + +### MaxRetries + +To terminate a retry, specify the maximum number of _retries_. Note this +is _retries_, not _attempts_. Attempts is retries + 1. + +```golang +b := NewFibonacci(1 * time.Second) + +// Stop after 4 retries, when the 5th attempt has failed. In this example, the worst case elapsed +// time would be 1s + 1s + 2s + 3s = 7s. +b = WithMaxRetries(4, b) +``` + +### CappedDuration + +To ensure an individual calculated duration never exceeds a value, use a cap: + +```golang +b := NewFibonacci(1 * time.Second) + +// Ensure the maximum value is 2s. In this example, the sleep values would be +// 1s, 1s, 2s, 2s, 2s, 2s... +b = WithCappedDuration(2 * time.Second, b) +``` + +### WithMaxDuration + +For a best-effort limit on the total execution time, specify a max duration: + +```golang +b := NewFibonacci(1 * time.Second) + +// Ensure the maximum total retry time is 5s. +b = WithMaxDuration(5 * time.Second, b) +``` + +## Benchmarks + +Here are benchmarks against some other popular Go backoff and retry libraries. +You can run these benchmarks yourself via the `benchmark/` folder. Commas and +spacing fixed for clarity.
+ +```text +Benchmark/cenkalti-7 13,052,668 87.3 ns/op +Benchmark/lestrrat-7 902,044 1,355 ns/op +Benchmark/sethvargo-7 203,914,245 5.73 ns/op +``` + +## Notes and Caveats + +- Randomization uses `math/rand` seeded with the Unix timestamp instead of + `crypto/rand`. +- Ordering of addition of multiple modifiers will make a difference. + For example, ensure you add `CappedDuration` before `WithMaxDuration`, otherwise it may terminate too early. + Another example is you could add `Jitter` before or after capping depending on your desired outcome. diff --git a/vendor/github.com/sethvargo/go-retry/backoff.go b/vendor/github.com/sethvargo/go-retry/backoff.go new file mode 100644 index 0000000..de3974f --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/backoff.go @@ -0,0 +1,134 @@ +package retry + +import ( + "sync" + "time" +) + +// Backoff is an interface that backs off. +type Backoff interface { + // Next returns the time duration to wait and whether to stop. + Next() (next time.Duration, stop bool) +} + +var _ Backoff = (BackoffFunc)(nil) + +// BackoffFunc is a backoff expressed as a function. +type BackoffFunc func() (time.Duration, bool) + +// Next implements Backoff. +func (b BackoffFunc) Next() (time.Duration, bool) { + return b() +} + +// WithJitter wraps a backoff function and adds the specified jitter. j can be +// interpreted as "+/- j". For example, if j were 5 seconds and the backoff +// returned 20s, the value could be between 15 and 25 seconds. The value can +// never be less than 0. +func WithJitter(j time.Duration, next Backoff) Backoff { + r := newLockedRandom(time.Now().UnixNano()) + + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + diff := time.Duration(r.Int63n(int64(j)*2) - int64(j)) + val = val + diff + if val < 0 { + val = 0 + } + return val, false + }) +} + +// WithJitterPercent wraps a backoff function and adds the specified jitter +// percentage. j can be interpreted as "+/- j%". For example, if j were 5 and +// the backoff returned 20s, the value could be between 19 and 21 seconds. The +// value can never be less than 0 or greater than 100. +func WithJitterPercent(j uint64, next Backoff) Backoff { + r := newLockedRandom(time.Now().UnixNano()) + + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + // Get a value between -j and j, then convert to a percentage + top := r.Int63n(int64(j)*2) - int64(j) + pct := 1 - float64(top)/100.0 + + val = time.Duration(float64(val) * pct) + if val < 0 { + val = 0 + } + return val, false + }) +} + +// WithMaxRetries executes the backoff function up until the maximum attempts. +func WithMaxRetries(max uint64, next Backoff) Backoff { + var l sync.Mutex + var attempt uint64 + + return BackoffFunc(func() (time.Duration, bool) { + l.Lock() + defer l.Unlock() + + if attempt >= max { + return 0, true + } + attempt++ + + val, stop := next.Next() + if stop { + return 0, true + } + + return val, false + }) +} + +// WithCappedDuration sets a maximum on the duration returned from the next +// backoff. This is NOT a total backoff time, but rather a cap on the maximum +// value a backoff can return. Without another middleware, the backoff will +// continue infinitely.
+func WithCappedDuration(cap time.Duration, next Backoff) Backoff { + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + if val <= 0 || val > cap { + val = cap + } + return val, false + }) +} + +// WithMaxDuration sets a maximum on the total amount of time a backoff should +// execute. It's best-effort, and should not be used to guarantee an exact +// amount of time. +func WithMaxDuration(timeout time.Duration, next Backoff) Backoff { + start := time.Now() + + return BackoffFunc(func() (time.Duration, bool) { + diff := timeout - time.Since(start) + if diff <= 0 { + return 0, true + } + + val, stop := next.Next() + if stop { + return 0, true + } + + if val <= 0 || val > diff { + val = diff + } + return val, false + }) +} diff --git a/vendor/github.com/sethvargo/go-retry/backoff_constant.go b/vendor/github.com/sethvargo/go-retry/backoff_constant.go new file mode 100644 index 0000000..ef01fa0 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/backoff_constant.go @@ -0,0 +1,25 @@ +package retry + +import ( + "context" + "time" +) + +// Constant is a wrapper around Retry that uses a constant backoff. It panics if +// the given base is less than zero. +func Constant(ctx context.Context, t time.Duration, f RetryFunc) error { + return Do(ctx, NewConstant(t), f) +} + +// NewConstant creates a new constant backoff using the value t. The wait time +// is the provided constant value. It panics if the given base is less than +// zero. +func NewConstant(t time.Duration) Backoff { + if t <= 0 { + panic("t must be greater than 0") + } + + return BackoffFunc(func() (time.Duration, bool) { + return t, false + }) +} diff --git a/vendor/github.com/sethvargo/go-retry/backoff_exponential.go b/vendor/github.com/sethvargo/go-retry/backoff_exponential.go new file mode 100644 index 0000000..a85b498 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/backoff_exponential.go @@ -0,0 +1,47 @@ +package retry + +import ( + "context" + "math" + "sync/atomic" + "time" +) + +type exponentialBackoff struct { + base time.Duration + attempt uint64 +} + +// Exponential is a wrapper around Retry that uses an exponential backoff. See +// NewExponential. +func Exponential(ctx context.Context, base time.Duration, f RetryFunc) error { + return Do(ctx, NewExponential(base), f) +} + +// NewExponential creates a new exponential backoff using the starting value of +// base and doubling on each failure (1, 2, 4, 8, 16, 32, 64...), up to max. +// +// Once it overflows, the function constantly returns the maximum time.Duration +// for a 64-bit integer. +// +// It panics if the given base is less than zero. +func NewExponential(base time.Duration) Backoff { + if base <= 0 { + panic("base must be greater than 0") + } + + return &exponentialBackoff{ + base: base, + } +} + +// Next implements Backoff. It is safe for concurrent use. 
+func (b *exponentialBackoff) Next() (time.Duration, bool) { + next := b.base << (atomic.AddUint64(&b.attempt, 1) - 1) + if next <= 0 { + atomic.AddUint64(&b.attempt, ^uint64(0)) + next = math.MaxInt64 + } + + return next, false +} diff --git a/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go b/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go new file mode 100644 index 0000000..250a026 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go @@ -0,0 +1,56 @@ +package retry + +import ( + "context" + "math" + "sync/atomic" + "time" + "unsafe" +) + +type state [2]time.Duration + +type fibonacciBackoff struct { + state unsafe.Pointer +} + +// Fibonacci is a wrapper around Retry that uses a Fibonacci backoff. See +// NewFibonacci. +func Fibonacci(ctx context.Context, base time.Duration, f RetryFunc) error { + return Do(ctx, NewFibonacci(base), f) +} + +// NewFibonacci creates a new Fibonacci backoff using the starting value of +// base. The wait time is the sum of the previous two wait times on each failed +// attempt (1, 1, 2, 3, 5, 8, 13...). +// +// Once it overflows, the function constantly returns the maximum time.Duration +// for a 64-bit integer. +// +// It panics if the given base is less than zero. +func NewFibonacci(base time.Duration) Backoff { + if base <= 0 { + panic("base must be greater than 0") + } + + return &fibonacciBackoff{ + state: unsafe.Pointer(&state{0, base}), + } +} + +// Next implements Backoff. It is safe for concurrent use. +func (b *fibonacciBackoff) Next() (time.Duration, bool) { + for { + curr := atomic.LoadPointer(&b.state) + currState := (*state)(curr) + next := currState[0] + currState[1] + + if next <= 0 { + return math.MaxInt64, false + } + + if atomic.CompareAndSwapPointer(&b.state, curr, unsafe.Pointer(&state{currState[1], next})) { + return next, false + } + } +} diff --git a/vendor/github.com/sethvargo/go-retry/rand.go b/vendor/github.com/sethvargo/go-retry/rand.go new file mode 100644 index 0000000..4799fb0 --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/rand.go @@ -0,0 +1,54 @@ +package retry + +import ( + "math/rand" + "sync" +) + +type lockedSource struct { + src *rand.Rand + mu sync.Mutex +} + +var _ rand.Source64 = (*lockedSource)(nil) + +func newLockedRandom(seed int64) *lockedSource { + return &lockedSource{src: rand.New(rand.NewSource(seed))} +} + +// Int63 mimics math/rand.(*Rand).Int63 with mutex locked. +func (r *lockedSource) Int63() int64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.src.Int63() +} + +// Seed mimics math/rand.(*Rand).Seed with mutex locked. +func (r *lockedSource) Seed(seed int64) { + r.mu.Lock() + defer r.mu.Unlock() + r.src.Seed(seed) +} + +// Uint64 mimics math/rand.(*Rand).Uint64 with mutex locked. +func (r *lockedSource) Uint64() uint64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.src.Uint64() +} + +// Int63n mimics math/rand.(*Rand).Int63n with mutex locked. +func (r *lockedSource) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + if n&(n-1) == 0 { // n is power of two, can mask + return r.Int63() & (n - 1) + } + max := int64((1 << 63) - 1 - (1<<63)%uint64(n)) + v := r.Int63() + for v > max { + v = r.Int63() + } + return v % n +} diff --git a/vendor/github.com/sethvargo/go-retry/retry.go b/vendor/github.com/sethvargo/go-retry/retry.go new file mode 100644 index 0000000..a3eeadc --- /dev/null +++ b/vendor/github.com/sethvargo/go-retry/retry.go @@ -0,0 +1,104 @@ +// Package retry provides helpers for retrying. 
+// +// This package defines flexible interfaces for retrying Go functions that may +// be flakey or eventually consistent. It abstracts the "backoff" (how long to +// wait between tries) and "retry" (execute the function again) mechanisms for +// maximum flexibility. Furthermore, everything is an interface, so you can +// define your own implementations. +// +// The package is modeled after Go's built-in HTTP package, making it easy to +// customize the built-in backoff with your own custom logic. Additionally, +// callers specify which errors are retryable by wrapping them. This is helpful +// with complex operations where only certain results should retry. +package retry + +import ( + "context" + "errors" + "time" +) + +// RetryFunc is a function passed to [Do]. +type RetryFunc func(ctx context.Context) error + +// RetryFuncValue is a function passed to [Do] which returns a value. +type RetryFuncValue[T any] func(ctx context.Context) (T, error) + +type retryableError struct { + err error +} + +// RetryableError marks an error as retryable. +func RetryableError(err error) error { + if err == nil { + return nil + } + return &retryableError{err} +} + +// Unwrap implements error wrapping. +func (e *retryableError) Unwrap() error { + return e.err +} + +// Error returns the error string. +func (e *retryableError) Error() string { + if e.err == nil { + return "retryable: " + } + return "retryable: " + e.err.Error() +} + +func DoValue[T any](ctx context.Context, b Backoff, f RetryFuncValue[T]) (T, error) { + var nilT T + + for { + // Return immediately if ctx is canceled + select { + case <-ctx.Done(): + return nilT, ctx.Err() + default: + } + + v, err := f(ctx) + if err == nil { + return v, nil + } + + // Not retryable + var rerr *retryableError + if !errors.As(err, &rerr) { + return nilT, err + } + + next, stop := b.Next() + if stop { + return nilT, rerr.Unwrap() + } + + // ctx.Done() has priority, so we test it alone first + select { + case <-ctx.Done(): + return nilT, ctx.Err() + default: + } + + t := time.NewTimer(next) + select { + case <-ctx.Done(): + t.Stop() + return nilT, ctx.Err() + case <-t.C: + continue + } + } +} + +// Do wraps a function with a backoff to retry. The provided context is the same +// context passed to the [RetryFunc]. +func Do(ctx context.Context, b Backoff, f RetryFunc) error { + _, err := DoValue(ctx, b, func(ctx context.Context) (*struct{}, error) { + return nil, f(ctx) + }) + return err +} diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml deleted file mode 100644 index 6326d40..0000000 --- a/vendor/github.com/shopspring/decimal/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -arch: - - amd64 - - ppc64le - -go: - - 1.7.x - - 1.14.x - - 1.15.x - - 1.16.x - - 1.17.x - - tip - -install: - - go build . 
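Tying retry.Do, RetryableError, and the backoff modifiers above together, here is a minimal sketch of the composition order the README recommends (cap individual sleeps with WithCappedDuration before bounding the total with WithMaxDuration); the attempt counter, durations, and error text are illustrative only:

```golang
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/sethvargo/go-retry"
)

func main() {
	b := retry.NewFibonacci(500 * time.Millisecond)
	b = retry.WithCappedDuration(2*time.Second, b) // cap each individual sleep first
	b = retry.WithMaxDuration(10*time.Second, b)   // then bound the total retry time
	b = retry.WithMaxRetries(8, b)                 // and give up after at most 8 retries

	attempts := 0
	err := retry.Do(context.Background(), b, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			// Only errors wrapped by RetryableError are retried; anything else aborts.
			return retry.RetryableError(errors.New("transient failure"))
		}
		return nil
	})
	fmt.Println(attempts, err) // expected: 3 <nil>
}
```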
- -script: - - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md index aea6115..432d0fd 100644 --- a/vendor/github.com/shopspring/decimal/CHANGELOG.md +++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md @@ -1,3 +1,30 @@ +## Decimal v1.4.0 +#### BREAKING +- Drop support for Go version older than 1.10 [#361](https://github.com/shopspring/decimal/pull/361) + +#### FEATURES +- Add implementation of natural logarithm [#339](https://github.com/shopspring/decimal/pull/339) [#357](https://github.com/shopspring/decimal/pull/357) +- Add improved implementation of power operation [#358](https://github.com/shopspring/decimal/pull/358) +- Add Compare method which forwards calls to Cmp [#346](https://github.com/shopspring/decimal/pull/346) +- Add NewFromBigRat constructor [#288](https://github.com/shopspring/decimal/pull/288) +- Add NewFromUint64 constructor [#352](https://github.com/shopspring/decimal/pull/352) + +#### ENHANCEMENTS +- Migrate to Github Actions [#245](https://github.com/shopspring/decimal/pull/245) [#340](https://github.com/shopspring/decimal/pull/340) +- Fix examples for RoundDown, RoundFloor, RoundUp, and RoundCeil [#285](https://github.com/shopspring/decimal/pull/285) [#328](https://github.com/shopspring/decimal/pull/328) [#341](https://github.com/shopspring/decimal/pull/341) +- Use Godoc standard to mark deprecated Equals and StringScaled methods [#342](https://github.com/shopspring/decimal/pull/342) +- Removed unnecessary min function for RescalePair method [#265](https://github.com/shopspring/decimal/pull/265) +- Avoid reallocation of initial slice in MarshalBinary (GobEncode) [#355](https://github.com/shopspring/decimal/pull/355) +- Optimize NumDigits method [#301](https://github.com/shopspring/decimal/pull/301) [#356](https://github.com/shopspring/decimal/pull/356) +- Optimize BigInt method [#359](https://github.com/shopspring/decimal/pull/359) +- Support scanning uint64 [#131](https://github.com/shopspring/decimal/pull/131) [#364](https://github.com/shopspring/decimal/pull/364) +- Add docs section with alternative libraries [#363](https://github.com/shopspring/decimal/pull/363) + +#### BUGFIXES +- Fix incorrect calculation of decimal modulo [#258](https://github.com/shopspring/decimal/pull/258) [#317](https://github.com/shopspring/decimal/pull/317) +- Allocate new(big.Int) in Copy method to deeply clone it [#278](https://github.com/shopspring/decimal/pull/278) +- Fix overflow edge case in QuoRem method [#322](https://github.com/shopspring/decimal/pull/322) + ## Decimal v1.3.1 #### ENHANCEMENTS diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md index 2e35df0..318c9df 100644 --- a/vendor/github.com/shopspring/decimal/README.md +++ b/vendor/github.com/shopspring/decimal/README.md @@ -1,6 +1,8 @@ # decimal -[![Build Status](https://app.travis-ci.com/shopspring/decimal.svg?branch=master)](https://app.travis-ci.com/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) +[![ci](https://github.com/shopspring/decimal/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/shopspring/decimal/actions/workflows/ci.yml) 
+[![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) +[![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) Arbitrary-precision fixed-point decimal numbers in go. @@ -20,7 +22,12 @@ Run `go get github.com/shopspring/decimal` ## Requirements -Decimal library requires Go version `>=1.7` +Decimal library requires Go version `>=1.10` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + ## Usage @@ -57,14 +64,16 @@ func main() { } ``` -## Documentation - -http://godoc.org/github.com/shopspring/decimal +## Alternative libraries -## Production Usage +When working with decimal numbers, you might face problems this library is not perfectly suited for. +Fortunately, thanks to the wonderful community we have a dozen other libraries that you can choose from. +Explore other alternatives to find the one that best fits your needs :) -* [Spring](https://shopspring.com/), since August 14, 2014. -* If you are using this in production, please let us know! +* [cockroachdb/apd](https://github.com/cockroachdb/apd) - arbitrary precision, mutable and rich API similar to `big.Int`, more performant than this library +* [alpacahq/alpacadecimal](https://github.com/alpacahq/alpacadecimal) - high performance, low precision (12 digits), fully compatible API with this library +* [govalues/decimal](https://github.com/govalues/decimal) - high performance, zero-allocation, low precision (19 digits) +* [greatcloak/decimal](https://github.com/greatcloak/decimal) - fork focusing on billing and e-commerce web application related use cases, includes out-of-the-box BSON marshaling support ## FAQ diff --git a/vendor/github.com/shopspring/decimal/const.go b/vendor/github.com/shopspring/decimal/const.go new file mode 100644 index 0000000..e5d6fa8 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/const.go @@ -0,0 +1,63 @@ +package decimal + +import ( + "strings" +) + +const ( + strLn10 = 
"2.302585092994045684017991454684364207601101488628772976033327900967572609677352480235997205089598298341967784042286248633409525465082806756666287369098781689482907208325554680843799894826233198528393505308965377732628846163366222287698219886746543667474404243274365155048934314939391479619404400222105101714174800368808401264708068556774321622835522011480466371565912137345074785694768346361679210180644507064800027750268491674655058685693567342067058113642922455440575892572420824131469568901675894025677631135691929203337658714166023010570308963457207544037084746994016826928280848118428931484852494864487192780967627127577539702766860595249671667418348570442250719796500471495105049221477656763693866297697952211071826454973477266242570942932258279850258550978526538320760672631716430950599508780752371033310119785754733154142180842754386359177811705430982748238504564801909561029929182431823752535770975053956518769751037497088869218020518933950723853920514463419726528728696511086257149219884997874887377134568620916705849807828059751193854445009978131146915934666241071846692310107598438319191292230792503747298650929009880391941702654416816335727555703151596113564846546190897042819763365836983716328982174407366009162177850541779276367731145041782137660111010731042397832521894898817597921798666394319523936855916447118246753245630912528778330963604262982153040874560927760726641354787576616262926568298704957954913954918049209069438580790032763017941503117866862092408537949861264933479354871737451675809537088281067452440105892444976479686075120275724181874989395971643105518848195288330746699317814634930000321200327765654130472621883970596794457943468343218395304414844803701305753674262153675579814770458031413637793236291560128185336498466942261465206459942072917119370602444929358037007718981097362533224548366988505528285966192805098447175198503666680874970496982273220244823343097169111136813588418696549323714996941979687803008850408979618598756579894836445212043698216415292987811742973332588607915912510967187510929248475023930572665446276200923068791518135803477701295593646298412366497023355174586195564772461857717369368404676577047874319780573853271810933883496338813069945569399346101090745616033312247949360455361849123333063704751724871276379140924398331810164737823379692265637682071706935846394531616949411701841938119405416449466111274712819705817783293841742231409930022911502362192186723337268385688273533371925103412930705632544426611429765388301822384091026198582888433587455960453004548370789052578473166283701953392231047527564998119228742789713715713228319641003422124210082180679525276689858180956119208391760721080919923461516952599099473782780648128058792731993893453415320185969711021407542282796298237068941764740642225757212455392526179373652434440560595336591539160312524480149313234572453879524389036839236450507881731359711238145323701508413491122324390927681724749607955799151363982881058285740538000653371655553014196332241918087621018204919492651483892" +) + +var ( + ln10 = newConstApproximation(strLn10) +) + +type constApproximation struct { + exact Decimal + approximations []Decimal +} + +func newConstApproximation(value string) constApproximation { + parts := strings.Split(value, ".") + coeff, fractional := parts[0], parts[1] + + coeffLen := len(coeff) + maxPrecision := len(fractional) + + var approximations []Decimal + for p := 1; p < maxPrecision; p *= 2 { + r := RequireFromString(value[:coeffLen+p]) + approximations = append(approximations, r) + } + + return constApproximation{ + 
RequireFromString(value), + approximations, + } +} + +// Returns the smallest approximation available that's at least as precise +// as the passed precision (places after decimal point), i.e. Floor[ log2(precision) ] + 1 +func (c constApproximation) withPrecision(precision int32) Decimal { + i := 0 + + if precision >= 1 { + i++ + } + + for precision >= 16 { + precision /= 16 + i += 4 + } + + for precision >= 2 { + precision /= 2 + i++ + } + + if i >= len(c.approximations) { + return c.exact + } + + return c.approximations[i] +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go index 84405ec..a37a230 100644 --- a/vendor/github.com/shopspring/decimal/decimal.go +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -4,14 +4,14 @@ // // The best way to create a new Decimal is to use decimal.NewFromString, ex: // -// n, err := decimal.NewFromString("-123.4567") -// n.String() // output: "-123.4567" +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" // // To use Decimal as part of a struct: // -// type Struct struct { -// Number Decimal -// } +// type StructName struct { +// Number Decimal +// } // // Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. package decimal @@ -32,18 +32,31 @@ import ( // // Example: // -// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d1.String() // output: "0.6666666666666667" -// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -// d2.String() // output: "0.0000666666666667" -// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -// d3.String() // output: "6666.6666666666666667" -// decimal.DivisionPrecision = 3 -// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d4.String() // output: "0.667" -// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" var DivisionPrecision = 16 +// PowPrecisionNegativeExponent specifies the maximum precision of the result (digits after decimal point) +// when calculating decimal power. Only used for cases where the exponent is a negative number. +// This constant applies to Pow, PowInt32 and PowBigInt methods, PowWithPrecision method is not constrained by it. +// +// Example: +// +// d1, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d1.String() // output: "0.0043282548476454" +// +// decimal.PowPrecisionNegativeExponent = 24 +// d2, err := decimal.NewFromFloat(15.2).PowInt32(-2) +// d2.String() // output: "0.004328254847645429362881" +var PowPrecisionNegativeExponent = 16 + // MarshalJSONWithoutQuotes should be set to true if you want the decimal to // be JSON marshaled as a number, instead of as a string. // WARNING: this is dangerous for decimals with many digits, since many JSON @@ -91,12 +104,12 @@ func New(value int64, exp int32) Decimal { } } -// NewFromInt converts a int64 to Decimal. +// NewFromInt converts an int64 to Decimal. 
// // Example: // -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" func NewFromInt(value int64) Decimal { return Decimal{ value: big.NewInt(value), @@ -104,12 +117,12 @@ func NewFromInt(value int64) Decimal { } } -// NewFromInt32 converts a int32 to Decimal. +// NewFromInt32 converts an int32 to Decimal. // // Example: // -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" func NewFromInt32(value int32) Decimal { return Decimal{ value: big.NewInt(int64(value)), @@ -117,6 +130,18 @@ func NewFromInt32(value int32) Decimal { } } +// NewFromUint64 converts an uint64 to Decimal. +// +// Example: +// +// NewFromUint64(123).String() // output: "123" +func NewFromUint64(value uint64) Decimal { + return Decimal{ + value: new(big.Int).SetUint64(value), + exp: 0, + } +} + // NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp func NewFromBigInt(value *big.Int, exp int32) Decimal { return Decimal{ @@ -125,15 +150,33 @@ func NewFromBigInt(value *big.Int, exp int32) Decimal { } } +// NewFromBigRat returns a new Decimal from a big.Rat. The numerator and +// denominator are divided and rounded to the given precision. +// +// Example: +// +// d1 := NewFromBigRat(big.NewRat(0, 1), 0) // output: "0" +// d2 := NewFromBigRat(big.NewRat(4, 5), 1) // output: "0.8" +// d3 := NewFromBigRat(big.NewRat(1000, 3), 3) // output: "333.333" +// d4 := NewFromBigRat(big.NewRat(2, 7), 4) // output: "0.2857" +func NewFromBigRat(value *big.Rat, precision int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value.Num()), + exp: 0, + }.DivRound(Decimal{ + value: new(big.Int).Set(value.Denom()), + exp: 0, + }, precision) +} + // NewFromString returns a new Decimal from a string representation. // Trailing zeroes are not trimmed. // // Example: // -// d, err := NewFromString("-123.45") -// d2, err := NewFromString(".0001") -// d3, err := NewFromString("1.47000") -// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") func NewFromString(value string) (Decimal, error) { originalInput := value var intString string @@ -211,15 +254,14 @@ func NewFromString(value string) (Decimal, error) { // // Example: // -// r := regexp.MustCompile("[$,]") -// d1, err := NewFromFormattedString("$5,125.99", r) +// r := regexp.MustCompile("[$,]") +// d1, err := NewFromFormattedString("$5,125.99", r) // -// r2 := regexp.MustCompile("[_]") -// d2, err := NewFromFormattedString("1_000_000", r2) -// -// r3 := regexp.MustCompile("[USD\\s]") -// d3, err := NewFromFormattedString("5000 USD", r3) +// r2 := regexp.MustCompile("[_]") +// d2, err := NewFromFormattedString("1_000_000", r2) // +// r3 := regexp.MustCompile("[USD\\s]") +// d3, err := NewFromFormattedString("5000 USD", r3) func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { parsedValue := replRegexp.ReplaceAllString(value, "") d, err := NewFromString(parsedValue) @@ -230,13 +272,12 @@ func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, e } // RequireFromString returns a new Decimal from a string representation -// or panics if NewFromString would have returned an error. +// or panics if NewFromString had returned an error. 
// // Example: // -// d := RequireFromString("-123.45") -// d2 := RequireFromString(".0001") -// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") func RequireFromString(value string) Decimal { dec, err := NewFromString(value) if err != nil { @@ -332,8 +373,7 @@ func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { // // Example: // -// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" -// +// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" func NewFromFloatWithExponent(value float64, exp int32) Decimal { if math.IsNaN(value) || math.IsInf(value, 0) { panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) @@ -418,7 +458,7 @@ func NewFromFloatWithExponent(value float64, exp int32) Decimal { func (d Decimal) Copy() Decimal { d.ensureInitialized() return Decimal{ - value: &(*d.value), + value: new(big.Int).Set(d.value), exp: d.exp, } } @@ -430,7 +470,7 @@ func (d Decimal) Copy() Decimal { // // Example: // -// d := New(12345, -4) +// d := New(12345, -4) // d2 := d.rescale(-1) // d3 := d2.rescale(-4) // println(d1) @@ -442,7 +482,6 @@ func (d Decimal) Copy() Decimal { // 1.2345 // 1.2 // 1.2000 -// func (d Decimal) rescale(exp int32) Decimal { d.ensureInitialized() @@ -552,11 +591,13 @@ func (d Decimal) Div(d2 Decimal) Decimal { return d.DivRound(d2, int32(DivisionPrecision)) } -// QuoRem does divsion with remainder +// QuoRem does division with remainder // d.QuoRem(d2,precision) returns quotient q and remainder r such that -// d = d2 * q + r, q an integer multiple of 10^(-precision) -// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// +// d = d2 * q + r, q an integer multiple of 10^(-precision) +// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 +// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// // Note that precision<0 is allowed as input. func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { d.ensureInitialized() @@ -565,7 +606,7 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { panic("decimal division by 0") } scale := -precision - e := int64(d.exp - d2.exp - scale) + e := int64(d.exp) - int64(d2.exp) - int64(scale) if e > math.MaxInt32 || e < math.MinInt32 { panic("overflow in decimal QuoRem") } @@ -599,8 +640,10 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { // DivRound divides and rounds to a given precision // i.e. to an integer multiple of 10^(-precision) -// for a positive quotient digit 5 is rounded up, away from 0 -// if the quotient is negative then digit 5 is rounded down, away from 0 +// +// for a positive quotient digit 5 is rounded up, away from 0 +// if the quotient is negative then digit 5 is rounded down, away from 0 +// // Note that precision<0 is allowed as input. func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { // QuoRem already checks initialization @@ -628,24 +671,278 @@ func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { // Mod returns d % d2. func (d Decimal) Mod(d2 Decimal) Decimal { - quo := d.Div(d2).Truncate(0) - return d.Sub(d2.Mul(quo)) + _, r := d.QuoRem(d2, 0) + return r } -// Pow returns d to the power d2 +// Pow returns d to the power of d2. +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. 
+// +// Pow returns 0 (zero-value of Decimal) instead of error for power operation edge cases, to handle those edge cases use PowWithPrecision +// Edge cases not handled by Pow: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1 := d1.Pow(d2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2 := d3.Pow(d4) +// res2.String() // output: "10118.08037125" func (d Decimal) Pow(d2 Decimal) Decimal { - var temp Decimal - if d2.IntPart() == 0 { - return NewFromFloat(1) + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{} + } + if expSign == 1 { + return Decimal{zeroInt, 0} + } + if expSign == -1 { + return Decimal{} + } + } + + if expSign == 0 { + return Decimal{oneInt, 0} + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{} + } + + intPartPow, _ := d.PowBigInt(expIntPart.value) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + precision := digitsBase + + if digitsExponent > precision { + precision += digitsExponent + } + + precision += 6 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(-d.exp + int32(precision)) + if err != nil { + return Decimal{} + } + + // Join integer and fractional part, + // base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res +} + +// PowWithPrecision returns d to the power of d2. +// Precision parameter specifies minimum precision of the result (digits after decimal point). +// Returned decimal is not rounded to 'precision' places after decimal point. 
+// +// PowWithPrecision returns error when: +// - 0 ** 0 => undefined value +// - 0 ** y, where y < 0 => infinity +// - x ** y, where x < 0 and y is non-integer decimal => imaginary value +// +// Example: +// +// d1 := decimal.NewFromFloat(4.0) +// d2 := decimal.NewFromFloat(4.0) +// res1, err := d1.PowWithPrecision(d2, 2) +// res1.String() // output: "256" +// +// d3 := decimal.NewFromFloat(5.0) +// d4 := decimal.NewFromFloat(5.73) +// res2, err := d3.PowWithPrecision(d4, 5) +// res2.String() // output: "10118.080371595015625" +// +// d5 := decimal.NewFromFloat(-3.0) +// d6 := decimal.NewFromFloat(-6.0) +// res3, err := d5.PowWithPrecision(d6, 10) +// res3.String() // output: "0.0013717421" +func (d Decimal) PowWithPrecision(d2 Decimal, precision int32) (Decimal, error) { + baseSign := d.Sign() + expSign := d2.Sign() + + if baseSign == 0 { + if expSign == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + if expSign == 1 { + return Decimal{zeroInt, 0}, nil + } + if expSign == -1 { + return Decimal{}, fmt.Errorf("cannot represent infinity value of 0 ** y, where y < 0") + } + } + + if expSign == 0 { + return Decimal{oneInt, 0}, nil + } + + // TODO: optimize extraction of fractional part + one := Decimal{oneInt, 0} + expIntPart, expFracPart := d2.QuoRem(one, 0) + + if baseSign == -1 && !expFracPart.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent imaginary value of x ** y, where x < 0 and y is non-integer decimal") + } + + intPartPow, _ := d.powBigIntWithPrecision(expIntPart.value, precision) + + // if exponent is an integer we don't need to calculate d1**frac(d2) + if expFracPart.value.Sign() == 0 { + return intPartPow, nil + } + + // TODO: optimize NumDigits for more performant precision adjustment + digitsBase := d.NumDigits() + digitsExponent := d2.NumDigits() + + if int32(digitsBase) > precision { + precision = int32(digitsBase) + } + if int32(digitsExponent) > precision { + precision += int32(digitsExponent) + } + // increase precision by 10 to compensate for errors in further calculations + precision += 10 + + // Calculate x ** frac(y), where + // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y)) + fracPartPow, err := d.Abs().Ln(precision) + if err != nil { + return Decimal{}, err + } + + fracPartPow = fracPartPow.Mul(expFracPart) + + fracPartPow, err = fracPartPow.ExpTaylor(precision) + if err != nil { + return Decimal{}, err + } + + // Join integer and fractional part, + // base ** (expBase + expFrac) = base ** expBase * base ** expFrac + res := intPartPow.Mul(fracPartPow) + + return res, nil +} + +// PowInt32 returns d to the power of exp, where exp is int32. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. 
+// +// Example: +// +// d1, err := decimal.NewFromFloat(4.0).PowInt32(4) +// d1.String() // output: "256" +// +// d2, err := decimal.NewFromFloat(3.13).PowInt32(5) +// d2.String() // output: "300.4150512793" +func (d Decimal) PowInt32(exp int32) (Decimal, error) { + if d.IsZero() && exp == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") + } + + isExpNeg := exp < 0 + exp = abs(exp) + + n, result := d, New(1, 0) + + for exp > 0 { + if exp%2 == 1 { + result = result.Mul(n) + } + exp /= 2 + + if exp > 0 { + n = n.Mul(n) + } + } + + if isExpNeg { + return New(1, 0).DivRound(result, int32(PowPrecisionNegativeExponent)), nil + } + + return result, nil +} + +// PowBigInt returns d to the power of exp, where exp is big.Int. +// Only returns error when d and exp is 0, thus result is undefined. +// +// When exponent is negative the returned decimal will have maximum precision of PowPrecisionNegativeExponent places after decimal point. +// +// Example: +// +// d1, err := decimal.NewFromFloat(3.0).PowBigInt(big.NewInt(3)) +// d1.String() // output: "27" +// +// d2, err := decimal.NewFromFloat(629.25).PowBigInt(big.NewInt(5)) +// d2.String() // output: "98654323103449.5673828125" +func (d Decimal) PowBigInt(exp *big.Int) (Decimal, error) { + return d.powBigIntWithPrecision(exp, int32(PowPrecisionNegativeExponent)) +} + +func (d Decimal) powBigIntWithPrecision(exp *big.Int, precision int32) (Decimal, error) { + if d.IsZero() && exp.Sign() == 0 { + return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0") } - temp = d.Pow(d2.Div(NewFromFloat(2))) - if d2.IntPart()%2 == 0 { - return temp.Mul(temp) + + tmpExp := new(big.Int).Set(exp) + isExpNeg := exp.Sign() < 0 + + if isExpNeg { + tmpExp.Abs(tmpExp) + } + + n, result := d, New(1, 0) + + for tmpExp.Sign() > 0 { + if tmpExp.Bit(0) == 1 { + result = result.Mul(n) + } + tmpExp.Rsh(tmpExp, 1) + + if tmpExp.Sign() > 0 { + n = n.Mul(n) + } } - if d2.IntPart() > 0 { - return temp.Mul(temp).Mul(d) + + if isExpNeg { + return New(1, 0).DivRound(result, precision), nil } - return temp.Mul(temp).Div(d) + + return result, nil } // ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. @@ -655,9 +952,8 @@ func (d Decimal) Pow(d2 Decimal) Decimal { // // Example: // -// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" -// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" -// +// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" +// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { // Algorithm based on Variable precision exponential function. // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. 
@@ -747,15 +1043,14 @@ func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { // // Example: // -// d, err := NewFromFloat(26.1).ExpTaylor(2).String() -// d.String() // output: "216314672147.06" -// -// NewFromFloat(26.1).ExpTaylor(20).String() -// d.String() // output: "216314672147.05767284062928674083" +// d, err := NewFromFloat(26.1).ExpTaylor(2).String() +// d.String() // output: "216314672147.06" // -// NewFromFloat(26.1).ExpTaylor(-10).String() -// d.String() // output: "220000000000" +// NewFromFloat(26.1).ExpTaylor(20).String() +// d.String() // output: "216314672147.05767284062928674083" // +// NewFromFloat(26.1).ExpTaylor(-10).String() +// d.String() // output: "220000000000" func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only if d.IsZero() { @@ -812,14 +1107,162 @@ func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { return result, nil } +// Ln calculates natural logarithm of d. +// Precision argument specifies how precise the result must be (number of digits after decimal point). +// Negative precision is allowed. +// +// Example: +// +// d1, err := NewFromFloat(13.3).Ln(2) +// d1.String() // output: "2.59" +// +// d2, err := NewFromFloat(579.161).Ln(10) +// d2.String() // output: "6.3615805046" +func (d Decimal) Ln(precision int32) (Decimal, error) { + // Algorithm based on The Use of Iteration Methods for Approximating the Natural Logarithm, + // James F. Epperson, The American Mathematical Monthly, Vol. 96, No. 9, November 1989, pp. 831-835. + if d.IsNegative() { + return Decimal{}, fmt.Errorf("cannot calculate natural logarithm for negative decimals") + } + + if d.IsZero() { + return Decimal{}, fmt.Errorf("cannot represent natural logarithm of 0, result: -infinity") + } + + calcPrecision := precision + 2 + z := d.Copy() + + var comp1, comp3, comp2, comp4, reduceAdjust Decimal + comp1 = z.Sub(Decimal{oneInt, 0}) + comp3 = Decimal{oneInt, -1} + + // for decimal in range [0.9, 1.1] where ln(d) is close to 0 + usePowerSeries := false + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // reduce input decimal to range [0.1, 1) + expDelta := int32(z.NumDigits()) + z.exp + z.exp -= expDelta + + // Input decimal was reduced by factor of 10^expDelta, thus we will need to add + // ln(10^expDelta) = expDelta * ln(10) + // to the result to compensate that + ln10 := ln10.withPrecision(calcPrecision) + reduceAdjust = NewFromInt32(expDelta) + reduceAdjust = reduceAdjust.Mul(ln10) + + comp1 = z.Sub(Decimal{oneInt, 0}) + + if comp1.Abs().Cmp(comp3) <= 0 { + usePowerSeries = true + } else { + // initial estimate using floats + zFloat := z.InexactFloat64() + comp1 = NewFromFloat(math.Log(zFloat)) + } + } + + epsilon := Decimal{oneInt, -calcPrecision} + + if usePowerSeries { + // Power Series - https://en.wikipedia.org/wiki/Logarithm#Power_series + // Calculating n-th term of formula: ln(z+1) = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + // until the difference between current and next term is smaller than epsilon. 
+ // Coverage quite fast for decimals close to 1.0 + + // z + 2 + comp2 = comp1.Add(Decimal{twoInt, 0}) + // z / (z + 2) + comp3 = comp1.DivRound(comp2, calcPrecision) + // 2 * (z / (z + 2)) + comp1 = comp3.Add(comp3) + comp2 = comp1.Copy() + + for n := 1; ; n++ { + // 2 * (z / (z+2))^(2n+1) + comp2 = comp2.Mul(comp3).Mul(comp3) + + // 1 / (2n+1) * 2 * (z / (z+2))^(2n+1) + comp4 = NewFromInt(int64(2*n + 1)) + comp4 = comp2.DivRound(comp4, calcPrecision) + + // comp1 = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ] + comp1 = comp1.Add(comp4) + + if comp4.Abs().Cmp(epsilon) <= 0 { + break + } + } + } else { + // Halley's Iteration. + // Calculating n-th term of formula: a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z), + // until the difference between current and next term is smaller than epsilon + var prevStep Decimal + maxIters := calcPrecision*2 + 10 + + for i := int32(0); i < maxIters; i++ { + // exp(a_n) + comp3, _ = comp1.ExpTaylor(calcPrecision) + // exp(a_n) - z + comp2 = comp3.Sub(z) + // 2 * (exp(a_n) - z) + comp2 = comp2.Add(comp2) + // exp(a_n) + z + comp4 = comp3.Add(z) + // 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp3 = comp2.DivRound(comp4, calcPrecision) + // comp1 = a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z) + comp1 = comp1.Sub(comp3) + + if prevStep.Add(comp3).IsZero() { + // If iteration steps oscillate we should return early and prevent an infinity loop + // NOTE(mwoss): This should be quite a rare case, returning error is not necessary + break + } + + if comp3.Abs().Cmp(epsilon) <= 0 { + break + } + + prevStep = comp3 + } + } + + comp1 = comp1.Add(reduceAdjust) + + return comp1.Round(precision), nil +} + // NumDigits returns the number of digits of the decimal coefficient (d.Value) -// Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part func (d Decimal) NumDigits() int { - // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string - if d.IsNegative() { - return len(d.value.String()) - 1 + if d.value == nil { + return 1 + } + + if d.value.IsInt64() { + i64 := d.value.Int64() + // restrict fast path to integers with exact conversion to float64 + if i64 <= (1<<53) && i64 >= -(1<<53) { + if i64 == 0 { + return 1 + } + return int(math.Log10(math.Abs(float64(i64)))) + 1 + } + } + + estimatedNumDigits := int(float64(d.value.BitLen()) / math.Log2(10)) + + // estimatedNumDigits (lg10) may be off by 1, need to verify + digitsBigInt := big.NewInt(int64(estimatedNumDigits)) + errorCorrectionUnit := digitsBigInt.Exp(tenInt, digitsBigInt, nil) + + if d.value.CmpAbs(errorCorrectionUnit) >= 0 { + return estimatedNumDigits + 1 } - return len(d.value.String()) + + return estimatedNumDigits } // IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false. @@ -851,10 +1294,9 @@ func abs(n int32) int32 { // Cmp compares the numbers represented by d and d2 and returns: // -// -1 if d < d2 -// 0 if d == d2 -// +1 if d > d2 -// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 func (d Decimal) Cmp(d2 Decimal) int { d.ensureInitialized() d2.ensureInitialized() @@ -868,12 +1310,21 @@ func (d Decimal) Cmp(d2 Decimal) int { return rd.value.Cmp(rd2.value) } +// Compare compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +func (d Decimal) Compare(d2 Decimal) int { + return d.Cmp(d2) +} + // Equal returns whether the numbers represented by d and d2 are equal. 
func (d Decimal) Equal(d2 Decimal) bool { return d.Cmp(d2) == 0 } -// Equals is deprecated, please use Equal method instead +// Deprecated: Equals is deprecated, please use Equal method instead. func (d Decimal) Equals(d2 Decimal) bool { return d.Equal(d2) } @@ -905,7 +1356,6 @@ func (d Decimal) LessThanOrEqual(d2 Decimal) bool { // -1 if d < 0 // 0 if d == 0 // +1 if d > 0 -// func (d Decimal) Sign() int { if d.value == nil { return 0 @@ -968,9 +1418,7 @@ func (d Decimal) IntPart() int64 { // BigInt returns integer component of the decimal as a BigInt. func (d Decimal) BigInt() *big.Int { scaledD := d.rescale(0) - i := &big.Int{} - i.SetString(scaledD.String(), 10) - return i + return scaledD.value } // BigFloat returns decimal as BigFloat. @@ -1014,13 +1462,12 @@ func (d Decimal) InexactFloat64() float64 { // // Example: // -// d := New(-12345, -3) -// println(d.String()) +// d := New(-12345, -3) +// println(d.String()) // // Output: // -// -12.345 -// +// -12.345 func (d Decimal) String() string { return d.string(true) } @@ -1030,14 +1477,13 @@ func (d Decimal) String() string { // // Example: // -// NewFromFloat(0).StringFixed(2) // output: "0.00" -// NewFromFloat(0).StringFixed(0) // output: "0" -// NewFromFloat(5.45).StringFixed(0) // output: "5" -// NewFromFloat(5.45).StringFixed(1) // output: "5.5" -// NewFromFloat(5.45).StringFixed(2) // output: "5.45" -// NewFromFloat(5.45).StringFixed(3) // output: "5.450" -// NewFromFloat(545).StringFixed(-1) // output: "550" -// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" func (d Decimal) StringFixed(places int32) string { rounded := d.Round(places) return rounded.string(false) @@ -1048,14 +1494,13 @@ func (d Decimal) StringFixed(places int32) string { // // Example: // -// NewFromFloat(0).StringFixedBank(2) // output: "0.00" -// NewFromFloat(0).StringFixedBank(0) // output: "0" -// NewFromFloat(5.45).StringFixedBank(0) // output: "5" -// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" -// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" -// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" -// NewFromFloat(545).StringFixedBank(-1) // output: "540" -// +// NewFromFloat(0).StringFixedBank(2) // output: "0.00" +// NewFromFloat(0).StringFixedBank(0) // output: "0" +// NewFromFloat(5.45).StringFixedBank(0) // output: "5" +// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" +// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" +// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" +// NewFromFloat(545).StringFixedBank(-1) // output: "540" func (d Decimal) StringFixedBank(places int32) string { rounded := d.RoundBank(places) return rounded.string(false) @@ -1073,9 +1518,8 @@ func (d Decimal) StringFixedCash(interval uint8) string { // // Example: // -// NewFromFloat(5.45).Round(1).String() // output: "5.5" -// NewFromFloat(545).Round(-1).String() // output: "550" -// +// NewFromFloat(5.45).Round(1).String() // output: "5.5" +// NewFromFloat(545).Round(-1).String() // output: "550" func (d Decimal) Round(places int32) Decimal { if d.exp == -places { return d @@ -1104,11 +1548,10 @@ func (d Decimal) Round(places int32) Decimal { // // Example: // -// 
NewFromFloat(545).RoundCeil(-2).String() // output: "600" -// NewFromFloat(500).RoundCeil(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5" -// +// NewFromFloat(545).RoundCeil(-2).String() // output: "600" +// NewFromFloat(500).RoundCeil(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.4" func (d Decimal) RoundCeil(places int32) Decimal { if d.exp >= -places { return d @@ -1130,11 +1573,10 @@ func (d Decimal) RoundCeil(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundFloor(-2).String() // output: "500" -// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4" -// +// NewFromFloat(545).RoundFloor(-2).String() // output: "500" +// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.5" func (d Decimal) RoundFloor(places int32) Decimal { if d.exp >= -places { return d @@ -1156,11 +1598,10 @@ func (d Decimal) RoundFloor(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundUp(-2).String() // output: "600" -// NewFromFloat(500).RoundUp(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4" -// +// NewFromFloat(545).RoundUp(-2).String() // output: "600" +// NewFromFloat(500).RoundUp(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.5" func (d Decimal) RoundUp(places int32) Decimal { if d.exp >= -places { return d @@ -1184,11 +1625,10 @@ func (d Decimal) RoundUp(places int32) Decimal { // // Example: // -// NewFromFloat(545).RoundDown(-2).String() // output: "500" -// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5" -// +// NewFromFloat(545).RoundDown(-2).String() // output: "500" +// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.4" func (d Decimal) RoundDown(places int32) Decimal { if d.exp >= -places { return d @@ -1209,13 +1649,12 @@ func (d Decimal) RoundDown(places int32) Decimal { // // Examples: // -// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" -// NewFromFloat(545).RoundBank(-1).String() // output: "540" -// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" -// NewFromFloat(546).RoundBank(-1).String() // output: "550" -// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" -// NewFromFloat(555).RoundBank(-1).String() // output: "560" -// +// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" +// NewFromFloat(545).RoundBank(-1).String() // output: "540" +// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" +// NewFromFloat(546).RoundBank(-1).String() // output: "550" +// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" +// NewFromFloat(555).RoundBank(-1).String() // output: "560" func (d Decimal) RoundBank(places int32) Decimal { round := 
d.Round(places) @@ -1237,11 +1676,13 @@ func (d Decimal) RoundBank(places int32) Decimal { // interval. The amount payable for a cash transaction is rounded to the nearest // multiple of the minimum currency unit available. The following intervals are // available: 5, 10, 25, 50 and 100; any other number throws a panic. -// 5: 5 cent rounding 3.43 => 3.45 -// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -// 25: 25 cent rounding 3.41 => 3.50 -// 50: 50 cent rounding 3.75 => 4.00 -// 100: 100 cent rounding 3.50 => 4.00 +// +// 5: 5 cent rounding 3.43 => 3.45 +// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) +// 25: 25 cent rounding 3.41 => 3.50 +// 50: 50 cent rounding 3.75 => 4.00 +// 100: 100 cent rounding 3.50 => 4.00 +// // For more details: https://en.wikipedia.org/wiki/Cash_rounding func (d Decimal) RoundCash(interval uint8) Decimal { var iVal *big.Int @@ -1310,8 +1751,7 @@ func (d Decimal) Ceil() Decimal { // // Example: // -// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" -// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" func (d Decimal) Truncate(precision int32) Decimal { d.ensureInitialized() if precision >= 0 && -precision > d.exp { @@ -1373,19 +1813,18 @@ func (d *Decimal) UnmarshalBinary(data []byte) error { // MarshalBinary implements the encoding.BinaryMarshaler interface. func (d Decimal) MarshalBinary() (data []byte, err error) { - // Write the exponent first since it's a fixed size - v1 := make([]byte, 4) - binary.BigEndian.PutUint32(v1, uint32(d.exp)) - - // Add the value - var v2 []byte - if v2, err = d.value.GobEncode(); err != nil { - return + // exp is written first, but encode value first to know output size + var valueData []byte + if valueData, err = d.value.GobEncode(); err != nil { + return nil, err } + // Write the exponent in front, since it's a fixed size + expData := make([]byte, 4, len(valueData)+4) + binary.BigEndian.PutUint32(expData, uint32(d.exp)) + // Return the byte array - data = append(v1, v2...) - return + return append(expData, valueData...), nil } // Scan implements the sql.Scanner interface for database deserialization. @@ -1408,6 +1847,11 @@ func (d *Decimal) Scan(value interface{}) error { *d = New(v, 0) return nil + case uint64: + // while clickhouse may send 0 in db as uint64 + *d = NewFromUint64(v) + return nil + default: // default is trying to interpret value stored as string str, err := unquoteIfQuoted(v) @@ -1455,7 +1899,8 @@ func (d *Decimal) GobDecode(data []byte) error { } // StringScaled first scales the decimal then calls .String() on it. -// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. +// +// Deprecated: buggy and unintuitive. Use StringFixed instead. func (d Decimal) StringScaled(exp int32) string { return d.rescale(exp).String() } @@ -1515,7 +1960,7 @@ func (d *Decimal) ensureInitialized() { // // To call this function with an array, you must do: // -// Min(arr[0], arr[1:]...) +// Min(arr[0], arr[1:]...) // // This makes it harder to accidentally call Min with 0 arguments. func Min(first Decimal, rest ...Decimal) Decimal { @@ -1532,7 +1977,7 @@ func Min(first Decimal, rest ...Decimal) Decimal { // // To call this function with an array, you must do: // -// Max(arr[0], arr[1:]...) +// Max(arr[0], arr[1:]...) // // This makes it harder to accidentally call Max with 0 arguments. 
func Max(first Decimal, rest ...Decimal) Decimal { @@ -1567,22 +2012,13 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { d1.ensureInitialized() d2.ensureInitialized() - if d1.exp == d2.exp { - return d1, d2 + if d1.exp < d2.exp { + return d1, d2.rescale(d1.exp) + } else if d1.exp > d2.exp { + return d1.rescale(d2.exp), d2 } - baseScale := min(d1.exp, d2.exp) - if baseScale != d1.exp { - return d1.rescale(baseScale), d2 - } - return d1, d2.rescale(baseScale) -} - -func min(x, y int32) int32 { - if x >= y { - return y - } - return x + return d1, d2 } func unquoteIfQuoted(value interface{}) (string, error) { @@ -1594,8 +2030,7 @@ func unquoteIfQuoted(value interface{}) (string, error) { case []byte: bytes = v default: - return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", - value, value) + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", value, value) } // If the amount is quoted, strip the quotes diff --git a/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md b/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md index b32a88d..92fb2c6 100644 --- a/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md +++ b/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md @@ -10,7 +10,7 @@ This document will guide you through the contribution process. There are a numbe If you find a bug, submit an [issue](https://github.com/vertica/vertica-sql-go/issues) with a complete and reproducible bug report. If the issue can't be reproduced, it will be closed. If you opened an issue, but figured out the answer later on your own, comment on the issue to let people know, then close the issue. -For issues (e.g. security related issues) that are **not suitable** to be reported publicly on the GitHub issue system, report your issues to [Vertica team](mailto:vertica-opensrc@microfocus.com) directly or file a case with Vertica support if you have a support account. +For issues (e.g. security related issues) that are **not suitable** to be reported publicly on the GitHub issue system, report your issues to [Vertica team](mailto:vertica-opensrc@opentext.com) directly or file a case with Vertica support if you have a support account. # Feature Requests @@ -75,17 +75,14 @@ were args are one of the following: | Query Argument | Description | Values | |----------------|-------------|--------| -| use_prepared_statements | whether to use client-side query interpolation or server-side argument binding | true = (default) use server-side bindings | -| | | false = user client side interpolation | -| tlsmode | the ssl policy for this connection | 'none' (default) = don't use SSL for this connection | -| | | 'server' = server must support SSL, but skip verification (INSECURE!) | -| | | 'server-strict' = server must support SSL | -| locator | host and port of the Vertica connection | (default) localhost:5433 -| user | Vertica user ID | (default) the userid of the running user | -| password | Vertica password for the connecting user | (default) (empty) -| - -**NOTE:** Since it's often a bad idea to put your password on the command line, you can set the VERTICA_TEST_PASSWORD environment variable. Even if environment variable is set, the "--password" flag will supercede it. +| use_prepared_statements | whether to use client-side query interpolation or server-side argument binding |
  • true = (default) use server-side bindings
  • false = use client side interpolation
  • | +| tlsmode | the ssl policy for this connection |
  • none (default) = don't use SSL for this connection
  • server = server must support SSL, but skip verification (INSECURE!)
  • server-strict = server must support SSL
  • custom = use custom TLS config (Need to generate certs with `resources/tests/genCerts.sh` in advance)
  • | +| locator | host and port of the Vertica connection | (default) localhost:5433 | +| user | Vertica user name | (default) dbadmin | +| password | Vertica password for the connecting user | (default) (empty) | +| oauth_access_token | the OAuth Access Token to connect to Vertica, only used for OAuth Authentication tests | (default) (empty) | + +**NOTE:** Since it's often a bad idea to put your password on the command line, you can set the VERTICA_TEST_PASSWORD environment variable. Even if environment variable is set, the "--password" flag will supercede it. Similarly, you can set the VERTICA_TEST_OAUTH_ACCESS_TOKEN environment variable instead of "--oauth_access_token" flag. For example: @@ -121,7 +118,7 @@ At this point, you're ready to make your changes! Feel free to ask for help; eve Every file in this project must use the following Apache 2.0 header (with the appropriate year or years in the "[yyyy]" box; if a copyright statement from another party is already present in the code, you may add the statement on top of the existing copyright statement): ``` -Copyright (c) [yyyy] Micro Focus or one of its affiliates. +Copyright (c) [yyyy] Open Text. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/LICENSE b/vendor/github.com/vertica/vertica-sql-go/LICENSE index 35d7464..aecd4bf 100644 --- a/vendor/github.com/vertica/vertica-sql-go/LICENSE +++ b/vendor/github.com/vertica/vertica-sql-go/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2019 Micro Focus + Copyright 2023 Open Text Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/README.md b/vendor/github.com/vertica/vertica-sql-go/README.md index bb0211e..bb37f75 100644 --- a/vendor/github.com/vertica/vertica-sql-go/README.md +++ b/vendor/github.com/vertica/vertica-sql-go/README.md @@ -8,7 +8,7 @@ vertica-sql-go is a native Go adapter for the Vertica (http://www.vertica.com) d Please check out [release notes](https://github.com/vertica/vertica-sql-go/releases) to learn about the latest improvements. -vertica-sql-go has been tested with Vertica 12.0.2 and Go 1.15/1.16/1.17/1.18/1.19. +vertica-sql-go has been tested with Vertica 23.3.0 and Go 1.16/1.17/1.18/1.19/1.20. ## Installation @@ -86,26 +86,22 @@ connDB, err := sql.Open("vertica", myDBConnectString) where *myDBConnectString* is of the form: ```Go -vertica://(user):(password)@(host):(port)/(database)?(queryArgs) +vertica://(user):(password)@(host):(port)/(database)[?arg1=value&...&argN=valueN] ``` -If the host is a literal IPv6 address it must be enclosed in square brackets. +All parameters must be escaped (to URL-encoded format). If the *host* is a literal IPv6 address it must be enclosed in square brackets. 
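For illustration, a minimal sketch of what a complete DSN might look like in practice (the host, credentials and database name below are hypothetical placeholders, not values taken from this patch):

```Go
package main

import (
	"database/sql"
	"log"

	_ "github.com/vertica/vertica-sql-go"
)

func main() {
	// Hypothetical DSN: TLS disabled, connection load balancing enabled.
	dsn := "vertica://dbadmin:secret@vertica.example.com:5433/mydb?tlsmode=none&connection_load_balance=1"

	db, err := sql.Open("vertica", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```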
Currently supported query arguments are: | Query Argument | Description | Values | |----------------|-------------|--------| -| use_prepared_statements | whether to use client-side query interpolation or server-side argument binding | 1 = (default) use server-side bindings | -| | | 0 = user client side interpolation **(LESS SECURE)** | -| connection_load_balance | whether to enable connection load balancing on the client side | 0 = (default) disable load balancing | -| | | 1 = enable load balancing | -| tlsmode | the ssl/tls policy for this connection | 'none' (default) = don't use SSL/TLS for this connection | -| | | 'server' = server must support SSL/TLS, but skip verification **(INSECURE!)** | -| | | 'server-strict' = server must support SSL/TLS | -| | | {customName} = use custom registered `tls.Config` (see "Using custom TLS config" section below) | -| backup_server_node | a list of backup hosts for the client to try to connect if the primary host is unreachable | a comma-seperated list of backup host-port pairs. E.g.
    'host1:port1,host2:port2,host3:port3' |
+| use_prepared_statements | Whether to use client-side query interpolation or server-side argument binding. | 1 = (default) use server-side bindings<br>0 = use client side interpolation **(LESS SECURE)** |
+| connection_load_balance | Whether to enable connection load balancing on the client side. | 0 = (default) disable load balancing<br>1 = enable load balancing |
+| tlsmode | The ssl/tls policy for this connection. | 'none' (default) = don't use SSL/TLS for this connection<br>'server' = server must support SSL/TLS, but skip verification **(INSECURE!)**<br>'server-strict' = server must support SSL/TLS<br>{customName} = use custom registered `tls.Config` (see "Using custom TLS config" section below) |
+| backup_server_node | A list of backup hosts for the client to try to connect to if the primary host is unreachable. | a comma-separated list of backup host-port pairs, e.g. 'host1:port1,host2:port2,host3:port3' |
+| client_label | Sets a label for the connection on the server. This value appears in the `client_label` column of the SESSIONS system table. | (default) vertica-sql-go-{version}-{pid}-{timestamp} |
+| autocommit | Controls whether the connection automatically commits transactions. | 1 = (default) on
    0 = off| +| oauth_access_token | To authenticate via OAuth, provide an OAuth Access Token that authorizes a user to the database. | unspecified by default, if specified then *user* is optional | +| workload | Sets workload property of the session, enabling use of workload routing | empty string by default. Valid values are workload names that already exist in a workload routing rule on the server. If a workload name that doesn't exist is entered, the server will reject it and it will be set to the default empty string | To ping the server and validate a connection (as the connection isn't necessarily created at that moment), simply call the *PingContext()* method. diff --git a/vendor/github.com/vertica/vertica-sql-go/common/fileutils.go b/vendor/github.com/vertica/vertica-sql-go/common/fileutils.go index 90c19f5..1877ebc 100644 --- a/vendor/github.com/vertica/vertica-sql-go/common/fileutils.go +++ b/vendor/github.com/vertica/vertica-sql-go/common/fileutils.go @@ -1,6 +1,6 @@ package common -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/common/osutils.go b/vendor/github.com/vertica/vertica-sql-go/common/osutils.go index 0094a84..05ef1b9 100644 --- a/vendor/github.com/vertica/vertica-sql-go/common/osutils.go +++ b/vendor/github.com/vertica/vertica-sql-go/common/osutils.go @@ -2,7 +2,7 @@ package common import "runtime" -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/common/types.go b/vendor/github.com/vertica/vertica-sql-go/common/types.go index a2d869a..64780d7 100644 --- a/vendor/github.com/vertica/vertica-sql-go/common/types.go +++ b/vendor/github.com/vertica/vertica-sql-go/common/types.go @@ -1,6 +1,6 @@ package common -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -60,6 +60,7 @@ const ( AuthenticationOK int32 = 0 AuthenticationCleartextPassword int32 = 3 AuthenticationMD5Password int32 = 5 + AuthenticationOAuth int32 = 12 AuthenticationSHA512Password int32 = 66048 ) diff --git a/vendor/github.com/vertica/vertica-sql-go/connection.go b/vendor/github.com/vertica/vertica-sql-go/connection.go index db77e80..f212c98 100644 --- a/vendor/github.com/vertica/vertica-sql-go/connection.go +++ b/vendor/github.com/vertica/vertica-sql-go/connection.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -109,9 +109,11 @@ type connection struct { scratch [512]byte sessionID string autocommit string + oauthaccesstoken string serverTZOffset string dead bool // used if a ROLLBACK severity error is encountered sessMutex sync.Mutex + workload string } // Begin - Begin starts and returns a new transaction. 
(DEPRECATED) @@ -236,6 +238,9 @@ func newConnection(connString string) (*connection, error) { result.autocommit = "off" } + // Read OAuth access token flag. + result.oauthaccesstoken = result.connURL.Query().Get("oauth_access_token") + // Read connection load balance flag. loadBalanceFlag := result.connURL.Query().Get("connection_load_balance") @@ -256,6 +261,9 @@ func newConnection(connString string) (*connection, error) { sslFlag = tlsModeNone } + // Read Workload flag + result.workload = result.connURL.Query().Get("workload") + result.conn, err = result.establishSocketConnection() if err != nil { @@ -415,14 +423,14 @@ func min(a, b int) int { func (v *connection) handshake() error { - if v.connURL.User == nil { - return fmt.Errorf("connection string must include a user name") + if v.connURL.User == nil && len(v.oauthaccesstoken) == 0 { + return fmt.Errorf("connection string must include a user name or oauth_access_token") } userName := v.connURL.User.Username() - if len(userName) == 0 { - return fmt.Errorf("connection string must have a non-empty user name") + if len(userName) == 0 && len(v.oauthaccesstoken) == 0 { + return fmt.Errorf("connection string must have a non-empty user name or oauth_access_token") } dbName := "" @@ -431,14 +439,16 @@ func (v *connection) handshake() error { } msg := &msgs.FEStartupMsg{ - ProtocolVersion: protocolVersion, - DriverName: driverName, - DriverVersion: driverVersion, - Username: userName, - Database: dbName, - SessionID: v.sessionID, - ClientPID: v.clientPID, - Autocommit: v.autocommit, + ProtocolVersion: protocolVersion, + DriverName: driverName, + DriverVersion: driverVersion, + Username: userName, + Database: dbName, + SessionID: v.sessionID, + ClientPID: v.clientPID, + Autocommit: v.autocommit, + OAuthAccessToken: v.oauthaccesstoken, + Workload: v.workload, } if err := v.sendMessage(msg); err != nil { @@ -502,13 +512,23 @@ func (v *connection) initializeSession() error { return fmt.Errorf("can't get server timezone: %s", str) } - v.serverTZOffset = str[len(str)-3:] + v.serverTZOffset = getTimeZoneOffset(str) - connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:]) + connectionLogger.Debug("Setting server timezone offset to %s", v.serverTZOffset) return nil } +func getTimeZoneOffset(str string) string { + for i := len(str) - 1; i >= 0 && i >= len(str)-8; i-- { + ch := str[i] + if ch == '+' || ch == '-' { + return str[i:] + } + } + return "+00" +} + func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { handled := true @@ -526,6 +546,8 @@ func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { err = v.authSendMD5Password(msg.ExtraAuthData) case common.AuthenticationSHA512Password: err = v.authSendSHA512Password(msg.ExtraAuthData) + case common.AuthenticationOAuth: + err = v.authSendOAuthAccessToken() default: handled = false err = fmt.Errorf("unsupported authentication scheme: %d", msg.Response) @@ -715,6 +737,11 @@ func (v *connection) authSendSHA512Password(extraAuthData []byte) error { return v.sendMessage(msg) } +func (v *connection) authSendOAuthAccessToken() error { + msg := &msgs.FEPasswordMsg{PasswordData: v.oauthaccesstoken} + return v.sendMessage(msg) +} + func (v *connection) sync() error { err := v.sendMessage(&msgs.FESyncMsg{}) diff --git a/vendor/github.com/vertica/vertica-sql-go/context.go b/vendor/github.com/vertica/vertica-sql-go/context.go index 4acfe92..207709a 100644 --- a/vendor/github.com/vertica/vertica-sql-go/context.go +++ 
b/vendor/github.com/vertica/vertica-sql-go/context.go @@ -7,7 +7,7 @@ import ( "os" ) -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/driver.go b/vendor/github.com/vertica/vertica-sql-go/driver.go index 36057e8..bee34c4 100644 --- a/vendor/github.com/vertica/vertica-sql-go/driver.go +++ b/vendor/github.com/vertica/vertica-sql-go/driver.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -46,8 +46,8 @@ type Driver struct{} const ( driverName string = "vertica-sql-go" - driverVersion string = "1.3.0" - protocolVersion uint32 = 0x00030009 + driverVersion string = "1.3.3" + protocolVersion uint32 = 0x0003000F // 3.15 ) var driverLogger = logger.New("driver") diff --git a/vendor/github.com/vertica/vertica-sql-go/errors.go b/vendor/github.com/vertica/vertica-sql-go/errors.go index ce0690b..540bcc8 100644 --- a/vendor/github.com/vertica/vertica-sql-go/errors.go +++ b/vendor/github.com/vertica/vertica-sql-go/errors.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/logger/filelogger.go b/vendor/github.com/vertica/vertica-sql-go/logger/filelogger.go index e393f7d..c5ecdac 100644 --- a/vendor/github.com/vertica/vertica-sql-go/logger/filelogger.go +++ b/vendor/github.com/vertica/vertica-sql-go/logger/filelogger.go @@ -1,6 +1,6 @@ package logger -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/logger/logger.go b/vendor/github.com/vertica/vertica-sql-go/logger/logger.go index 051e12e..eff7fc7 100644 --- a/vendor/github.com/vertica/vertica-sql-go/logger/logger.go +++ b/vendor/github.com/vertica/vertica-sql-go/logger/logger.go @@ -1,6 +1,6 @@ package logger -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/logger/stdiologger.go b/vendor/github.com/vertica/vertica-sql-go/logger/stdiologger.go index bede2f3..a84db52 100644 --- a/vendor/github.com/vertica/vertica-sql-go/logger/stdiologger.go +++ b/vendor/github.com/vertica/vertica-sql-go/logger/stdiologger.go @@ -1,6 +1,6 @@ package logger -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
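Setting aside the copyright-header updates, the functional change to connection.go earlier in this patch replaces the fixed three-character slice `str[len(str)-3:]` with the new getTimeZoneOffset helper, so server timezone strings whose offsets are longer than three characters (for example "+05:30") are no longer truncated. A minimal standalone sketch of that behavior, with illustrative inputs that are not taken from the patch:

```Go
package main

import "fmt"

// getTimeZoneOffset mirrors the helper added to connection.go in this patch:
// it scans at most the last 8 characters of the server timezone string for a
// '+' or '-' sign and returns everything from that sign on, defaulting to "+00".
func getTimeZoneOffset(str string) string {
	for i := len(str) - 1; i >= 0 && i >= len(str)-8; i-- {
		ch := str[i]
		if ch == '+' || ch == '-' {
			return str[i:]
		}
	}
	return "+00"
}

func main() {
	fmt.Println(getTimeZoneOffset("America/New_York-05")) // -05
	fmt.Println(getTimeZoneOffset("Asia/Kolkata+05:30"))  // +05:30
	fmt.Println(getTimeZoneOffset("UTC"))                 // +00 (no explicit offset)
}
```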
diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beauthenticationmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beauthenticationmsg.go index 83b599c..437c3c6 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beauthenticationmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beauthenticationmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/bebindcompletemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/bebindcompletemsg.go index 3f0fa25..6196a98 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/bebindcompletemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/bebindcompletemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beclosecompletemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beclosecompletemsg.go index baf7952..a7aa7bd 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beclosecompletemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beclosecompletemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/becmdcompletemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/becmdcompletemsg.go index ff085c6..5ac6107 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/becmdcompletemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/becmdcompletemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/becmddescriptionmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/becmddescriptionmsg.go index 0288246..823aafc 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/becmddescriptionmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/becmddescriptionmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/bedatarowmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/bedatarowmsg.go index c549b95..0e84539 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/bedatarowmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/bedatarowmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beemptyqueryrespmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beemptyqueryrespmsg.go index babc485..18c0e8e 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beemptyqueryrespmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beemptyqueryrespmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beerrormsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beerrormsg.go index 10640c0..00d5849 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beerrormsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beerrormsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beinitstdinloadmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beinitstdinloadmsg.go index 3d4ff72..91a0fc5 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beinitstdinloadmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beinitstdinloadmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/bekeydatamsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/bekeydatamsg.go index 008cc20..db5f4a5 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/bekeydatamsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/bekeydatamsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beloadbalancemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beloadbalancemsg.go index 2ee61f0..e2eac3d 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beloadbalancemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beloadbalancemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beloadnewfilemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beloadnewfilemsg.go index 49a5099..c3fe24c 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beloadnewfilemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beloadnewfilemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/benodatamsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/benodatamsg.go index 396ba9a..794f185 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/benodatamsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/benodatamsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/benoticemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/benoticemsg.go index 515506b..11111ce 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/benoticemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/benoticemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beparameterdescmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beparameterdescmsg.go index e58be8c..968d44d 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beparameterdescmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beparameterdescmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beparamstatusmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beparamstatusmsg.go index 15d206d..04d5d52 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beparamstatusmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beparamstatusmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beparsecomplete.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beparsecomplete.go index 7837dea..784ea41 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beparsecomplete.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beparsecomplete.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beportalsuspendedmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beportalsuspendedmsg.go index 63dce0e..922956f 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beportalsuspendedmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beportalsuspendedmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/bereadyforquerymsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/bereadyforquerymsg.go index f03bda0..4055379 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/bereadyforquerymsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/bereadyforquerymsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/berowdescmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/berowdescmsg.go index 5054235..964bf37 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/berowdescmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/berowdescmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/beverifyloadfilesmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/beverifyloadfilesmsg.go index 28ede78..11cab8a 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/beverifyloadfilesmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/beverifyloadfilesmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/bewritefilemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/bewritefilemsg.go index 2aa98ff..ba2d669 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/bewritefilemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/bewritefilemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/febindmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/febindmsg.go index 810f03f..285f64b 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/febindmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/febindmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fecancelmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fecancelmsg.go index 0805f98..377deda 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fecancelmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fecancelmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feclosemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feclosemsg.go index d45f1a7..92ee9c9 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feclosemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feclosemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fedescribemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fedescribemsg.go index dc962c7..3df038f 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fedescribemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fedescribemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feerrormsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feerrormsg.go index 52e105d..423520f 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feerrormsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feerrormsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feexecutemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feexecutemsg.go index d690d33..65e20ea 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feexecutemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feexecutemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feflush.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feflush.go index c4e2713..013a57f 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feflush.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feflush.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feloadbalancemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feloadbalancemsg.go index 9873a78..bca6a4e 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feloadbalancemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feloadbalancemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddatamsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddatamsg.go index 973f1da..e10fba1 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddatamsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddatamsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddonemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddonemsg.go index d6bf222..bc8695e 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddonemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feloaddonemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feloadfailmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feloadfailmsg.go index 2371e67..f0c1b52 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feloadfailmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feloadfailmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feparsemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feparsemsg.go index 207c4c7..530255d 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feparsemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feparsemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fepasswordmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fepasswordmsg.go index f42433d..8a70dd8 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fepasswordmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fepasswordmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fequerymsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fequerymsg.go index ed56f18..6154e32 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fequerymsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fequerymsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fesslmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fesslmsg.go index 83161b2..c6c80f4 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fesslmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fesslmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go index b6965f7..d1e62b7 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -34,6 +34,7 @@ package msgs import ( "fmt" + "os" "os/user" "github.com/elastic/go-sysinfo" @@ -41,16 +42,19 @@ import ( // FEStartupMsg docs type FEStartupMsg struct { - ProtocolVersion uint32 - DriverName string - DriverVersion string - Username string - Database string - SessionID string - ClientPID int - ClientOS string - OSUsername string - Autocommit string + ProtocolVersion uint32 + DriverName string + DriverVersion string + Username string + Database string + SessionID string + ClientPID int + ClientOS string + OSUsername string + Autocommit string + OAuthAccessToken string + ClientOSHostname string + Workload string } // Flatten docs @@ -69,6 +73,12 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) { m.OSUsername = currentUser.Username } + m.ClientOSHostname = "" + hostname, err := os.Hostname() + if err == nil { + m.ClientOSHostname = hostname + } + buf := newMsgBuffer() const fixedProtocolVersion uint32 = 0x00030005 buf.appendUint32(fixedProtocolVersion) @@ -78,14 +88,17 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) { buf.appendUint32(m.ProtocolVersion) buf.appendBytes([]byte{0}) - if len(m.Username) > 0 { - buf.appendLabeledString("user", m.Username) - } + buf.appendLabeledString("user", m.Username) if len(m.Database) > 0 { buf.appendLabeledString("database", m.Database) } + if len(m.OAuthAccessToken) > 0 { + buf.appendLabeledString("oauth_access_token", m.OAuthAccessToken) + buf.appendLabeledString("auth_category", "OAuth") + } + buf.appendLabeledString("client_type", m.DriverName) buf.appendLabeledString("client_version", m.DriverVersion) buf.appendLabeledString("client_label", m.SessionID) @@ -93,6 +106,9 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) { buf.appendLabeledString("client_os", m.ClientOS) buf.appendLabeledString("client_os_user_name", m.OSUsername) buf.appendLabeledString("autocommit", m.Autocommit) + buf.appendLabeledString("protocol_compat", "VER") + buf.appendLabeledString("client_os_hostname", m.ClientOSHostname) + buf.appendLabeledString("workload", m.Workload) buf.appendBytes([]byte{0}) return buf.bytes(), 0 @@ -100,7 +116,7 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) { func (m *FEStartupMsg) String() string { return fmt.Sprintf( - "Startup (packet): ProtocolVersion:%08X, 
DriverName='%s', DriverVersion='%s', UserName='%s', Database='%s', SessionID='%s', ClientPID=%d, ClientOS='%s', ClientOSUserName='%s', Autocommit='%s'", + "Startup (packet): ProtocolVersion:%08X, DriverName='%s', DriverVersion='%s', UserName='%s', Database='%s', SessionID='%s', ClientPID=%d, ClientOS='%s', ClientOSUserName='%s', ClientOSHostname='%s', Autocommit='%s', OAuthAccessToken=, Workload='%s'", m.ProtocolVersion, m.DriverName, m.DriverVersion, @@ -110,5 +126,8 @@ func (m *FEStartupMsg) String() string { m.ClientPID, m.ClientOS, m.OSUsername, - m.Autocommit) + m.ClientOSHostname, + m.Autocommit, + len(m.OAuthAccessToken), + m.Workload) } diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/fesyncmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/fesyncmsg.go index ad79da1..14fe6ba 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/fesyncmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/fesyncmsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feterminatemsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feterminatemsg.go index 8fbe2df..8771da5 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feterminatemsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feterminatemsg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/feverifyloadfiles.go b/vendor/github.com/vertica/vertica-sql-go/msgs/feverifyloadfiles.go index 7234b85..a2ed2c9 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/feverifyloadfiles.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/feverifyloadfiles.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/msg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/msg.go index 9b75e6d..6952c96 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/msg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/msg.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/msgbuffer.go b/vendor/github.com/vertica/vertica-sql-go/msgs/msgbuffer.go index 8e1028f..14c0607 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/msgbuffer.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/msgbuffer.go @@ -1,6 +1,6 @@ package msgs -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
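Editor's note on the festartupmsg.go hunk above: the patch adds OAuthAccessToken, ClientOSHostname and Workload fields to the startup packet, serializes them as extra labeled strings (oauth_access_token plus auth_category=OAuth, client_os_hostname, workload), and fills the hostname from os.Hostname() inside Flatten. A minimal, hypothetical sketch of building such a message directly; in practice the driver fills these fields from the parsed connection string, and the values below are placeholders:

package main

import (
	"fmt"

	"github.com/vertica/vertica-sql-go/msgs"
)

func main() {
	// Placeholder values; normally populated by the driver from the DSN
	// and the local environment.
	startup := &msgs.FEStartupMsg{
		Username:         "dbadmin",
		Database:         "analytics",
		SessionID:        "example-session",
		Autocommit:       "on",
		OAuthAccessToken: "token-from-idp", // sent as oauth_access_token with auth_category=OAuth
		Workload:         "reporting",      // sent as the workload labeled string
	}

	// Flatten resolves the OS user name and, with this patch, the OS
	// hostname itself, then returns the wire bytes and the message tag.
	raw, tag := startup.Flatten()
	fmt.Printf("startup packet: %d bytes, tag %d\n", len(raw), tag)
}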
diff --git a/vendor/github.com/vertica/vertica-sql-go/parse/queryLex.go b/vendor/github.com/vertica/vertica-sql-go/parse/queryLex.go index 4f37880..6ea6acd 100644 --- a/vendor/github.com/vertica/vertica-sql-go/parse/queryLex.go +++ b/vendor/github.com/vertica/vertica-sql-go/parse/queryLex.go @@ -6,7 +6,7 @@ import ( "unicode/utf8" ) -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/result.go b/vendor/github.com/vertica/vertica-sql-go/result.go index c2e9911..d01f6b7 100644 --- a/vendor/github.com/vertica/vertica-sql-go/result.go +++ b/vendor/github.com/vertica/vertica-sql-go/result.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/rowcache/file.go b/vendor/github.com/vertica/vertica-sql-go/rowcache/file.go index ea7dc50..0b9782a 100644 --- a/vendor/github.com/vertica/vertica-sql-go/rowcache/file.go +++ b/vendor/github.com/vertica/vertica-sql-go/rowcache/file.go @@ -1,6 +1,6 @@ package rowcache -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/rowcache/memory.go b/vendor/github.com/vertica/vertica-sql-go/rowcache/memory.go index 88626a0..bdce26f 100644 --- a/vendor/github.com/vertica/vertica-sql-go/rowcache/memory.go +++ b/vendor/github.com/vertica/vertica-sql-go/rowcache/memory.go @@ -1,6 +1,6 @@ package rowcache -// Copyright (c) 2020-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2020-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/rows.go b/vendor/github.com/vertica/vertica-sql-go/rows.go index c47159f..707b406 100644 --- a/vendor/github.com/vertica/vertica-sql-go/rows.go +++ b/vendor/github.com/vertica/vertica-sql-go/rows.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vertica/vertica-sql-go/stmt.go b/vendor/github.com/vertica/vertica-sql-go/stmt.go index 47d69d9..fc6a8a0 100644 --- a/vendor/github.com/vertica/vertica-sql-go/stmt.go +++ b/vendor/github.com/vertica/vertica-sql-go/stmt.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/vertica/vertica-sql-go/tx.go b/vendor/github.com/vertica/vertica-sql-go/tx.go index 55b88e5..ebe874b 100644 --- a/vendor/github.com/vertica/vertica-sql-go/tx.go +++ b/vendor/github.com/vertica/vertica-sql-go/tx.go @@ -1,6 +1,6 @@ package vertigo -// Copyright (c) 2019-2023 Micro Focus or one of its affiliates. +// Copyright (c) 2019-2023 Open Text. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 0000000..5b3da8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe742..eef51eb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc57..318e42f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 0000000..be9cd92 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. 
When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271..f2ba89c 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04..d9a22c6 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
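Editor's note on the new attribute/filter.go above: Filter is now a standalone type with NewAllowKeysFilter and NewDenyKeysFilter constructors. A short sketch of how they combine with NewSetWithFiltered and Set.Filter (which this patch also reworks further down); attribute keys and values are arbitrary examples:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	attrs := []attribute.KeyValue{
		attribute.String("http.method", "GET"),
		attribute.String("http.target", "/healthz"),
		attribute.String("user.email", "someone@example.com"), // sensitive
	}

	// Keep only explicitly allowed keys; the second return value holds the
	// attributes that were dropped.
	allow := attribute.NewAllowKeysFilter("http.method", "http.target")
	kept, dropped := attribute.NewSetWithFiltered(attrs, allow)
	fmt.Println(kept.Len(), len(dropped)) // 2 1

	// The inverse: strip a known-sensitive key from an existing Set.
	full := attribute.NewSet(attrs...)
	deny := attribute.NewDenyKeysFilter("user.email")
	redacted, removed := full.Filter(deny)
	fmt.Println(redacted.Len(), len(removed)) // 2 1
}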
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce..3028f9a 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 26be598..bff9c7f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,22 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" ) @@ -25,30 +16,33 @@ type ( // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to be ensures equivalence stability: comparisons + // will return the save value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Filter supports removing certain attributes from attribute sets. 
When - // the filter returns true, the attribute will be kept in the filtered - // attribute set. When the filter returns false, the attribute is excluded - // from the filtered attribute set, and the attribute instead appears in - // the removed list of excluded attributes. - Filter func(KeyValue) bool - - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -91,7 +85,7 @@ func (l *Set) Len() int { // Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil { + if l == nil || !l.equivalent.Valid() { return KeyValue{}, false } value := l.equivalent.reflectValue() @@ -107,7 +101,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil { + if l == nil || !l.equivalent.Valid() { return Value{}, false } rValue := l.equivalent.reflectValue() @@ -187,11 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -199,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -218,7 +206,37 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - return NewSetWithSortableFiltered(kvs, new(Sortable), filter) + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil } // NewSetWithSortableFiltered returns a new Set. 
@@ -244,82 +262,71 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // // The second []KeyValue return value is a list of attributes that were // excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - if filter != nil { - return filterSet(kvs[position:], filter) - } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) } -// filterSet reorders kvs so that included keys are contiguous at the end of -// the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move attributes that do not match the filter so they're adjacent before - // calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. - offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] } } - excluded = kvs[:distinctPosition] - - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded + return j } // Filter returns a filtered copy of this Set. See the documentation for // NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil } - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. - return filterSet(l.ToSlice(), re) + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. 
+ slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] } // computeDistinct returns a Distinct using either the fixed- or @@ -399,7 +406,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Set. func (l Set) MarshalLog() interface{} { kvs := make(map[string]string) for _, kv := range l.ToSlice() { diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 57899f6..b320314 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,25 +1,16 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" + "reflect" "strconv" "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" ) //go:generate stringer -type=Type @@ -66,12 +57,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - cp := make([]bool, len(v)) - copy(cp, v) - return Value{ - vtype: BOOLSLICE, - slice: &cp, - } + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} } // IntValue creates an INT64 Value. @@ -81,13 +67,14 @@ func IntValue(v int) Value { // IntSliceValue creates an INTSLICE Value. func IntSliceValue(v []int) Value { - cp := make([]int64, 0, len(v)) - for _, i := range v { - cp = append(cp, int64(i)) + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) } return Value{ vtype: INT64SLICE, - slice: &cp, + slice: cp.Elem().Interface(), } } @@ -101,12 +88,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - cp := make([]int64, len(v)) - copy(cp, v) - return Value{ - vtype: INT64SLICE, - slice: &cp, - } + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -119,12 +101,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. 
func Float64SliceValue(v []float64) Value { - cp := make([]float64, len(v)) - copy(cp, v) - return Value{ - vtype: FLOAT64SLICE, - slice: &cp, - } + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} } // StringValue creates a STRING Value. @@ -137,12 +114,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - cp := make([]string, len(v)) - copy(cp, v) - return Value{ - vtype: STRINGSLICE, - slice: &cp, - } + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} } // Type returns a type of the Value. @@ -159,10 +131,14 @@ func (v Value) AsBool() bool { // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { - if s, ok := v.slice.(*[]bool); ok { - return *s + if v.vtype != BOOLSLICE { + return nil } - return nil + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -174,10 +150,14 @@ func (v Value) AsInt64() int64 { // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { - if s, ok := v.slice.(*[]int64); ok { - return *s + if v.vtype != INT64SLICE { + return nil } - return nil + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -189,10 +169,14 @@ func (v Value) AsFloat64() float64 { // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. func (v Value) AsFloat64Slice() []float64 { - if s, ok := v.slice.(*[]float64); ok { - return *s + if v.vtype != FLOAT64SLICE { + return nil } - return nil + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -204,10 +188,14 @@ func (v Value) AsString() string { // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. 
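Editor's note on the value.go hunks above: the pointer-to-slice storage (*[]T) is replaced by array-backed values built through the new internal attribute helpers, so a Value no longer aliases the caller's slice and slice-typed Values stay comparable. A quick sketch of the observable behaviour; the input values are arbitrary:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	src := []int64{1, 2, 3}
	v := attribute.Int64SliceValue(src)

	// The Value holds its own copy in an array, so mutating the source
	// slice afterwards does not change what the attribute reports.
	src[0] = 99
	fmt.Println(v.AsInt64Slice()) // [1 2 3]

	// Array-backed storage keeps slice-typed Values usable with ==, which
	// the comparable Set/Distinct machinery depends on.
	w := attribute.Int64SliceValue([]int64{1, 2, 3})
	fmt.Println(v == w) // true
}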
func (v Value) AsStringSlice() []string { - if s, ok := v.slice.(*[]string); ok { - return *s + if v.vtype != STRINGSLICE { + return nil } - return nil + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) } type unknownValueType struct{} @@ -218,19 +206,19 @@ func (v Value) AsInterface() interface{} { case BOOL: return v.AsBool() case BOOLSLICE: - return v.AsBoolSlice() + return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: - return v.AsInt64Slice() + return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: - return v.AsFloat64Slice() + return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: - return v.AsStringSlice() + return v.asStringSlice() } return unknownValueType{} } @@ -239,19 +227,19 @@ func (v Value) AsInterface() interface{} { func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: - return fmt.Sprint(*(v.slice.(*[]bool))) + return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(*(v.slice.(*[]int64))) + return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(*(v.slice.(*[]float64))) + return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(*(v.slice.(*[]string))) + return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 0000000..24c52b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 064a927..df29d96 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" @@ -23,10 +12,20 @@ import ( const ( // Unset is the default status code. Unset Code = 0 + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. Error Code = 1 + // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. 
Ok Code = 2 maxCode = 3 diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index df3e0f1..ee8db44 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,21 +1,10 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). */ package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000..f32766e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) + return cp.Elem().Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. 
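Editor's note on the new internal/attribute helpers above: the *SliceValue constructors use reflect.ArrayOf to copy a slice into a fixed-size array value, which is what makes the resulting attribute Values comparable (the matching As*Slice accessors follow immediately below). Since the package is internal, this sketch reproduces the same trick standalone rather than importing it:

package main

import (
	"fmt"
	"reflect"
)

// sliceToArray copies v into a [len(v)]int64 array wrapped in an
// interface{}, mirroring what internal/attribute.Int64SliceValue does.
func sliceToArray(v []int64) interface{} {
	var zero int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
	return cp.Elem().Interface()
}

func main() {
	a := sliceToArray([]int64{1, 2, 3})
	b := sliceToArray([]int64{1, 2, 3})

	// Arrays are comparable, so two values built from equal slices compare
	// equal with ==; the old *[]int64 representation compared pointers.
	fmt.Println(a == b)                   // true
	fmt.Println(reflect.TypeOf(a).Kind()) // array
}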
+func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. +func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 0000000..4259f03 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl 
--body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e794..3e7bb3b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 0000000..58ccaba --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index f058cc7..273d58e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -25,6 +14,7 @@ type TracerConfig struct { instrumentationVersion string // Schema URL of the telemetry emitted by the Tracer. schemaURL string + attrs attribute.Set } // InstrumentationVersion returns the version of the library providing instrumentation. @@ -32,6 +22,12 @@ func (t *TracerConfig) InstrumentationVersion() string { return t.instrumentationVersion } +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. func (t *TracerConfig) SchemaURL() string { return t.schemaURL @@ -261,6 +257,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c @@ -307,6 +304,16 @@ func WithInstrumentationVersion(version string) TracerOption { }) } +// WithInstrumentationAttributes sets the instrumentation attributes. 
+// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + // WithSchemaURL sets the schema URL for the Tracer. func WithSchemaURL(schemaURL string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a08..5650a17 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 3914177..d661c5d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,23 +1,12 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the OpenTelemetry API. To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: +operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer @@ -62,5 +51,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. 
They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a trasitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 0000000..7754a23 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 0000000..3e359a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). 
+// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb81..c00221e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 73950f2..ca20e99 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -19,16 +8,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -37,8 +30,8 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } -// noopTracer is an implementation of Tracer that preforms no operations. -type noopTracer struct{} +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -48,15 +41,15 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } -// noopSpan is an implementation of Span that preforms no operations. -type noopSpan struct{} +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -82,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index e1f61e0..28877d4 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -22,6 +11,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +38,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. 
A valid trace ID does // not consist of zeros only. @@ -71,8 +63,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +332,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -349,6 +350,12 @@ type Span interface { // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + // IsRecording returns the recording state of the Span. It will return // true if the Span is active and events can be recorded. IsRecording() bool @@ -364,8 +371,9 @@ type Span interface { SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a - // description, overriding previous values set. The description is only - // included in a status when the code is for an error. + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. SetStatus(code codes.Code, description string) // SetName sets the Span name. @@ -386,16 +394,16 @@ type Span interface { // // For example, a Link is used in the following situations: // -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. 
Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. type Link struct { // SpanContext of the linked Span. SpanContext SpanContext @@ -485,8 +493,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -503,17 +518,55 @@ type Tracer interface { Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } -// TracerProvider provides access to instrumentation Tracers. +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { - // Tracer creates an implementation of the Tracer interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. 
+ // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. // - // This method must be concurrency safe. - Tracer(instrumentationName string, opts ...TracerOption) Tracer + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer } diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 5e775ce..20b5cf2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,40 +1,19 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" import ( "encoding/json" "fmt" - "regexp" "strings" ) -var ( +const ( maxListMembers = 32 - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + listDelimiters = "," + memberDelimiter = "=" errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -48,32 +27,133 @@ type member struct { Value string } +// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) +// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . 
+func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is remain part length, should be 255 in simple-key or 13 in system-id. +func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is remain part length, should be 240 exactly. +func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. 
+func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + if !checkKey(key) { + return member{}, errInvalidKey } - if !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + if !checkValue(value) { + return member{}, errInvalidValue } return member{Key: key, Value: value}, nil } func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil } // String encodes member into a string compliant with the W3C Trace Context // specification. func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) + return m.Key + "=" + m.Value } // TraceState provides additional vendor-specific trace identification @@ -97,8 +177,8 @@ var _ json.Marshaler = TraceState{} // ParseTraceState attempts to decode a TraceState from the passed // string. It returns an error if the input is invalid according to the W3C // Trace Context specification. -func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { return TraceState{}, nil } @@ -108,7 +188,9 @@ func ParseTraceState(tracestate string) (TraceState, error) { var members []member found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) if len(memberStr) == 0 { continue } @@ -141,11 +223,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) { // Trace Context specification. The returned string will be invalid if the // TraceState contains any invalid members. 
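The regexp-based tracestate validation removed above is replaced by the hand-rolled grammar checks, but the observable behavior of the public TraceState API stays the same. A small sketch of that surface follows; the keys and values ("rojo", "congo", "tenant@system") are made-up W3C-style examples, not values taken from this patch.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Parse a W3C tracestate header: comma-separated key=value members.
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Get("rojo")) // 00f067aa0ba902b7

	// Insert validates key and value against the W3C grammar and places
	// the new member at the front of the list.
	ts, err = ts.Insert("tenant@system", "opaque")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.String())

	// Keys containing characters outside the grammar (here an uppercase
	// letter) are rejected.
	if _, err := ts.Insert("Bad", "value"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```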
func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() + if len(ts.list) == 0 { + return "" + } + var n int + n += len(ts.list) // member delimiters: '=' + n += len(ts.list) - 1 // list delimiters: ',' + for _, mem := range ts.list { + n += len(mem.Key) + n += len(mem.Value) } - return strings.Join(members, listDelimiter) + + var sb strings.Builder + sb.Grow(n) + _, _ = sb.WriteString(ts.list[0].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[0].Value) + for i := 1; i < len(ts.list); i++ { + _ = sb.WriteByte(listDelimiters[0]) + _, _ = sb.WriteString(ts.list[i].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[i].Value) + } + return sb.String() } // Get returns the value paired with key from the corresponding TraceState @@ -177,15 +277,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { if err != nil { return ts, err } - - cTS := ts.Delete(key) - if cTS.Len()+1 <= maxListMembers { - cTS.list = append(cTS.list, member{}) + n := len(ts.list) + found := n + for i := range ts.list { + if ts.list[i].Key == key { + found = i + } + } + cTS := TraceState{} + if found == n && n < maxListMembers { + cTS.list = make([]member, n+1) + } else { + cTS.list = make([]member, n) } - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], cTS.list) cTS.list[0] = m - + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } return cTS, nil } diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 6d4d1be..0000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 0a4504f..0000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -.DS_Store -/vendor -/cover -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 5895722..0000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -go: - - 1.7 - - 1.8 - - 1.9 - -cache: - directories: - - vendor - -install: - - make install_ci - -script: - - make test_ci - - scripts/test-ubergo.sh - - make lint - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index dfc63d9..0000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -PACKAGES := $(shell glide nv) -# Many Go tools take file globs or 
directories as arguments instead of packages. -PACKAGE_FILES ?= *.go - - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 7 8 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - -export GO15VENDOREXPERIMENT=1 - - -.PHONY: build -build: - go build -i $(PACKAGES) - - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - - -.PHONY: test -test: - go test -cover -race $(PACKAGES) - - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover -ifdef SHOULD_LINT - go get github.com/golang/lint/golint -endif - -.PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif - - -.PHONY: test_ci -test_ci: install_ci build - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 6505abf..0000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation -`go get -u go.uber.org/atomic` - -## Usage -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status -Stable. - -
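The go.uber.org/atomic package is removed from the vendor tree by this patch (further down, multierr switches its import to the standard library's sync/atomic). For orientation only, the wrapper usage shown in the deleted README maps onto the method-based types that sync/atomic has provided since Go 1.19; this is a hedged sketch, not code from the patch.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// sync/atomic's Uint32 type covers the common cases the vendored
	// wrapper was used for: Load, Store, Add, CompareAndSwap.
	var atom atomic.Uint32
	atom.Store(42)
	atom.Add(^uint32(2 - 1)) // subtract 2; sync/atomic has no Sub helper
	swapped := atom.CompareAndSwap(40, 11)
	fmt.Println(atom.Load(), swapped) // 11 true
}
```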
    -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.org/uber-go/atomic -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go deleted file mode 100644 index 1db6849..0000000 --- a/vendor/go.uber.org/atomic/atomic.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic - -import ( - "math" - "sync/atomic" - "time" -) - -// Int32 is an atomic wrapper around an int32. -type Int32 struct{ v int32 } - -// NewInt32 creates an Int32. -func NewInt32(i int32) *Int32 { - return &Int32{i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// Int64 is an atomic wrapper around an int64. -type Int64 struct{ v int64 } - -// NewInt64 creates an Int64. -func NewInt64(i int64) *Int64 { - return &Int64{i} -} - -// Load atomically loads the wrapped value. 
-func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// Uint32 is an atomic wrapper around an uint32. -type Uint32 struct{ v uint32 } - -// NewUint32 creates a Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// Uint64 is an atomic wrapper around a uint64. -type Uint64 struct{ v uint64 } - -// NewUint64 creates a Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. 
-func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// Bool is an atomic Boolean. -type Bool struct{ v uint32 } - -// NewBool creates a Bool. -func NewBool(initial bool) *Bool { - return &Bool{boolToInt(initial)} -} - -// Load atomically loads the Boolean. -func (b *Bool) Load() bool { - return truthy(atomic.LoadUint32(&b.v)) -} - -// CAS is an atomic compare-and-swap. -func (b *Bool) CAS(old, new bool) bool { - return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) -} - -// Store atomically stores the passed value. -func (b *Bool) Store(new bool) { - atomic.StoreUint32(&b.v, boolToInt(new)) -} - -// Swap sets the given value and returns the previous value. -func (b *Bool) Swap(new bool) bool { - return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - return truthy(atomic.AddUint32(&b.v, 1) - 1) -} - -func truthy(n uint32) bool { - return n&1 == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Float64 is an atomic wrapper around float64. -type Float64 struct { - v uint64 -} - -// NewFloat64 creates a Float64. -func NewFloat64(f float64) *Float64 { - return &Float64{math.Float64bits(f)} -} - -// Load atomically loads the wrapped value. -func (f *Float64) Load() float64 { - return math.Float64frombits(atomic.LoadUint64(&f.v)) -} - -// Store atomically stores the passed value. -func (f *Float64) Store(s float64) { - atomic.StoreUint64(&f.v, math.Float64bits(s)) -} - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// CAS is an atomic compare-and-swap. -func (f *Float64) CAS(old, new float64) bool { - return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) -} - -// Duration is an atomic wrapper around time.Duration -// https://godoc.org/time#Duration -type Duration struct { - v Int64 -} - -// NewDuration creates a Duration. -func NewDuration(d time.Duration) *Duration { - return &Duration{v: *NewInt64(int64(d))} -} - -// Load atomically loads the wrapped value. -func (d *Duration) Load() time.Duration { - return time.Duration(d.v.Load()) -} - -// Store atomically stores the passed value. -func (d *Duration) Store(n time.Duration) { - d.v.Store(int64(n)) -} - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// Swap atomically swaps the wrapped time.Duration and returns the old value. -func (d *Duration) Swap(n time.Duration) time.Duration { - return time.Duration(d.v.Swap(int64(n))) -} - -// CAS is an atomic compare-and-swap. 
-func (d *Duration) CAS(old, new time.Duration) bool { - return d.v.CAS(int64(old), int64(new)) -} - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock deleted file mode 100644 index 3c72c59..0000000 --- a/vendor/go.uber.org/atomic/glide.lock +++ /dev/null @@ -1,17 +0,0 @@ -hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 -updated: 2016-10-27T00:10:51.16960137-07:00 -imports: [] -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml deleted file mode 100644 index 4cf608e..0000000 --- a/vendor/go.uber.org/atomic/glide.yaml +++ /dev/null @@ -1,6 +0,0 @@ -package: go.uber.org/atomic -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore index 61ead86..b9a05e3 100644 --- a/vendor/go.uber.org/multierr/.gitignore +++ b/vendor/go.uber.org/multierr/.gitignore @@ -1 +1,4 @@ /vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml deleted file mode 100644 index 5ffa8fe..0000000 --- a/vendor/go.uber.org/multierr/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/multierr - -env: - global: - - GO15VENDOREXPERIMENT=1 - -go: - - 1.7 - - 1.8 - - tip - -cache: - directories: - - vendor - -before_install: -- go version - -install: -- | - set -e - make install_ci - -script: -- | - set -e - make lint - make test_ci - -after_success: -- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index 898445d..f8177b9 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,73 @@ Releases ======== +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfies `errors.Is` against the target error. + +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allow passsing functions to similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + +v1.7.0 (2021-05-06) +=================== + +- Add `AppendInvoke` to append into errors from `defer` blocks. + + +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. 
+ + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + v1.1.0 (2017-06-30) =================== diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt index 858e024..413e30f 100644 --- a/vendor/go.uber.org/multierr/LICENSE.txt +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2017 Uber Technologies, Inc. +Copyright (c) 2017-2021 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile index a7437d0..dcb6fe7 100644 --- a/vendor/go.uber.org/multierr/Makefile +++ b/vendor/go.uber.org/multierr/Makefile @@ -1,23 +1,17 @@ -export GO15VENDOREXPERIMENT=1 - -PACKAGES := $(shell glide nv) +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin GO_FILES := $(shell \ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ -o -name '*.go' -print | cut -b3-) -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - .PHONY: build build: - go build -i $(PACKAGES) + go build ./... .PHONY: test test: - go test -cover -race $(PACKAGES) + go test -race ./... .PHONY: gofmt gofmt: @@ -25,50 +19,20 @@ gofmt: @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) -.PHONY: govet -govet: - $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) - @go vet $(PACKAGES) 2>&1 \ - | grep -v '^exit status' > $(VET_LOG) || true - @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) - .PHONY: golint golint: - @go get github.com/golang/lint/golint - $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... .PHONY: staticcheck staticcheck: - @go get honnef.co/go/tools/cmd/staticcheck - $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) - @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true - @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... .PHONY: lint -lint: gofmt govet golint staticcheck +lint: gofmt golint staticcheck .PHONY: cover cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) + go test -race -coverprofile=cover.out -coverpkg=./... -v ./... 
go tool cover -html=cover.out -o cover.html - -update-license: - @go get go.uber.org/tools/update-license - @update-license \ - $(shell go list -json $(PACKAGES) | \ - jq -r '.Dir + "/" + (.GoFiles | .[])') - -############################################################################## - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: test_ci -test_ci: install_ci - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md index 065088f..5ab6ac4 100644 --- a/vendor/go.uber.org/multierr/README.md +++ b/vendor/go.uber.org/multierr/README.md @@ -2,9 +2,29 @@ `multierr` allows combining one or more Go `error`s together. +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + ## Installation - go get -u go.uber.org/multierr +```bash +go get -u go.uber.org/multierr@latest +``` ## Status @@ -15,9 +35,9 @@ Stable: No breaking changes will be made before 2.0. Released under the [MIT License]. [MIT License]: LICENSE.txt -[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg -[doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master +[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr +[doc]: https://pkg.go.dev/go.uber.org/multierr +[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.org/uber-go/multierr +[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml [cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index de6ce47..3a828b2 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,54 +20,109 @@ // Package multierr allows combining one or more errors together. // -// Overview +// # Overview // // Errors can be combined with the use of the Combine function. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) // // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Combine(reader.Close(), writer.Close()) -// -// This makes it possible to record resource cleanup failures from deferred -// blocks with the help of named return values. 
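The multierr package documentation being reflowed here describes the Combine/Append/Errors surface; the API itself is unchanged by this patch. The following compact sketch exercises that flow with placeholder io.Closer values (io.NopCloser over a strings.Reader), which are assumptions for illustration and not taken from the diff.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"go.uber.org/multierr"
)

// closeAll closes every closer and combines the failures into one error.
// Combine skips nil errors and flattens nested multierr errors.
func closeAll(closers ...io.Closer) error {
	var errs []error
	for _, c := range closers {
		errs = append(errs, c.Close())
	}
	return multierr.Combine(errs...)
}

func main() {
	r := io.NopCloser(strings.NewReader("example"))
	err := closeAll(r, r)
	// Errors returns the underlying list; it is nil when err is nil.
	fmt.Println(multierr.Errors(err))
}
```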
-// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } +// err = multierr.Append(reader.Close(), writer.Close()) // // The underlying list of errors for a returned error object may be retrieved // with the Errors function. // -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:") -// } +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// # Appending from a loop +// +// You sometimes need to append into an error from a loop. +// +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } +// +// Cases like this may require knowledge of whether an individual instance +// failed. This usually requires introduction of a new variable. +// +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } +// +// multierr includes AppendInto to simplify cases like this. +// +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } +// +// This will append the error into the err variable, and return true if that +// individual error was non-nil. // -// Advanced Usage +// See [AppendInto] for more information. +// +// # Deferred Functions +// +// Go makes it possible to modify the return value of a function in a defer +// block if the function was using named returns. This makes it possible to +// record resource cleanup failures from deferred blocks. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// multierr provides the Invoker type and AppendInvoke function to make cases +// like the above simpler and obviate the need for a closure. The following is +// roughly equivalent to the example above. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } +// +// See [AppendInvoke] and [Invoker] for more information. +// +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage // // Errors returned by Combine and Append MAY implement the following // interface. // -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } // // Note that if you need access to list of errors behind a multierr error, you // should prefer using the Errors function. That said, if you need cheap @@ -76,31 +131,29 @@ // because errors returned by Combine and Append are not guaranteed to // implement this interface. 
// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } package multierr // import "go.uber.org/multierr" import ( "bytes" + "errors" "fmt" "io" "strings" "sync" - - "go.uber.org/atomic" + "sync/atomic" ) var ( // Separator for single-line error messages. _singlelineSeparator = []byte("; ") - _newline = []byte("\n") - // Prefix for multi-line messages _multilinePrefix = []byte("the following errors occurred:") @@ -132,36 +185,17 @@ type errorGroup interface { } // Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, the returned slice is empty. +// error is composed of. If the error is nil, a nil slice is returned. // -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) // // If the error is not composed of other errors, the returned slice contains // just the error that was passed in. // // Callers of this function are free to modify the returned slice. func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result + return extractErrors(err) } // multiError is an error that holds one or more errors. @@ -176,8 +210,6 @@ type multiError struct { errors []error } -var _ errorGroup = (*multiError)(nil) - // Errors returns the list of underlying errors. // // This slice MUST NOT be modified. @@ -203,6 +235,17 @@ func (merr *multiError) Error() string { return result } +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + func (merr *multiError) Format(f fmt.State, c rune) { if c == 'v' && f.Flag('+') { merr.writeMultiline(f) @@ -294,6 +337,14 @@ func inspect(errors []error) (res inspectResult) { // fromSlice converts the given list of errors into a single error. func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + res := inspect(errors) switch res.Count { case 0: @@ -303,8 +354,12 @@ func fromSlice(errors []error) error { return errors[res.FirstErrorIdx] case len(errors): if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) 
+ return &multiError{errors: out} } } @@ -329,32 +384,32 @@ func fromSlice(errors []error) error { // If zero arguments were passed or if all items are nil, a nil error is // returned. // -// Combine(nil, nil) // == nil +// Combine(nil, nil) // == nil // // If only a single error was passed, it is returned as-is. // -// Combine(err) // == err +// Combine(err) // == err // // Combine skips over nil arguments so this function may be used to combine // together errors from operations that fail independently of each other. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) // // If any of the passed errors is a multierr error, it will be flattened along // with the other errors. // -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) // // The returned error formats into a readable multi-line error message if // formatted with %+v. // -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) func Combine(errors ...error) error { return fromSlice(errors) } @@ -364,16 +419,19 @@ func Combine(errors ...error) error { // This function is a specialization of Combine for the common case where // there are only two errors. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The following pattern may also be used to record failure of deferred // operations without losing information about the original error. // -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. func Append(left error, right error) error { switch { case left == nil: @@ -399,3 +457,190 @@ func Append(left error, right error) error { errors := [2]error{left, right} return fromSlice(errors[0:]) } + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. 
This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} + +// Invoker is an operation that may fail with an error. Use it with +// AppendInvoke to append the result of calling the function into an error. +// This allows you to conveniently defer capture of failing operations. +// +// See also, [Close] and [Invoke]. +type Invoker interface { + Invoke() error +} + +// Invoke wraps a function which may fail with an error to match the Invoker +// interface. Use it to supply functions matching this signature to +// AppendInvoke. +// +// For example, +// +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } +// +// In this example, the following line will construct the Invoker right away, +// but defer the invocation of scanner.Err() until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +type Invoke func() error + +// Invoke calls the supplied function and returns its result. +func (i Invoke) Invoke() error { return i() } + +// Close builds an Invoker that closes the provided io.Closer. Use it with +// AppendInvoke to close io.Closers and append their results into an error. +// +// For example, +// +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } +// +// In this example, multierr.Close will construct the Invoker right away, but +// defer the invocation of f.Close until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +func Close(closer io.Closer) Invoker { + return Invoke(closer.Close) +} + +// AppendInvoke appends the result of calling the given Invoker into the +// provided error pointer. Use it with named returns to safely defer +// invocation of fallible operations until a function returns, and capture the +// resulting errors. +// +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } +// +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// scanner := bufio.NewScanner(f) +// // Similarly, this scheduled scanner.Err to be called and +// // inspected when the function returns and append its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// // ... +// } +// +// NOTE: If used with a defer, the error variable MUST be a named return. +// +// Without defer, AppendInvoke behaves exactly like AppendInto. +// +// err := // ... +// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) +// +// // ...is roughly equivalent to... +// +// err := // ... 
+// multierr.AppendInto(&err, foo()) +// +// The advantage of the indirection introduced by Invoker is to make it easy +// to defer the invocation of a function. Without this indirection, the +// invoked function will be evaluated at the time of the defer block rather +// than when the function returns. +// +// // BAD: This is likely not what the caller intended. This will evaluate +// // foo() right away and append its result into the error when the +// // function returns. +// defer multierr.AppendInto(&err, foo()) +// +// // GOOD: This will defer invocation of foo unutil the function returns. +// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// +// multierr provides a few Invoker implementations out of the box for +// convenience. See [Invoker] for more information. +func AppendInvoke(into *error, invoker Invoker) { + AppendInto(into, invoker.Invoke()) +} + +// AppendFunc is a shorthand for [AppendInvoke]. +// It allows using function or method value directly +// without having to wrap it into an [Invoker] interface. +// +// func doSomething(...) (err error) { +// w, err := startWorker(...) +// if err != nil { +// return err +// } +// +// // multierr will call w.Stop() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendFunc(&err, w.Stop) +// } +func AppendFunc(into *error, fn func() error) { + AppendInvoke(into, Invoke(fn)) +} diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go new file mode 100644 index 0000000..a173f9c --- /dev/null +++ b/vendor/go.uber.org/multierr/error_post_go120.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) 
+} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 0000000..93872a3 --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) 
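Taken together, the go1.20 and pre-go1.20 files above make a combined multierr error transparent to the standard errors package. A minimal usage sketch of that behavior; the probed path and error messages are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	// Build one error value out of two independent failures.
	_, statErr := os.Stat("does-not-exist")
	err := multierr.Append(statErr, errors.New("cleanup failed"))

	// errors.Is finds fs.ErrNotExist inside the combined error:
	// on Go 1.20+ via Unwrap() []error, on older toolchains via the
	// Is method shown above.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// errors.As works the same way for typed errors.
	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true
}
```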
+} diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock deleted file mode 100644 index f9ea94c..0000000 --- a/vendor/go.uber.org/multierr/glide.lock +++ /dev/null @@ -1,19 +0,0 @@ -hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 -updated: 2017-04-10T13:34:45.671678062-07:00 -imports: -- name: go.uber.org/atomic - version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084e..0000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore index 08fbde6..da9d9d0 100644 --- a/vendor/go.uber.org/zap/.gitignore +++ b/vendor/go.uber.org/zap/.gitignore @@ -26,3 +26,7 @@ _testmain.go *.pprof *.out *.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml new file mode 100644 index 0000000..2346df1 --- /dev/null +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -0,0 +1,77 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + # We'll track the golangci-lint default linters manually + # instead of letting them change without our control. + disable-all: true + enable: + # golangci-lint defaults: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + + # Our own extras: + - gofumpt + - nolintlint # lints nolint directives + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - niliness + - reflectvaluecompare + - sortslice + - unusedwrite + + errcheck: + exclude-functions: + # These methods can not fail. + # They operate on an in-memory buffer. + - (*go.uber.org/zap/buffer.Buffer).Write + - (*go.uber.org/zap/buffer.Buffer).WriteByte + - (*go.uber.org/zap/buffer.Buffer).WriteString + + - (*go.uber.org/zap/zapio.Writer).Close + - (*go.uber.org/zap/zapio.Writer).Sync + - (*go.uber.org/zap/zapio.Writer).Write + # Write to zapio.Writer cannot fail, + # so io.WriteString on it cannot fail. + - io.WriteString(*go.uber.org/zap/zapio.Writer) + + # Writing a plain string to a fmt.State cannot fail. + - io.WriteString(fmt.State) + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful; replacing them with '_' is undesirable. + - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # staticcheck already has smarter checks for empty blocks. + # revive's empty-block linter has false positives. 
+ # For example, as of writing this, the following is not allowed. + # for foo() { } + - linters: [revive] + text: 'empty-block: this block is empty, you can remove it' + + # Ignore logger.Sync() errcheck failures in example_test.go + # since those are intended to be uncomplicated examples. + - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index c6440db..4fea302 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
    + Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
    + ## Installation `go get -u go.uber.org/zap` @@ -92,17 +100,18 @@ standard.
    -Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml deleted file mode 100644 index a3321fa..0000000 --- a/vendor/go.uber.org/zap/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: false -go: - - 1.9.x - - 1.10.x -go_import_path: go.uber.org/zap -env: - global: - - TEST_TIMEOUT_SCALE=10 -cache: - directories: - - vendor -install: - - make dependencies -script: - - make lint - - make test - - make bench -after_success: - - make cover - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 17d5b49..6d6cd5f 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,12 +1,382 @@ # Changelog +All notable changes to this project will be documented in this file. -## v1.9.1 (06 Aug 2018) +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + +## 1.26.0 (14 Sep 2023) +Enhancements: +* [#1297][]: Add Dict as a Field. +* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured +context. +* [#1350][]: String encoding is much (~50%) faster now. + +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. + +[#1297]: https://github.com/uber-go/zap/pull/1297 +[#1319]: https://github.com/uber-go/zap/pull/1319 +[#1350]: https://github.com/uber-go/zap/pull/1350 + +## 1.25.0 (1 Aug 2023) + +This release contains several improvements including performance, API additions, +and two new experimental packages whose APIs are unstable and may change in the +future. + +Enhancements: +* [#1246][]: Add `zap/exp/zapslog` package for integration with slog. 
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. +* [#1281][]: Add `zap/exp/expfield` package which contains helper methods +`Str` and `Strs` for constructing String-like zap.Fields. +* [#1310][]: Reduce stack size on `Any`. + +Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions +to this release. + +[#1246]: https://github.com/uber-go/zap/pull/1246 +[#1273]: https://github.com/uber-go/zap/pull/1273 +[#1281]: https://github.com/uber-go/zap/pull/1281 +[#1310]: https://github.com/uber-go/zap/pull/1310 + +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. + +[#1148]: https://github.coml/uber-go/zap/pull/1148 +[#1185]: https://github.coml/uber-go/zap/pull/1185 + +## 1.23.0 (24 Aug 2022) + +Enhancements: +* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a + `LevelEnabler` or `Core`. +* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects + that implement `String() string`. + +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. +* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. +* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. 
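The `zapcore.ParseLevel` (1.21.0) and `zap.Must` (1.22.0) additions above combine naturally; a short sketch, with the level string and message chosen only for illustration:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Turn a configuration string into a level.
	lvl, err := zapcore.ParseLevel("warn")
	if err != nil {
		panic(err)
	}

	// zap.Must panics if the logger cannot be built, which keeps
	// main() and test setup code short.
	logger := zap.Must(zap.NewProduction(zap.IncreaseLevel(lvl)))
	defer logger.Sync()

	logger.Warn("logging enabled at or above", zap.Stringer("level", lvl))
}
```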
+ +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. 
+* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. 
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +[#758]: https://github.com/uber-go/zap/pull/758 + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +[#751]: https://github.com/uber-go/zap/pull/751 + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## 1.9.1 (06 Aug 2018) Bugfixes: * [#614][]: MapObjectEncoder should not ignore empty slices. -## v1.9.0 (19 Jul 2018) +[#614]: https://github.com/uber-go/zap/pull/614 + +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -15,7 +385,11 @@ Enhancements: Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and @dimroc for their contributions to this release. -## v1.8.0 (13 Apr 2018) +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -28,19 +402,28 @@ Bugfixes: Thanks to @DiSiqueira and @djui for their contributions to this release. 
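The 1.13.0 entry above introduces the `*p` constructors; a tiny sketch of logging pointers to primitives, with illustrative field names and a deliberately nil pointer:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	retries := 3
	var region *string // nil on purpose

	logger.Info("request finished",
		zap.Intp("retries", &retries), // pointer with a value
		zap.Stringp("region", region), // nil pointers are handled safely
	)
}
```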
-## v1.7.1 (25 Sep 2017) +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. -## v1.7.0 (21 Sep 2017) +[#504]: https://github.com/uber-go/zap/pull/504 + +## 1.7.0 (21 Sep 2017) Enhancements: * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user to specify the level of the logged messages. -## v1.6.0 (30 Aug 2017) +[#487]: https://github.com/uber-go/zap/pull/487 + +## 1.6.0 (30 Aug 2017) Enhancements: @@ -48,7 +431,10 @@ Enhancements: * [#490][]: Add a `ContextMap` method to observer logs for simpler field validation in tests. -## v1.5.0 (22 Jul 2017) +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## 1.5.0 (22 Jul 2017) Enhancements: @@ -61,7 +447,12 @@ Bugfixes: Thanks to @richard-tunein and @pavius for their contributions to this release. -## v1.4.1 (08 Jun 2017) +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -70,7 +461,10 @@ Bugfixes: * [#435][]: Support a variety of case conventions when unmarshaling levels. * [#444][]: Fix a panic in the observer. -## v1.4.0 (12 May 2017) +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -82,7 +476,11 @@ Enhancements: * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a variety of operations a bit simpler. -## v1.3.0 (25 Apr 2017) +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -93,7 +491,10 @@ Enhancements: particularly useful when testing the `SugaredLogger`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. -## v1.2.0 (13 Apr 2017) +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -102,7 +503,9 @@ Enhancements: * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements `grpclog.Logger`. -## v1.1.0 (31 Mar 2017) +[#402]: https://github.com/uber-go/zap/pull/402 + +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -119,7 +522,11 @@ Enhancements: Thanks to @moitias for contributing to this release. -## v1.0.0 (14 Mar 2017) +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -164,7 +571,21 @@ Enhancements: Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their contributions to this release. 
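A brief sketch of `NewStdLogAt` from the 1.7.0 entry above, which routes the standard library's logger through zap at a caller-chosen level; the message is illustrative:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// std is a *log.Logger, so legacy code keeps compiling while its
	// output goes through zap at WarnLevel.
	std, err := zap.NewStdLogAt(logger, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}
	std.Println("legacy message routed through zap")
}
```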
-## v1.0.0-rc.3 (7 Mar 2017) +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -185,7 +606,12 @@ Enhancements: Thanks to @ansel1 and @suyash for their contributions to this release. -## v1.0.0-rc.2 (21 Feb 2017) +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -222,7 +648,16 @@ Enhancements: Thanks to @skipor and @chapsuk for their contributions to this release. -## v1.0.0-rc.1 (14 Feb 2017) +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -242,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. -## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no @@ -250,56 +685,3 @@ backward compatibility concerns and all functionality is new. Early zap adopters should pin to the 0.1.x minor version until they're ready to upgrade to the upcoming stable release. 
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md index 9454bba..ea02f3c 100644 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request. [Fork][fork], then clone the repository: -``` +```bash mkdir -p $GOPATH/src/go.uber.org cd $GOPATH/src/go.uber.org git clone git@github.com:your_github_username/zap.git @@ -25,29 +25,18 @@ git remote add upstream https://github.com/uber-go/zap.git git fetch upstream ``` -Install zap's dependencies: - -``` -make dependencies -``` - Make sure that the tests and the linters pass: -``` +```bash make test make lint ``` -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. 
- ## Making Changes Start by creating a new branch for your changes: -``` +```bash cd $GOPATH/src/go.uber.org/zap git checkout master git fetch upstream @@ -58,22 +47,22 @@ git checkout -b cool_new_feature Make your changes, then ensure that `make lint` and `make test` still pass. If you're satisfied with your changes, push them to your fork. -``` +```bash git push origin cool_new_feature ``` Then use the GitHub UI to open a pull request. -At this point, you're waiting on us to review your changes. We *try* to respond +At this point, you're waiting on us to review your changes. We _try_ to respond to issues and pull requests within a few business days, and we may suggest some improvements or alternatives. Once your changes are approved, one of the project maintainers will merge them. We're much more likely to approve your changes if you: -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. [fork]: https://github.com/uber-go/zap/fork [open-issue]: https://github.com/uber-go/zap/issues/new diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 4256d35..b183b20 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -27,6 +27,13 @@ abstraction, and it lets us add methods without introducing breaking changes. Your applications should define and depend upon an interface that includes just the methods you use. +### Why are some of my logs missing? + +Logs are dropped intentionally by zap when sampling is enabled. The production +configuration (as returned by `NewProductionConfig()` enables sampling which will +cause repeated logs within a second to be sampled. See more details on why sampling +is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). + ### Why sample application logs? Applications often experience runs of errors, either because of a bug or @@ -149,6 +156,8 @@ We're aware of the following extensions, but haven't used them ourselves: | `github.com/tchap/zapext` | Sentry, syslog | | `github.com/fgrosse/zaptest` | Ginkgo | | `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index ef7893b..eb1cee5 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,76 +1,76 @@ -export GO15VENDOREXPERIMENT=1 +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem -PKGS ?= $(shell glide novendor) -# Many Go tools take file globs or directories as arguments instead of packages. 
-PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 10 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif +# Directories containing independent Go modules. +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp .PHONY: all all: lint test -.PHONY: dependencies -dependencies: - @echo "Installing Glide and locked dependencies..." - glide --version || go get -u -f github.com/Masterminds/glide - glide install - @echo "Installing test dependencies..." - go install ./vendor/github.com/axw/gocov/gocov - go install ./vendor/github.com/mattn/goveralls -ifdef SHOULD_LINT - @echo "Installing golint..." - go install ./vendor/github.com/golang/lint/golint -else - @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) -endif - -# Disable printf-like invocation checking due to testify.assert.Error() -VET_RULES := -printf=false - .PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log - @echo "Installing test dependencies for vet..." - @go test -i $(PKGS) - @echo "Checking vet..." - @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./check_license.sh | tee -a lint.log - @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck .PHONY: test test: - go test -race $(PKGS) + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true .PHONY: cover cover: - ./scripts/cover.sh $(PKGS) + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true .PHONY: bench BENCH ?= . bench: - @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true .PHONY: updatereadme updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... 
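Relating to the FAQ entry above about missing logs: sampling can simply be switched off on the production preset. A minimal sketch; the loop and message are illustrative:

```go
package main

import "go.uber.org/zap"

func main() {
	cfg := zap.NewProductionConfig()

	// The production preset samples repeated entries within a second.
	// Setting Sampling to nil disables sampling entirely.
	cfg.Sampling = nil

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	for i := 0; i < 1000; i++ {
		logger.Info("none of these are dropped")
	}
}
```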
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index f4fd1cb..a17035c 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +<div align="center">
+Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +</div>
    + ## Installation `go get -u go.uber.org/zap` @@ -64,43 +73,46 @@ id="anchor-versions">[1](#footnote-versions) Log a message and 10 fields: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 3131 ns/op | 5 allocs/op | -| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | -| zerolog | 16154 ns/op | 90 allocs/op | -| lion | 16341 ns/op | 111 allocs/op | -| go-kit | 17049 ns/op | 126 allocs/op | -| logrus | 23662 ns/op | 142 allocs/op | -| log15 | 36351 ns/op | 149 allocs/op | -| apex/log | 42530 ns/op | 126 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 380 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | -| zerolog | 321 ns/op | 0 allocs/op | -| lion | 7092 ns/op | 39 allocs/op | -| go-kit | 20226 ns/op | 115 allocs/op | -| logrus | 22312 ns/op | 130 allocs/op | -| log15 | 28788 ns/op | 79 allocs/op | -| apex/log | 42063 ns/op | 115 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 361 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | -| zerolog | 323 ns/op | 0 allocs/op | -| standard library | 575 ns/op | 2 allocs/op | -| go-kit | 922 ns/op | 13 allocs/op | -| lion | 1413 ns/op | 10 allocs/op | -| logrus | 2291 ns/op | 27 allocs/op | -| apex/log | 3690 ns/op | 11 allocs/op | -| log15 | 5954 ns/op | 26 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,17 +132,18 @@ standard.
    -Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go index 5be3704..abfccb5 100644 --- a/vendor/go.uber.org/zap/array.go +++ b/vendor/go.uber.org/zap/array.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "time" "go.uber.org/zap/zapcore" @@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field { return Array(key, int8s(nums)) } +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. 
+type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + // Strings constructs a field that carries a slice of strings. func Strings(key string, ss []string) Field { return Array(key, stringArray(ss)) } +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} + // Times constructs a field that carries a slice of time.Times. func Times(key string, ts []time.Time) Field { return Array(key, times(ts)) diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 7592e8c..0b8540c 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -23,7 +23,10 @@ // package's zero-allocation formatters. 
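A compact usage sketch of the `Objects` and `ObjectValues` constructors documented above; the `Author` and `Request` types exist only for this example:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author implements zapcore.ObjectMarshaler on the value receiver,
// so []Author works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request implements zapcore.ObjectMarshaler on the pointer receiver,
// so []Request needs zap.ObjectValues (or []*Request with zap.Objects).
type Request struct{ URL string }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.URL)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	authors := []Author{{Name: "alice"}, {Name: "bob"}}
	requests := []Request{{URL: "http://example.com"}}

	logger.Info("loading article", zap.Objects("authors", authors))
	logger.Info("sending requests", zap.ObjectValues("requests", requests))
}
```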
package buffer // import "go.uber.org/zap/buffer" -import "strconv" +import ( + "strconv" + "time" +) const _size = 1024 // by default, create 1 KiB buffers @@ -39,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } +// AppendBytes writes the given slice of bytes to the Buffer. +func (b *Buffer) AppendBytes(v []byte) { + b.bs = append(b.bs, v...) +} + // AppendString writes a string to the Buffer. func (b *Buffer) AppendString(s string) { b.bs = append(b.bs, s...) @@ -49,6 +57,11 @@ func (b *Buffer) AppendInt(i int64) { b.bs = strconv.AppendInt(b.bs, i, 10) } +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + // AppendUint appends an unsigned integer to the underlying buffer (assuming // base 10). func (b *Buffer) AppendUint(i uint64) { @@ -98,6 +111,24 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + // TrimNewline trims any final "\n" byte from the end of the buffer. func (b *Buffer) TrimNewline() { if i := len(b.bs) - 1; i >= 0 { diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go index 8fb3e20..8463233 100644 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -20,25 +20,29 @@ package buffer -import "sync" +import ( + "go.uber.org/zap/internal/pool" +) // A Pool is a type-safe wrapper around a sync.Pool. type Pool struct { - p *sync.Pool + p *pool.Pool[*Buffer] } // NewPool constructs a new Pool. func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } } // Get retrieves a Buffer from the pool, creating one if necessary. func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) + buf := p.p.Get() buf.Reset() buf.pool = p return buf diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/checklicense.sh similarity index 100% rename from vendor/go.uber.org/zap/check_license.sh rename to vendor/go.uber.org/zap/checklicense.sh diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 6fe17d9..e76e4e6 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -21,6 +21,7 @@ package zap import ( + "errors" "sort" "time" @@ -31,10 +32,14 @@ import ( // global CPU and I/O load that logging puts on your process while attempting // to preserve a representative subset of your logs. // -// Values configured here are per-second. See zapcore.NewSampler for details. +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. 
type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` } // Config offers a declarative way to construct a logger. It doesn't do @@ -90,12 +95,39 @@ type Config struct { // NewProductionEncoderConfig returns an opinionated EncoderConfig for // production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewProductionEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ TimeKey: "ts", LevelKey: "level", NameKey: "logger", CallerKey: "caller", + FunctionKey: zapcore.OmitKey, MessageKey: "msg", StacktraceKey: "stacktrace", LineEnding: zapcore.DefaultLineEnding, @@ -106,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { } } -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. // -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. func NewProductionConfig() Config { return Config{ Level: NewAtomicLevelAt(InfoLevel), @@ -128,6 +171,32 @@ func NewProductionConfig() Config { // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for // development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). 
+// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ // Keys can be anything except the empty string. @@ -135,6 +204,7 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { LevelKey: "L", NameKey: "N", CallerKey: "C", + FunctionKey: zapcore.OmitKey, MessageKey: "M", StacktraceKey: "S", LineEnding: zapcore.DefaultLineEnding, @@ -145,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { } } -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. // -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. func NewDevelopmentConfig() Config { return Config{ Level: NewAtomicLevelAt(DebugLevel), @@ -174,6 +247,10 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) { return nil, err } + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + log := New( zapcore.NewCore(enc, sink, cfg.Level), cfg.buildOptions(errSink)..., @@ -203,9 +280,19 @@ func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { opts = append(opts, AddStacktrace(stackLevel)) } - if cfg.Sampling != nil { + if scfg := cfg.Sampling; scfg != nil { opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) })) } diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go index 8638dd1..3c50d7b 100644 --- a/vendor/go.uber.org/zap/doc.go +++ b/vendor/go.uber.org/zap/doc.go @@ -32,7 +32,7 @@ // they need to count every allocation and when they'd prefer a more familiar, // loosely typed API. 
// -// Choosing a Logger +// # Choosing a Logger // // In contexts where performance is nice, but not critical, use the // SugaredLogger. It's 4-10x faster than other structured logging packages and @@ -41,14 +41,15 @@ // variadic number of key-value pairs. (For more advanced use cases, they also // accept strongly typed fields - see the SugaredLogger.With documentation for // details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") // // By default, loggers are unbuffered. However, since zap's low-level APIs // allow buffering, calling Sync before letting your process exit is a good @@ -57,32 +58,35 @@ // In the rare contexts where every microsecond and every allocation matter, // use the Logger. It's even faster than the SugaredLogger and allocates far // less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) // // Choosing between the Logger and SugaredLogger doesn't need to be an // application-wide decision: converting between the two is simple and // inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() // -// Configuring Zap +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap // // The simplest way to build a Logger is to use zap's opinionated presets: // NewExample, NewProduction, and NewDevelopment. These presets build a logger // with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() // // Presets are fine for small projects, but larger projects and organizations // naturally require a bit more customization. For most users, zap's Config @@ -94,7 +98,7 @@ // go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration // example for sample code. // -// Extending Zap +// # Extending Zap // // The zap package itself is a relatively thin wrapper around the interfaces // in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., @@ -106,7 +110,7 @@ // Similarly, package authors can use the high-performance Encoder and Core // implementations in the zapcore package to build their own loggers. // -// Frequently Asked Questions +// # Frequently Asked Questions // // An FAQ covering everything from installation errors to design decisions is // available at https://github.com/uber-go/zap/blob/master/FAQ.md. 
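The config.go hunk above adds a SamplingConfig.Hook field and moves the sampler to zapcore.NewSamplerWithOptions. As a minimal sketch of how that hook might be wired up (not part of this patch; the dropped-entry counter and log message are illustrative, and zapcore.LogDropped is assumed from the updated zapcore API):

	package main

	import (
		"sync/atomic"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		var dropped atomic.Int64

		cfg := zap.NewProductionConfig()
		// Observe each sampling decision; count the entries the sampler drops.
		cfg.Sampling.Hook = func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped != 0 {
				dropped.Add(1)
			}
		}

		logger := zap.Must(cfg.Build())
		defer logger.Sync()
		logger.Info("hello from the sampled logger")
	}

The 100:100 production defaults described above still apply; the hook only observes the sampler's decisions, it does not change them.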
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go index 2e9d3c3..caa04ce 100644 --- a/vendor/go.uber.org/zap/encoder.go +++ b/vendor/go.uber.org/zap/encoder.go @@ -62,6 +62,10 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco } func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + _encoderMutex.RLock() defer _encoderMutex.RUnlock() if name == "" { diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go index 65982a5..45f7b83 100644 --- a/vendor/go.uber.org/zap/error.go +++ b/vendor/go.uber.org/zap/error.go @@ -21,14 +21,13 @@ package zap import ( - "sync" - + "go.uber.org/zap/internal/pool" "go.uber.org/zap/zapcore" ) -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Error is shorthand for the common idiom NamedError("error", err). func Error(err error) Field { @@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { // potentially an "errorVerbose" attribute, we need to wrap it in a // type that implements LogObjectMarshaler. To prevent this from // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) + elem := _errArrayElemPool.Get() elem.error = errs[i] - arr.AppendObject(elem) + err := arr.AppendObject(elem) elem.error = nil _errArrayElemPool.Put(elem) + if err != nil { + return err + } } return nil } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 5130e13..6743930 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -25,6 +25,7 @@ import ( "math" "time" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -32,12 +33,23 @@ import ( // improves the navigability of this package's API documentation. type Field = zapcore.Field +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + // Skip constructs a no-op field, which is often useful when handling invalid // inputs in other Field constructors. func Skip() Field { return Field{Type: zapcore.SkipType} } +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + // Binary constructs a field that carries an opaque binary blob. // // Binary data is serialized in an encoding-appropriate format. For example, @@ -56,6 +68,15 @@ func Bool(key string, val bool) Field { return Field{Key: key, Type: zapcore.BoolType, Integer: ival} } +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + // ByteString constructs a field that carries UTF-8 encoded text as a []byte. // To log opaque binary blobs (which aren't necessarily valid UTF-8), use // Binary. 
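The field.go hunks above and below add pointer-valued constructors (Boolp, Stringp, Intp, and the rest) that log a nil pointer as an explicit nil instead of dereferencing it. A small sketch of the intended behavior (not part of this patch; the logger choice and field names are illustrative):

	package main

	import "go.uber.org/zap"

	func main() {
		logger := zap.NewExample()
		defer logger.Sync()

		var user *string // nil pointer: logged as an explicit null
		retries := 3

		logger.Info("request finished",
			zap.Stringp("user", user),
			zap.Intp("retries", &retries),
		)
		// With the example logger this prints roughly:
		// {"level":"info","msg":"request finished","user":null,"retries":3}
	}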
@@ -70,6 +91,15 @@ func Complex128(key string, val complex128) Field { return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} } +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + // Complex64 constructs a field that carries a complex number. Unlike most // numeric fields, this costs an allocation (to convert the complex64 to // interface{}). @@ -77,6 +107,15 @@ func Complex64(key string, val complex64) Field { return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} } +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + // Float64 constructs a field that carries a float64. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. @@ -84,6 +123,15 @@ func Float64(key string, val float64) Field { return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} } +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + // Float32 constructs a field that carries a float32. The way the // floating-point value is represented is encoder-dependent, so marshaling is // necessarily lazy. @@ -91,66 +139,183 @@ func Float32(key string, val float32) Field { return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} } +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + // Int constructs a field with the given key and value. func Int(key string, val int) Field { return Int64(key, int64(val)) } +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + // Int64 constructs a field with the given key and value. func Int64(key string, val int64) Field { return Field{Key: key, Type: zapcore.Int64Type, Integer: val} } +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + // Int32 constructs a field with the given key and value. func Int32(key string, val int32) Field { return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} } +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + // Int16 constructs a field with the given key and value. 
func Int16(key string, val int16) Field { return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} } +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + // Int8 constructs a field with the given key and value. func Int8(key string, val int8) Field { return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} } +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + // String constructs a field with the given key and value. func String(key string, val string) Field { return Field{Key: key, Type: zapcore.StringType, String: val} } +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + // Uint constructs a field with the given key and value. func Uint(key string, val uint) Field { return Uint64(key, uint64(val)) } +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + // Uint64 constructs a field with the given key and value. func Uint64(key string, val uint64) Field { return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} } +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + // Uint32 constructs a field with the given key and value. func Uint32(key string, val uint32) Field { return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} } +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + // Uint16 constructs a field with the given key and value. func Uint16(key string, val uint16) Field { return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} } +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + // Uint8 constructs a field with the given key and value. func Uint8(key string, val uint8) Field { return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} } +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + // Uintptr constructs a field with the given key and value. 
func Uintptr(key string, val uintptr) Field { return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} } +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + // Reflect constructs a field with the given key and an arbitrary object. It uses // an encoding-appropriate, reflection-based function to lazily serialize nearly // any object into the logging context, but it's relatively slow and @@ -180,19 +345,37 @@ func Stringer(key string, val fmt.Stringer) Field { // Time constructs a Field with the given key and value. The encoder // controls how the time is serialized. func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} } +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + // Stack constructs a field that stores a stacktrace of the current goroutine // under provided key. Keep in mind that taking a stacktrace is eager and // expensive (relatively speaking); this function both makes an allocation and // takes about two microseconds. func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { // Returning the stacktrace as a string costs an allocation, but saves us // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. - return String(key, takeStacktrace()) + return String(key, stacktrace.Take(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. The encoder @@ -201,6 +384,15 @@ func Duration(key string, val time.Duration) Field { return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} } +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + // Object constructs a field with the given key and ObjectMarshaler. It // provides a flexible, but still type-safe and efficient, way to add map- or // struct-like user-defined types to the logging context. The struct's @@ -209,6 +401,75 @@ func Object(key string, val zapcore.ObjectMarshaler) Field { return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. 
+func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. @@ -217,94 +478,138 @@ func Object(key string, val zapcore.ObjectMarshaler) Field { // them. To minimize surprises, []byte values are treated as binary blobs, byte // values are treated as uint8, and runes are always treated as integers. 
func Any(key string, value interface{}) Field { - switch val := value.(type) { + var c interface{ Any(string, any) Field } + + switch value.(type) { case zapcore.ObjectMarshaler: - return Object(key, val) + c = anyFieldC[zapcore.ObjectMarshaler](Object) case zapcore.ArrayMarshaler: - return Array(key, val) + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) case bool: - return Bool(key, val) + c = anyFieldC[bool](Bool) + case *bool: + c = anyFieldC[*bool](Boolp) case []bool: - return Bools(key, val) + c = anyFieldC[[]bool](Bools) case complex128: - return Complex128(key, val) + c = anyFieldC[complex128](Complex128) + case *complex128: + c = anyFieldC[*complex128](Complex128p) case []complex128: - return Complex128s(key, val) + c = anyFieldC[[]complex128](Complex128s) case complex64: - return Complex64(key, val) + c = anyFieldC[complex64](Complex64) + case *complex64: + c = anyFieldC[*complex64](Complex64p) case []complex64: - return Complex64s(key, val) + c = anyFieldC[[]complex64](Complex64s) case float64: - return Float64(key, val) + c = anyFieldC[float64](Float64) + case *float64: + c = anyFieldC[*float64](Float64p) case []float64: - return Float64s(key, val) + c = anyFieldC[[]float64](Float64s) case float32: - return Float32(key, val) + c = anyFieldC[float32](Float32) + case *float32: + c = anyFieldC[*float32](Float32p) case []float32: - return Float32s(key, val) + c = anyFieldC[[]float32](Float32s) case int: - return Int(key, val) + c = anyFieldC[int](Int) + case *int: + c = anyFieldC[*int](Intp) case []int: - return Ints(key, val) + c = anyFieldC[[]int](Ints) case int64: - return Int64(key, val) + c = anyFieldC[int64](Int64) + case *int64: + c = anyFieldC[*int64](Int64p) case []int64: - return Int64s(key, val) + c = anyFieldC[[]int64](Int64s) case int32: - return Int32(key, val) + c = anyFieldC[int32](Int32) + case *int32: + c = anyFieldC[*int32](Int32p) case []int32: - return Int32s(key, val) + c = anyFieldC[[]int32](Int32s) case int16: - return Int16(key, val) + c = anyFieldC[int16](Int16) + case *int16: + c = anyFieldC[*int16](Int16p) case []int16: - return Int16s(key, val) + c = anyFieldC[[]int16](Int16s) case int8: - return Int8(key, val) + c = anyFieldC[int8](Int8) + case *int8: + c = anyFieldC[*int8](Int8p) case []int8: - return Int8s(key, val) + c = anyFieldC[[]int8](Int8s) case string: - return String(key, val) + c = anyFieldC[string](String) + case *string: + c = anyFieldC[*string](Stringp) case []string: - return Strings(key, val) + c = anyFieldC[[]string](Strings) case uint: - return Uint(key, val) + c = anyFieldC[uint](Uint) + case *uint: + c = anyFieldC[*uint](Uintp) case []uint: - return Uints(key, val) + c = anyFieldC[[]uint](Uints) case uint64: - return Uint64(key, val) + c = anyFieldC[uint64](Uint64) + case *uint64: + c = anyFieldC[*uint64](Uint64p) case []uint64: - return Uint64s(key, val) + c = anyFieldC[[]uint64](Uint64s) case uint32: - return Uint32(key, val) + c = anyFieldC[uint32](Uint32) + case *uint32: + c = anyFieldC[*uint32](Uint32p) case []uint32: - return Uint32s(key, val) + c = anyFieldC[[]uint32](Uint32s) case uint16: - return Uint16(key, val) + c = anyFieldC[uint16](Uint16) + case *uint16: + c = anyFieldC[*uint16](Uint16p) case []uint16: - return Uint16s(key, val) + c = anyFieldC[[]uint16](Uint16s) case uint8: - return Uint8(key, val) + c = anyFieldC[uint8](Uint8) + case *uint8: + c = anyFieldC[*uint8](Uint8p) case []byte: - return Binary(key, val) + c = anyFieldC[[]byte](Binary) case uintptr: - return 
Uintptr(key, val) + c = anyFieldC[uintptr](Uintptr) + case *uintptr: + c = anyFieldC[*uintptr](Uintptrp) case []uintptr: - return Uintptrs(key, val) + c = anyFieldC[[]uintptr](Uintptrs) case time.Time: - return Time(key, val) + c = anyFieldC[time.Time](Time) + case *time.Time: + c = anyFieldC[*time.Time](Timep) case []time.Time: - return Times(key, val) + c = anyFieldC[[]time.Time](Times) case time.Duration: - return Duration(key, val) + c = anyFieldC[time.Duration](Duration) + case *time.Duration: + c = anyFieldC[*time.Duration](Durationp) case []time.Duration: - return Durations(key, val) + c = anyFieldC[[]time.Duration](Durations) case error: - return NamedError(key, val) + c = anyFieldC[error](NamedError) case []error: - return Errors(key, val) + c = anyFieldC[[]error](Errors) case fmt.Stringer: - return Stringer(key, val) + c = anyFieldC[fmt.Stringer](Stringer) default: - return Reflect(key, val) + c = anyFieldC[any](Reflect) } + + return c.Any(key, value) } diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock deleted file mode 100644 index 881b462..0000000 --- a/vendor/go.uber.org/zap/glide.lock +++ /dev/null @@ -1,76 +0,0 @@ -hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 -updated: 2017-07-22T18:06:49.598185334-07:00 -imports: -- name: go.uber.org/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -testImports: -- name: github.com/apex/log - version: d9b960447bfa720077b2da653cc79e533455b499 - subpackages: - - handlers/json -- name: github.com/axw/gocov - version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/go-kit/kit - version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d - subpackages: - - log -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/golang/lint - version: c5fb716d6688a859aae56d26d3e6070808df29f7 - subpackages: - - golint -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mattn/go-colorable - version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a -- name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe -- name: github.com/mattn/goveralls - version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 -- name: github.com/pborman/uuid - version: e790cca94e6cc75c7064b1332e63811d4aae1a53 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/zerolog - version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 -- name: github.com/satori/go.uuid - version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b -- name: github.com/sirupsen/logrus - version: 7dd06bf38e1e13df288d471a57d5adbac106be9e -- name: github.com/stretchr/testify - version: f6abca593680b2315d2075e0f5e2a9751e3f431a - subpackages: - - assert - - require -- name: go.pedge.io/lion - version: 87958e8713f1fa138d993087133b97e976642159 -- name: golang.org/x/sys - version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f - subpackages: - - unix -- name: golang.org/x/tools - version: 
496819729719f9d07692195e0a94d6edd2251389 - subpackages: - - cover -- name: gopkg.in/inconshreveable/log15.v2 - version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f - subpackages: - - stack - - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml index 9441259..8e1d05e 100644 --- a/vendor/go.uber.org/zap/glide.yaml +++ b/vendor/go.uber.org/zap/glide.yaml @@ -22,12 +22,11 @@ testImport: - package: github.com/mattn/goveralls - package: github.com/pborman/uuid - package: github.com/pkg/errors -- package: go.pedge.io/lion - package: github.com/rs/zerolog - package: golang.org/x/tools subpackages: - cover -- package: github.com/golang/lint +- package: golang.org/x/lint subpackages: - golint - package: github.com/axw/gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go index d02232e..3cb46c9 100644 --- a/vendor/go.uber.org/zap/global.go +++ b/vendor/go.uber.org/zap/global.go @@ -31,7 +31,7 @@ import ( ) const ( - _stdLogDefaultDepth = 2 + _stdLogDefaultDepth = 1 _loggerWriterDepth = 2 _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + "https://github.com/uber-go/zap/issues/new and reference this error: %v" diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 1b0ecac..2be8f65 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -22,7 +22,9 @@ package zap import ( "encoding/json" + "errors" "fmt" + "io" "net/http" "go.uber.org/zap/zapcore" @@ -31,51 +33,108 @@ import ( // ServeHTTP is a simple JSON endpoint that can report on or change the current // logging level. // -// GET requests return a JSON description of the current logging level. PUT -// requests change the logging level and expect a payload like: -// {"level":"info"} +// # GET // -// It's perfectly safe to change the logging level while a program is running. +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. 
+// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { type errorResponse struct { Error string `json:"error"` } type payload struct { - Level *zapcore.Level `json:"level"` + Level zapcore.Level `json:"level"` } enc := json.NewEncoder(w) switch r.Method { - case http.MethodGet: - current := lvl.Level() - enc.Encode(payload{Level: &current}) + return enc.Encode(payload{Level: lvl.Level()}) case http.MethodPut: - var req payload - - if errmess := func() string { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return fmt.Sprintf("Request body must be well-formed JSON: %v", err) - } - if req.Level == nil { - return "Must specify a logging level." - } - return "" - }(); errmess != "" { + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: errmess}) - return + return enc.Encode(errorResponse{Error: err.Error()}) } - - lvl.SetLevel(*req.Level) - enc.Encode(req) + lvl.SetLevel(requestedLvl) + return enc.Encode(payload{Level: lvl.Level()}) default: w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ + return enc.Encode(errorResponse{ Error: "Only GET and PUT are supported.", }) } } + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go index dfc5b05..f673f99 100644 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -24,24 +24,25 @@ package exit import "os" -var real = func() { os.Exit(1) } +var _exit = os.Exit -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) } // A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct { Exited bool - prev func() + Code int + prev func(code int) } // Stub substitutes a fake for the call to os.Exit(1). func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit + s := &StubbedExit{prev: _exit} + _exit = s.exit return s } @@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit { // Unstub restores the previous exit function. func (se *StubbedExit) Unstub() { - real = se.prev + _exit = se.prev } -func (se *StubbedExit) exit() { +func (se *StubbedExit) exit(code int) { se.Exited = true + se.Code = code } diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 0000000..40bfed8 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,37 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go new file mode 100644 index 0000000..60e9d2c --- /dev/null +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. +package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go new file mode 100644 index 0000000..82af755 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package stacktrace provides support for gathering stack traces +// efficiently. +package stacktrace + +import ( + "runtime" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) + +// Stack is a captured stack trace. +type Stack struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. 
+ // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// Depth specifies how deep of a stack trace should be captured. +type Depth int + +const ( + // First captures only the first frame. + First Depth = iota + + // Full captures the entire call stack, allocating more + // storage for it if needed. + Full +) + +// Capture captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// Capture. +// +// The caller must call Free on the returned stacktrace after using it. +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() + + switch depth { + case First: + stack.pcs = stack.storage[:1] + case Full: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == Full { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *Stack) Free() { + st.frames = nil + st.pcs = nil + _stackPool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *Stack) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *Stack) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := NewFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *Formatter) FormatStack(stack *Stack) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. 
+func (sf *Formatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go index 3567a9a..155b208 100644 --- a/vendor/go.uber.org/zap/level.go +++ b/vendor/go.uber.org/zap/level.go @@ -21,7 +21,9 @@ package zap import ( - "go.uber.org/atomic" + "sync/atomic" + + "go.uber.org/zap/internal" "go.uber.org/zap/zapcore" ) @@ -70,12 +72,14 @@ type AtomicLevel struct { l *atomic.Int32 } +var _ internal.LeveledEnabler = AtomicLevel{} + // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // enabled. func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl } // NewAtomicLevelAt is a convenience function that creates an AtomicLevel @@ -86,6 +90,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { return a } +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + // Enabled implements the zapcore.LevelEnabler interface, which allows the // AtomicLevel to be used in place of traditional static levels. func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index dc8f6e3..c4d3003 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -22,12 +22,12 @@ package zap import ( "fmt" - "io/ioutil" + "io" "os" - "runtime" "strings" - "time" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -42,13 +42,18 @@ type Logger struct { core zapcore.Core development bool + addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + name string errorOutput zapcore.WriteSyncer - addCaller bool - addStack zapcore.LevelEnabler + addStack zapcore.LevelEnabler callerSkip int + + clock zapcore.Clock } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -69,6 +74,7 @@ func New(core zapcore.Core, options ...Option) *Logger { core: core, errorOutput: zapcore.Lock(os.Stderr), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } return log.WithOptions(options...) } @@ -81,8 +87,9 @@ func New(core zapcore.Core, options ...Option) *Logger { func NewNop() *Logger { return &Logger{ core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), + errorOutput: zapcore.AddSync(io.Discard), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } } @@ -102,6 +109,19 @@ func NewDevelopment(options ...Option) (*Logger, error) { return NewDevelopmentConfig().Build(options...) } +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. 
It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + // NewExample builds a Logger that's designed for use in zap's testable // examples. It writes DebugLevel and above logs to standard out as JSON, but // omits the timestamp and calling function to keep example output @@ -155,7 +175,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger { } // With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. func (log *Logger) With(fields ...Field) *Logger { if len(fields) == 0 { return log @@ -165,6 +186,35 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + // Check returns a CheckedEntry if logging a message at the specified level // is enabled. It's a completely optional optimization; in high-performance // applications, Check can help avoid allocating a slice to hold fields. @@ -172,6 +222,16 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return log.check(lvl, msg) } +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + // Debug logs a message at DebugLevel. The message includes any fields passed // at the log site, as well as any fields accumulated on the logger. func (log *Logger) Debug(msg string, fields ...Field) { @@ -248,21 +308,35 @@ func (log *Logger) Core() zapcore.Core { return log.core } +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. 
+func (log *Logger) Name() string { + return log.name +} + func (log *Logger) clone() *Logger { - copy := *log - return &copy + clone := *log + return &clone } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. const callerSkipOffset = 2 + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + // Create basic checked entry thru the core; this will be non-nil if the // log message will actually be written somewhere. ent := zapcore.Entry{ LoggerName: log.name, - Time: time.Now(), + Time: log.clock.Now(), Level: lvl, Message: msg, } @@ -272,12 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - ce = ce.Should(ent, zapcore.WriteThenFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -290,16 +364,72 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktrace.First + if addStack { + stackDepth = stacktrace.Full + } + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _ = log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + if log.addCaller { - ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) - if !ce.Entry.Caller.Defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) - log.errorOutput.Sync() + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, } } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = Stack("").String + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := stacktrace.NewFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() } return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. + if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 7a6b0fc..43d357a 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -20,7 +20,11 @@ package zap -import "go.uber.org/zap/zapcore" +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) // An Option configures a Logger. type Option interface { @@ -82,11 +86,18 @@ func Development() Option { }) } -// AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { return optionFunc(func(log *Logger) { - log.addCaller = true + log.addCaller = enabled }) } @@ -107,3 +118,65 @@ func AddStacktrace(lvl zapcore.LevelEnabler) Option { log.addStack = lvl }) } + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + +// OnFatal sets the action to take on fatal logs. +// +// Deprecated: Use [WithFatalHook] instead. +func OnFatal(action zapcore.CheckWriteAction) Option { + return WithFatalHook(action) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. +// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. 
+// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. +func WithFatalHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onFatal = hook + }) +} + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index ff0becf..499772a 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -26,6 +26,7 @@ import ( "io" "net/url" "os" + "path/filepath" "strings" "sync" @@ -34,23 +35,7 @@ import ( const schemeFile = "file" -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} +var _sinkRegistry = newSinkRegistry() // Sink defines the interface to write to and close logger destinations. type Sink interface { @@ -58,10 +43,6 @@ type Sink interface { io.Closer } -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - type errSinkNotFound struct { scheme string } @@ -70,16 +51,30 @@ func (e *errSinkNotFound) Error() string { return fmt.Sprintf("no sink found for scheme %q", e.scheme) } -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. -func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type sinkRegistry struct { + mu sync.Mutex + factories map[string]func(*url.URL) (Sink, error) // keyed by scheme + openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile +} + +func newSinkRegistry() *sinkRegistry { + sr := &sinkRegistry{ + factories: make(map[string]func(*url.URL) (Sink, error)), + openFile: os.OpenFile, + } + // Infallible operation: the registry is empty, so we can't have a conflict. + _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + return sr +} + +// RegisterScheme registers the given factory for the specific scheme. 
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + sr.mu.Lock() + defer sr.mu.Unlock() if scheme == "" { return errors.New("can't register a sink factory for empty string") @@ -88,14 +83,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { if err != nil { return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) } - if _, ok := _sinkFactories[normalized]; ok { + if _, ok := sr.factories[normalized]; ok { return fmt.Errorf("sink factory already registered for scheme %q", normalized) } - _sinkFactories[normalized] = factory + sr.factories[normalized] = factory return nil } -func newSink(rawURL string) (Sink, error) { +func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) { + // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to + // the drive, and path is unset unless `c:/log.txt` is used. + // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file. + // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows. + if filepath.IsAbs(rawURL) { + return sr.newFileSinkFromPath(rawURL) + } + u, err := url.Parse(rawURL) if err != nil { return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) @@ -104,16 +107,27 @@ func newSink(rawURL string) (Sink, error) { u.Scheme = schemeFile } - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() + sr.mu.Lock() + factory, ok := sr.factories[u.Scheme] + sr.mu.Unlock() if !ok { return nil, &errSinkNotFound{u.Scheme} } return factory(u) } -func newFileSink(u *url.URL) (Sink, error) { +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 0.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3983#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { if u.User != nil { return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) } @@ -130,13 +144,18 @@ func newFileSink(u *url.URL) (Sink, error) { if hn := u.Hostname(); hn != "" && hn != "localhost" { return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) } - switch u.Path { + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { case "stdout": return nopCloserSink{os.Stdout}, nil case "stderr": return nopCloserSink{os.Stderr}, nil } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go deleted file mode 100644 index 100fac2..0000000 --- a/vendor/go.uber.org/zap/stacktrace.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "runtime" - "strings" - "sync" - - "go.uber.org/zap/internal/bufferpool" -) - -const _zapPackage = "go.uber.org/zap" - -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } - - // We add "." and "/" suffixes to the package name to ensure we only match - // the exact package and not any package with the same prefix. - _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") - _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) -) - -func takeStacktrace() string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Counters and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) - } - - i := 0 - skipZapFrames := true // skip all consecutive zap frames at the beginning. - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) - - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if skipZapFrames && isZapFrame(frame.Function) { - continue - } else { - skipZapFrames = false - } - - if i != 0 { - buffer.AppendByte('\n') - } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) - } - - return buffer.String() -} - -func isZapFrame(function string) bool { - for _, prefix := range _zapStacktracePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - - // We can't use a prefix match here since the location of the vendor - // directory affects the prefix. Instead we do a contains match. 
- for _, contains := range _zapStacktraceVendorContains { - if strings.Contains(function, contains) { - return true - } - } - - return false -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} - -func addPrefix(prefix string, ss ...string) []string { - withPrefix := make([]string, len(ss)) - for i, s := range ss { - withPrefix[i] = prefix + s - } - return withPrefix -} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 77ca227..8904cd0 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -31,6 +31,7 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." ) // A SugaredLogger wraps the base Logger functionality in a slower, but less @@ -38,10 +39,19 @@ const ( // method. // // Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging type SugaredLogger struct { base *Logger } @@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger { return &SugaredLogger{base: s.base.Named(name)} } +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + // With adds a variadic number of fields to the logging context. It accepts a // mix of strongly-typed Field objects and loosely-typed key-value pairs. When // processing pairs, the first element of the pair is used as the field key // and the second as the field value. // // For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// // is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) // // Note that the keys in key-value pairs should be strings. In development, // passing a non-string key panics. 
In production, the logger is more @@ -92,83 +115,138 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } -// Debug uses fmt.Sprint to construct and log a message. +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) } -// Info uses fmt.Sprint to construct and log a message. +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Info(args ...interface{}) { s.log(InfoLevel, "", args, nil) } -// Warn uses fmt.Sprint to construct and log a message. +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Warn(args ...interface{}) { s.log(WarnLevel, "", args, nil) } -// Error uses fmt.Sprint to construct and log a message. +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Error(args ...interface{}) { s.log(ErrorLevel, "", args, nil) } -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) DPanic(args ...interface{}) { s.log(DPanicLevel, "", args, nil) } -// Panic uses fmt.Sprint to construct and log a message, then panics. +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Panic(args ...interface{}) { s.log(PanicLevel, "", args, nil) } -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. 
func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } -// Debugf uses fmt.Sprintf to log a templated message. +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. func (s *SugaredLogger) Debugf(template string, args ...interface{}) { s.log(DebugLevel, template, args, nil) } -// Infof uses fmt.Sprintf to log a templated message. +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. func (s *SugaredLogger) Infof(template string, args ...interface{}) { s.log(InfoLevel, template, args, nil) } -// Warnf uses fmt.Sprintf to log a templated message. +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. func (s *SugaredLogger) Warnf(template string, args ...interface{}) { s.log(WarnLevel, template, args, nil) } -// Errorf uses fmt.Sprintf to log a templated message. +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. func (s *SugaredLogger) Errorf(template string, args ...interface{}) { s.log(ErrorLevel, template, args, nil) } -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { s.log(DPanicLevel, template, args, nil) } -// Panicf uses fmt.Sprintf to log a templated message, then panics. +// Panicf formats the message according to the format specifier +// and panics. func (s *SugaredLogger) Panicf(template string, args ...interface{}) { s.log(PanicLevel, template, args, nil) } -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +// Fatalf formats the message according to the format specifier +// and calls os.Exit. func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // // When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) +// +// s.With(keysAndValues).Debug(msg) func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { s.log(DebugLevel, msg, nil, keysAndValues) } @@ -210,11 +288,61 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln logs a message at [InfoLevel]. 
+// Spaces are always added between arguments. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + // Sync flushes any buffered log entries. func (s *SugaredLogger) Sync() error { return s.base.Sync() } +// log message with Sprint, Sprintf, or neither. func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { // If logging at this level is completely disabled, skip the overhead of // string formatting. @@ -222,28 +350,60 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf return } - // Format with Sprint, Sprintf, or neither. - msg := template - if msg == "" && len(fmtArgs) > 0 { - msg = fmt.Sprint(fmtArgs...) - } else if msg != "" && len(fmtArgs) > 0 { - msg = fmt.Sprintf(template, fmtArgs...) + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return } + msg := getMessageln(fmtArgs) if ce := s.base.Check(lvl, msg); ce != nil { ce.Write(s.sweetenFields(context)...) } } +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { if len(args) == 0 { return nil } - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) for i := 0; i < len(args); { // This is a strongly-typed field. Consume it and move on. 
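Aside, not part of the vendored patch itself: a minimal sketch of how the four sugared formatting paths documented in the sugar.go hunks above behave, using only the public zap API that this update vendors (zap.NewExample, Logger.Sugar, and the new ...ln variants). Info routes through getMessage with fmt.Sprint semantics, Infof applies the fmt.Sprintf template, Infow attaches loosely-typed key-value context, and Infoln uses getMessageln's fmt.Sprintln spacing.

	package main

	import "go.uber.org/zap"

	func main() {
		logger := zap.NewExample()
		defer logger.Sync()
		sugar := logger.Sugar()

		// Sprint semantics: spaces only between non-string operands.
		sugar.Info("processed ", 42, " records")

		// Sprintf semantics: the first argument is the template.
		sugar.Infof("processed %d records in %v", 42, "1.2s")

		// Structured context ("info with"): variadic key-value pairs.
		sugar.Infow("processed records", "count", 42, "elapsed", "1.2s")

		// Sprintln semantics: spaces always added, trailing newline trimmed.
		sugar.Infoln("processed", 42, "records")
	}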
@@ -253,9 +413,21 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { continue } + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + // Make sure this element isn't a dangling key. if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) break } @@ -276,7 +448,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // If we encountered any invalid key-value pairs, log an error. if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) } return fields } diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 86a709a..06768c6 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,6 @@ package zap import ( "fmt" "io" - "io/ioutil" "go.uber.org/zap/zapcore" @@ -49,40 +48,40 @@ import ( // os.Stdout and os.Stderr. When specified without a scheme, relative file // paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) + writers, closeAll, err := open(paths) if err != nil { return nil, nil, err } writer := CombineWriteSyncers(writers...) - return writer, close, nil + return writer, closeAll, nil } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { writers := make([]zapcore.WriteSyncer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths)) - close := func() { + closeAll := func() { for _, c := range closers { - c.Close() + _ = c.Close() } } var openErr error for _, path := range paths { - sink, err := newSink(path) + sink, err := _sinkRegistry.newSink(path) if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) continue } writers = append(writers, sink) closers = append(closers, sink) } if openErr != nil { - close() - return writers, nil, openErr + closeAll() + return nil, nil, openErr } - return writers, close, nil + return writers, closeAll, nil } // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a @@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { // using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) + return zapcore.AddSync(io.Discard) } return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000..a40e93b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... // your log destination +// bws := &zapcore.BufferedWriteSyncer{WS: ws} +// defer bws.Stop() +// +// // ... +// core := zapcore.NewCore(enc, bws, lvl) +// logger := zap.New(core) +// +// // ... +// } +// +// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs, +// waiting at most 30 seconds between flushes. +// You can customize these parameters by setting the Size or FlushInterval +// fields. +// For example, the following buffers up to 512 kB of logs before flushing them +// to Stderr, with a maximum of one minute between each flush. +// +// ws := &BufferedWriteSyncer{ +// WS: os.Stderr, +// Size: 512 * 1024, // 512 kB +// FlushInterval: time.Minute, +// } +// defer ws.Stop() +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. 
+ Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 0000000..422fd82 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index b787596..cc2b4e0 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -22,20 +22,20 @@ package zapcore import ( "fmt" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) + return _sliceEncoderPool.Get() } func putSliceEncoder(e *sliceArrayEncoder) { @@ -56,6 +56,10 @@ type consoleEncoder struct { // encoder configuration, it will omit any element whose key is set to the empty // string. func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } return consoleEncoder{newJSONEncoder(cfg, true)} } @@ -73,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. 
arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { @@ -89,12 +93,17 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, nameEncoder(ent.LoggerName, arr) } - if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } } for i := range arr.elems { if i > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } fmt.Fprint(line, arr.elems[i]) } @@ -102,7 +111,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // Add the message itself. if c.MessageKey != "" { - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendString(ent.Message) } @@ -116,17 +125,18 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, line.AppendString(ent.Stack) } - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } + line.AppendString(c.LineEnding) return line, nil } func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { context := c.jsonEncoder.Clone().(*jsonEncoder) - defer context.buf.Free() + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() addFields(context, extra) context.closeOpenNamespaces() @@ -134,14 +144,14 @@ func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { return } - c.addTabIfNecessary(line) + c.addSeparatorIfNecessary(line) line.AppendByte('{') line.Write(context.buf.Bytes()) line.AppendByte('}') } -func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { if line.Len() > 0 { - line.AppendByte('\t') + line.AppendString(c.ConsoleSeparator) } } diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go index a1ef8b0..776e93f 100644 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -69,6 +69,15 @@ type ioCore struct { out WriteSyncer } +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + func (c *ioCore) With(fields []Field) Core { clone := c.clone() addFields(clone.enc, fields) @@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error { return err } if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() } return nil } diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index f050952..0446254 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -21,6 +21,8 @@ package zapcore import ( + "encoding/json" + "io" "time" "go.uber.org/zap/buffer" @@ -31,7 +33,13 @@ import ( // behavior. 
const DefaultLineEnding = "\n" +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -85,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -109,17 +120,66 @@ func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { enc.AppendInt64(t.UnixNano()) } +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + // ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string // with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) +} + +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } } -// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are -// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to -// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. 
func (e *TimeEncoder) UnmarshalText(text []byte) error { switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder case "iso8601", "ISO8601": *e = ISO8601TimeEncoder case "millis": @@ -132,7 +192,42 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error { return nil } +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. +// +// timeEncoder: +// layout: 06/01/02 03:04pm +// +// If value is string, it uses UnmarshalText. +// +// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -146,6 +241,12 @@ func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { enc.AppendInt64(int64(d)) } +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + // StringDurationEncoder serializes a time.Duration using its built-in String // method. func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { @@ -161,6 +262,8 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { *e = StringDurationEncoder case "nanos": *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder default: *e = SecondsDurationEncoder } @@ -168,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -198,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. @@ -222,13 +331,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error { type EncoderConfig struct { // Set the keys used for each log entry. If any key is empty, that portion // of the entry is omitted. 
- MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` // Configure the primitive representations of common complex types. For // example, some users may want all time.Times serialized as floating-point // seconds since epoch, while others may prefer ISO8601 strings. @@ -239,6 +350,12 @@ type EncoderConfig struct { // Unlike the other primitive type encoders, EncodeName is optional. The // zero value falls back to FullNameEncoder. EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` } // ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a @@ -272,8 +389,8 @@ type ObjectEncoder interface { AddUint8(key string, value uint8) AddUintptr(key string, value uintptr) - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. AddReflected(key string, value interface{}) error // OpenNamespace opens an isolated namespace where all subsequent fields will // be added. Applications can use namespaces to prevent key collisions when @@ -343,6 +460,7 @@ type Encoder interface { Clone() Encoder // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. + // context, into a byte buffer and returns it. Any fields that are empty, + // including fields on the `Entry` type, should be omitted. EncodeEntry(Entry, []Field) (*buffer.Buffer, error) } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 7d9893f..459a5d7 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -22,27 +22,25 @@ package zapcore import ( "fmt" + "runtime" "strings" - "sync" "time" + "go.uber.org/multierr" "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" + "go.uber.org/zap/internal/pool" ) -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. 
+ return &CheckedEntry{ + cores: make([]Core, 4), + } +}) func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) + ce := _cePool.Get() ce.reset() return ce } @@ -70,10 +68,11 @@ func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { // EntryCaller represents the caller of a logging function. type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int + Defined bool + PC uintptr + File string + Line int + Function string } // String returns the full path and line number of the caller. @@ -136,7 +135,8 @@ func (ec EntryCaller) TrimmedPath() string { // An Entry represents a complete log message. The entry's structured context // is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. // // Entries are pooled, so any functions that accept them MUST be careful not to // retain references to them. @@ -149,6 +149,27 @@ type Entry struct { Stack string } +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + // CheckWriteAction indicates what action to take after a log entry is // processed. Actions are ordered in increasing severity. type CheckWriteAction uint8 @@ -157,23 +178,40 @@ const ( // WriteThenNoop indicates that nothing special needs to be done. It's the // default behavior. WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit // WriteThenPanic causes a panic after Write. WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. + // WriteThenFatal causes an os.Exit(1) after Write. WriteThenFatal ) +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + // CheckedEntry is an Entry together with a collection of Cores that have // already agreed to log it. // -// CheckedEntry references should be created by calling AddCore or Should on a +// CheckedEntry references should be created by calling AddCore or After on a // nil *CheckedEntry. References are returned to a pool after Write, and MUST // NOT be retained after calling their Write method. 
type CheckedEntry struct { Entry ErrorOutput WriteSyncer dirty bool // best-effort detection of pool misuse - should CheckWriteAction + after CheckWriteHook cores []Core } @@ -181,7 +219,7 @@ func (ce *CheckedEntry) reset() { ce.Entry = Entry{} ce.ErrorOutput = nil ce.dirty = false - ce.should = WriteThenNoop + ce.after = nil for i := range ce.cores { // don't keep references to cores ce.cores[i] = nil @@ -203,8 +241,8 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) - ce.ErrorOutput.Sync() + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _ = ce.ErrorOutput.Sync() // ignore error } return } @@ -214,22 +252,16 @@ func (ce *CheckedEntry) Write(fields ...Field) { for i := range ce.cores { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _ = ce.ErrorOutput.Sync() // ignore error } - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) } + putCheckedEntry(ce) } // AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be @@ -247,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { // Should sets this CheckedEntry's CheckWriteAction, which controls whether a // Core will panic or fatal after writing this log entry. Like AddCore, it's // safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { if ce == nil { ce = getCheckedEntry() ce.Entry = ent } - ce.should = should + ce.after = hook return ce } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index a67c7ba..c40df13 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -22,7 +22,9 @@ package zapcore import ( "fmt" - "sync" + "reflect" + + "go.uber.org/zap/internal/pool" ) // Encodes the given error into fields of an object. A field with the given @@ -35,14 +37,30 @@ import ( // causer (from github.com/pkg/errors), a ${key}Causes field is added with an // array of objects containing the errors this error was comprised of. // -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... -// ], -// } -func encodeError(key string, err error, enc ObjectEncoder) error { +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... 
+// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + basic := err.Error() enc.AddString(key, basic) @@ -66,12 +84,7 @@ type errorGroup interface { Errors() []error } -type causer interface { - // Provides access to the error that caused this error. - Cause() error -} - -// Note that errArry and errArrayElem are very similar to the version +// Note that errArray and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. @@ -85,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { } el := newErrArrayElem(errs[i]) - arr.AppendObject(el) + err := arr.AppendObject(el) el.Free() + if err != nil { + return err + } } return nil } -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Encodes any error into a {"error": ...} re-using the same errors logic. // @@ -101,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} { type errArrayElem struct{ err error } func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) + e := _errArrayElemPool.Get() e.err = err return e } diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 6a5e33e..308c978 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType @@ -65,8 +65,11 @@ const ( Int8Type // StringType indicates that the field carries a string. StringType - // TimeType indicates that the field carries a time.Time. + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType // Uint64Type indicates that the field carries a uint64. Uint64Type // Uint32Type indicates that the field carries a uint32. @@ -89,6 +92,10 @@ const ( ErrorType // SkipType indicates that the field is a no-op. SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. 
+ InlineMarshalerType ) // A Field is a marshaling operation used to add a key-value pair to a logger's @@ -112,6 +119,8 @@ func (f Field) AddTo(enc ObjectEncoder) { err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) case ObjectMarshalerType: err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) case BinaryType: enc.AddBinary(f.Key, f.Interface.([]byte)) case BoolType: @@ -145,6 +154,8 @@ func (f Field) AddTo(enc ObjectEncoder) { // Fall back to UTC if location is nil. enc.AddTime(f.Key, time.Unix(0, f.Integer)) } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) case Uint64Type: enc.AddUint64(f.Key, uint64(f.Integer)) case Uint32Type: @@ -160,9 +171,9 @@ func (f Field) AddTo(enc ObjectEncoder) { case NamespaceType: enc.OpenNamespace(f.Key) case StringerType: - enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + err = encodeStringer(f.Key, f.Interface, enc) case ErrorType: - encodeError(f.Key, f.Interface.(error), enc) + err = encodeError(f.Key, f.Interface.(error), enc) case SkipType: break default: @@ -199,3 +210,24 @@ func addFields(enc ObjectEncoder, fields []Field) { fields[i].AddTo(enc) } } + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 + defer func() { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return nil +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go index 5db4afb..198def9 100644 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -27,6 +27,11 @@ type hooked struct { funcs []func(Entry) error } +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + // RegisterHooks wraps a Core and runs a collection of user-defined callback // hooks each time a message is logged. Execution of the callbacks is blocking. // @@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core { } } +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { // Let the wrapped Core decide whether to log this message or not. This // also gives the downstream a chance to register itself directly with the diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 0000000..7a11237 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,75 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 2dc67d8..9685169 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -22,26 +22,21 @@ package zapcore import ( "encoding/base64" - "encoding/json" "math" - "sync" "time" "unicode/utf8" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) // For JSON-escaping; see jsonEncoder.safeAddString below. 
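Aside (illustrative, not part of this patch): a sketch of wrapping an existing Core with the NewIncreaseLevelCore added above so that a debug-level core only emits warnings and above. The encoder and sink choices are arbitrary.

package main

import (
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    // A debug-level core writing JSON to stderr.
    base := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        zapcore.Lock(os.Stderr),
        zapcore.DebugLevel,
    )

    // Raise the minimum level to Warn; trying to lower it would return an error.
    wrapped, err := zapcore.NewIncreaseLevelCore(base, zapcore.WarnLevel)
    if err != nil {
        panic(err)
    }

    logger := zap.New(wrapped)
    logger.Info("dropped by the level filter")
    logger.Warn("still logged")
}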
const _hex = "0123456789abcdef" -var _jsonPool = sync.Pool{New: func() interface{} { +var _jsonPool = pool.New(func() *jsonEncoder { return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} +}) func putJSONEncoder(enc *jsonEncoder) { if enc.reflectBuf != nil { @@ -64,7 +59,7 @@ type jsonEncoder struct { // for encoding generic values by reflection reflectBuf *buffer.Buffer - reflectEnc *json.Encoder + reflectEnc ReflectedEncoder } // NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder @@ -72,7 +67,9 @@ type jsonEncoder struct { // // Note that the encoder doesn't deduplicate keys, so it's possible to produce // a message like -// {"foo":"bar","foo":"baz"} +// +// {"foo":"bar","foo":"baz"} +// // This is permitted by the JSON specification, but not encouraged. Many // libraries will ignore duplicate key-value pairs (typically keeping the last // pair) when unmarshaling, but users should attempt to avoid adding duplicate @@ -82,6 +79,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder { } func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + return &jsonEncoder{ EncoderConfig: &cfg, buf: bufferpool.Get(), @@ -118,6 +126,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) { enc.AppendComplex128(val) } +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { enc.addKey(key) enc.AppendDuration(val) @@ -128,6 +141,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) { enc.AppendFloat64(val) } +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.addKey(key) enc.AppendInt64(val) @@ -136,21 +154,35 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) { func (enc *jsonEncoder) resetReflectBuf() { if enc.reflectBuf == nil { enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) } else { enc.reflectBuf.Reset() } } -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
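Aside (illustrative, not part of this patch): the reflected encoder is now pluggable through EncoderConfig.NewReflectedEncoder; when it is left nil, the defaultReflectedEncoder introduced later in this patch is used. A minimal sketch that swaps in a lightly tuned standard-library encoder; the field key and payload are placeholders.

package main

import (
    "encoding/json"
    "io"
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    cfg := zap.NewProductionEncoderConfig()
    // Used for values that fall back to reflection-based encoding
    // (e.g. zap.Reflect, or zap.Any on unknown types).
    cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
        enc := json.NewEncoder(w)
        enc.SetEscapeHTML(false) // keep characters such as '&' readable
        return enc
    }

    core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
    logger := zap.New(core)
    logger.Info("reflected value", zap.Reflect("payload", map[string]string{"a": "b&c"}))
}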
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } @@ -184,10 +216,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { } func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 enc.addElementSeparator() enc.buf.AppendByte('{') err := obj.MarshalLogObject(enc) enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old return err } @@ -203,23 +241,32 @@ func (enc *jsonEncoder) AppendByteString(val []byte) { enc.buf.AppendByte('"') } -func (enc *jsonEncoder) AppendComplex128(val complex128) { +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { enc.addElementSeparator() // Cast to a platform-independent, fixed-size type. r, i := float64(real(val)), float64(imag(val)) enc.buf.AppendByte('"') // Because we're always in a quoted string, we can use strconv without // special-casing NaN and +/-Inf. - enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') - enc.buf.AppendFloat(i, 64) + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) enc.buf.AppendByte('i') enc.buf.AppendByte('"') } func (enc *jsonEncoder) AppendDuration(val time.Duration) { cur := enc.buf.Len() - enc.EncodeDuration(val, enc) + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep // JSON valid. @@ -233,14 +280,12 @@ func (enc *jsonEncoder) AppendInt64(val int64) { } func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) + valueBytes, err := enc.encodeReflected(val) if err != nil { return err } - enc.reflectBuf.TrimNewline() enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + _, err = enc.buf.Write(valueBytes) return err } @@ -251,9 +296,18 @@ func (enc *jsonEncoder) AppendString(val string) { enc.buf.AppendByte('"') } +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + func (enc *jsonEncoder) AppendTime(val time.Time) { cur := enc.buf.Len() - enc.EncodeTime(val, enc) + if e := enc.EncodeTime; e != nil { + e(val, enc) + } if cur == enc.buf.Len() { // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep // output JSON valid. 
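Aside (illustrative, not part of this patch): with the sign handling in appendComplex above, complex fields encode as a quoted string such as "1+2.5i" or "1-2i" (the '+' is only inserted for a non-negative imaginary part). A small sketch; the exact output shape is indicative rather than guaranteed.

package main

import "go.uber.org/zap"

func main() {
    logger := zap.NewExample()
    defer logger.Sync()

    // Expected (approximately):
    // {"level":"info","msg":"complex values","ok":"1+2.5i","neg":"1-2i"}
    logger.Info("complex values",
        zap.Complex128("ok", complex(1, 2.5)),
        zap.Complex128("neg", complex(1, -2)),
    )
}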
@@ -266,29 +320,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) { enc.buf.AppendUint(val) } -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { 
enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } func (enc *jsonEncoder) Clone() Encoder { clone := enc.clone() @@ -297,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder { } func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() + clone := _jsonPool.Get() clone.EncoderConfig = enc.EncoderConfig clone.spaced = enc.spaced clone.openNamespaces = enc.openNamespaces @@ -309,7 +362,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final := enc.clone() final.buf.AppendByte('{') - if final.LevelKey != "" { + if final.LevelKey != "" && final.EncodeLevel != nil { final.addKey(final.LevelKey) cur := final.buf.Len() final.EncodeLevel(ent.Level, final) @@ -319,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { @@ -340,14 +393,20 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.LoggerName) } } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) } } if final.MessageKey != "" { @@ -364,11 +423,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AddString(final.StacktraceKey, ent.Stack) } final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } + final.buf.AppendString(final.LineEnding) ret := final.buf putJSONEncoder(final) @@ -383,6 +438,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() { for i := 0; i < enc.openNamespaces; i++ { enc.buf.AppendByte('}') } + enc.openNamespaces = 0 } func (enc *jsonEncoder) addKey(key string) { @@ -430,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { // Unlike the standard library's encoder, it doesn't attempt to protect the // user from browser vulnerabilities or JSONP-related problems. func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) } // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
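Aside (illustrative, not part of this patch): the FunctionKey branch in EncodeEntry above emits the fully qualified caller function name when the caller is captured and FunctionKey is non-empty. A sketch of enabling it; the key name "func" is an arbitrary choice.

package main

import (
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    cfg := zap.NewProductionEncoderConfig()
    cfg.FunctionKey = "func" // empty (disabled) by default

    core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
    // AddCaller is required so Entry.Caller (and Caller.Function) is populated.
    logger := zap.New(core, zap.AddCaller())

    logger.Info("hello") // adds "caller" and "func":"main.main"-style keys
}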
func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. +// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + i++ - continue + last = i } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. 
- enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false + // add remaining + appendTo(buf, s[last:]) } diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go new file mode 100644 index 0000000..05288d6 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "sync" + +type lazyWithCore struct { + Core + sync.Once + fields []Field +} + +// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if +// the logger is written to (or is further chained in a lon-lazy manner). +func NewLazyWith(core Core, fields []Field) Core { + return &lazyWithCore{ + Core: core, + fields: fields, + } +} + +func (d *lazyWithCore) initOnce() { + d.Once.Do(func() { + d.Core = d.Core.With(d.fields) + }) +} + +func (d *lazyWithCore) With(fields []Field) Core { + d.initOnce() + return d.Core.With(fields) +} + +func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + d.initOnce() + return d.Core.Check(e, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e575c9f..e01a241 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -53,8 +53,62 @@ const ( _minLevel = DebugLevel _maxLevel = FatalLevel + + // InvalidLevel is an invalid value for Level. + // + // Core implementations may panic if they see messages of this level. + InvalidLevel = _maxLevel + 1 ) +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. 
+// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + // String returns a lower-case ASCII representation of the log level. func (l Level) String() string { switch l { diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go index 2627a65..c3c55ba 100644 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -23,6 +23,10 @@ package zapcore // ObjectMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. type ObjectMarshaler interface { MarshalLogObject(ObjectEncoder) error } @@ -39,6 +43,10 @@ func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { // ArrayMarshaler allows user-defined types to efficiently add themselves to the // logging context, and to selectively omit information which shouldn't be // included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. type ArrayMarshaler interface { MarshalLogArray(ArrayEncoder) error } diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go index 6ef85b0..dfead08 100644 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -158,7 +158,7 @@ func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { } func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go similarity index 65% rename from vendor/go.uber.org/atomic/string.go rename to vendor/go.uber.org/zap/zapcore/reflected_encoder.go index ede8136..8746360 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -18,32 +18,24 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +package zapcore -// String is an atomic type-safe wrapper around Value for strings. 
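Aside (illustrative, not part of this patch): a sketch of the two level helpers introduced above. ParseLevel turns textual configuration into a Level, and LevelOf reports the minimum level a Core (or other LevelEnabler) has enabled.

package main

import (
    "fmt"
    "os"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    // Typically the string would come from a flag or environment variable.
    lvl, err := zapcore.ParseLevel("warn")
    if err != nil {
        panic(err)
    }

    core := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        zapcore.Lock(os.Stderr),
        lvl,
    )

    fmt.Println(zapcore.LevelOf(core)) // prints "warn"
}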
-type String struct{ v Value } +import ( + "encoding/json" + "io" +) -// NewString creates a String. -func NewString(str string) *String { - s := &String{} - if str != "" { - s.Store(str) - } - return s +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error } -// Load atomically loads the wrapped string. -func (s *String) Load() string { - v := s.v.Load() - if v == nil { - return "" - } - return v.(string) -} - -// Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the Value -// requires an allocation. -func (s *String) Store(str string) { - s.v.Store(str) +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index e316418..b7c093a 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,9 +21,8 @@ package zapcore import ( + "sync/atomic" "time" - - "go.uber.org/atomic" ) const ( @@ -66,32 +65,123 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { tn := t.UnixNano() resetAfter := c.resetAt.Load() if resetAfter > tn { - return c.counter.Inc() + return c.counter.Add(1) } c.counter.Store(1) newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { // We raced with another goroutine trying to reset, and it also reset // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() + return c.counter.Add(1) } return 1 } +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. 
+// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + type sampler struct { Core counts *counters tick time.Duration first, thereafter uint64 + hook func(Entry, SamplingDecision) } -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. // // Zap samples by logging the first N entries with a given level and message // each tick. If more Entries with the same level and message are seen during @@ -100,14 +190,14 @@ type sampler struct { // Keep in mind that zap's sampling implementation is optimized for speed over // absolute precision; under load, each tick may be slightly over- or // under-sampled. +// +// Deprecated: use NewSamplerWithOptions. 
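Aside (illustrative, not part of this patch): a sketch of the options-based sampler constructor above, with a SamplerHook that counts dropped entries. The counter, messages, and the tick/first/thereafter values are arbitrary.

package main

import (
    "os"
    "sync/atomic"
    "time"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

func main() {
    var dropped atomic.Int64

    base := zapcore.NewCore(
        zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
        zapcore.Lock(os.Stdout),
        zapcore.InfoLevel,
    )

    // The first 3 identical messages per second pass through, then every 10th.
    sampled := zapcore.NewSamplerWithOptions(base, time.Second, 3, 10,
        zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
            if dec&zapcore.LogDropped > 0 {
                dropped.Add(1)
            }
        }),
    )

    logger := zap.New(sampled)
    for i := 0; i < 100; i++ {
        logger.Info("hot loop message")
    }
    logger.Info("done", zap.Int64("dropped", dropped.Load()))
}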
func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ - Core: core, - tick: tick, - counts: newCounters(), - first: uint64(first), - thereafter: uint64(thereafter), - } + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) } func (s *sampler) With(fields []Field) Core { @@ -117,6 +207,7 @@ func (s *sampler) With(fields []Field) Core { counts: s.counts, first: s.first, thereafter: s.thereafter, + hook: s.hook, } } @@ -125,10 +216,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { return ce } - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - return ce + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) } return s.Core.Check(ent, ce) } diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go index 07a32ee..9bb32f0 100644 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2016-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -24,6 +24,11 @@ import "go.uber.org/multierr" type multiCore []Core +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + // NewTee creates a Core that duplicates log entries into two or more // underlying Cores. // @@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core { return clone } +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + func (mc multiCore) Enabled(lvl Level) bool { for i := range mc { if mc[i].Enabled(lvl) { diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go index 209e25f..d4a1af3 100644 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -91,8 +91,7 @@ func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { if len(ws) == 1 { return ws[0] } - // Copy to protect against https://github.com/golang/go/issues/7809 - return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) + return multiWriteSyncer(ws) } // See https://golang.org/src/io/multi.go diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e..28cd99c 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. */ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000..2a7cf70 --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000..948a3ee --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. 
+// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 0000000..f93c740 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
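Aside (illustrative, not part of this patch): a sketch of the errgroup API vendored above, combining WithContext, SetLimit, Go, and Wait. The URLs and the fetching logic are placeholders.

package main

import (
    "context"
    "fmt"
    "net/http"

    "golang.org/x/sync/errgroup"
)

func main() {
    g, ctx := errgroup.WithContext(context.Background())
    g.SetLimit(2) // at most two fetches in flight

    urls := []string{
        "https://example.com/a",
        "https://example.com/b",
        "https://example.com/c",
    }
    for _, url := range urls {
        url := url // capture loop variable (pre-Go 1.22 semantics)
        g.Go(func() error {
            req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
            if err != nil {
                return err
            }
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                return err // the first error cancels ctx for the other goroutines
            }
            return resp.Body.Close()
        })
    }

    if err := g.Wait(); err != nil {
        fmt.Println("fetch failed:", err)
    }
}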
+ +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 0000000..88ce334 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 0000000..269e173 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 0000000..271055b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. 
+func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 0000000..8fa707a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,291 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + 
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 0000000..9bf0c32 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
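As an illustration of the GODEBUG handling vendored in cpu.go above: options registered by each architecture's initOptions can be toggled at run time with entries such as GODEBUG=cpu.avx2=off or GODEBUG=cpu.all=off, while features marked Required (for example sse2 on amd64) cannot be disabled. A minimal sketch of observing this from a consumer follows; the program is hypothetical and not part of the vendored sources.

    // Hypothetical consumer (not part of this patch). Run with, for example,
    //   GODEBUG=cpu.avx2=off go run .
    // to see the flag flipped off by processOptions.
    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        fmt.Println("initialized:", cpu.Initialized)
        fmt.Println("avx2:", cpu.X86.HasAVX2) // false on non-x86, or when disabled via GODEBUG
    }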
+ +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 0000000..301b752 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. +const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 0000000..0e27a21 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,182 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true + 
} +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 0000000..22cc998 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 0000000..6ac6e1e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 0000000..c8ae6dd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 0000000..910728f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
+func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 0000000..7f19467 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 0000000..9526d2c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 0000000..3f73a05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 0000000..99c60fe --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
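A hedged illustration of the ID-register decoding above (extractBits plus parseARM64SystemRegisters): the AES field of ID_AA64ISAR0_EL1 occupies bits 4..7, and a field value of 2 is read as AES plus PMULL support. The helper is unexported, so the sketch below simply mirrors it; the sample register value is made up.

    package main

    import "fmt"

    // extractBits mirrors the unexported helper vendored above: it returns the
    // value of the inclusive bit field data[start..end].
    func extractBits(data uint64, start, end uint) uint {
        return uint(data>>start) & ((1 << (end - start + 1)) - 1)
    }

    func main() {
        // Made-up ID_AA64ISAR0_EL1 value whose AES field (bits 4..7) equals 2,
        // which parseARM64SystemRegisters reads as HasAES and HasPMULL.
        const isar0 = uint64(0x20)
        fmt.Println(extractBits(isar0, 4, 7)) // 2
    }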
+ +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000..743eb54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 0000000..2057006 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000..3d386d0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,116 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 0000000..4686c1d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 0000000..cd63e73 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 0000000..197188e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 0000000..1517ac6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 0000000..5586358 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000..fedb00c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000..ffb4ec7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000..ebfb3fc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' 
{ + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. + for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 0000000..85b64d5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 0000000..054ba05 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 0000000..e9ecf2a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000..5341e7f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 0000000..5f8f241 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 0000000..89608fb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 0000000..5ab8780 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 0000000..c14f12b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 0000000..7f0c79c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 0000000..5881b88 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
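The per-architecture cacheLineSize constants in these files size the exported cpu.CacheLinePad type. A brief sketch of its intended use, assuming a hypothetical sharded counter that is not part of this patch: padding keeps hot fields owned by different goroutines on separate cache lines, mirroring how the cpu package pads its own exported feature structs.

    package shardedcounter

    import (
        "sync/atomic"

        "golang.org/x/sys/cpu"
    )

    // shard is padded so that counters updated by different goroutines do not
    // share a cache line (false sharing).
    type shard struct {
        _ cpu.CacheLinePad
        n atomic.Uint64
        _ cpu.CacheLinePad
    }

    func (s *shard) inc() { s.n.Add(1) }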
+ +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. 
+func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 0000000..1fb4b70 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git 
a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 0000000..384787e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000..c29f5e4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 
= isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + if runtime.GOOS == "darwin" { + // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. + // Since users can't rely on mask register contents, let's not advertise AVX-512 support. + // See issue 49233. + osSupportsAVX512 = false + } else { + // Check if OPMASK and ZMM registers have OS support. + osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + + buf, err := os.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. 
+ return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 0000000..762b63d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 0000000..4cd64c7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 0000000..5f92ac9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 0000000..4c9788e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 0000000..1b9ccb0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 0000000..e8b6cdb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b..0000000 --- a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. -type String struct { - Data unsafe.Pointer - Len int -} diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c1..b0e4198 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c..269e173 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b..a4fcef0 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c34..1e63615 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4a..6496c31 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a73..4fd1f54 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a84..42f7eb9 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019..f890266 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d..3b47348 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e4..67e29f3 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d51..d6ae269 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae027..01e5e25 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 5653572..2abf12f 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2..f84bae7 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26..f08f628 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d4989..bdfc024 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81..2e8c996 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab3..2c394b1 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a116..fab586a 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394..f949ec5 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e18..813dfad 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,18 +3,17 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -22,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. 
- MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
RET -// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD addr+8(FP), R15 - BYTE $0x0A // SVC 09 - BYTE $0x09 - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, rc+0(FP) // Return SVC return code +// +// function to test if a pointer can be safely dereferenced (content read) +// return 0 for succces +// +TEXT ·ptrtest(SB), NOSPLIT, $0-16 + MOVD arg+0(FP), R10 // test pointer in R10 + + // set up R2 to point to CEECAADMC + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + + // set up R5 to point to the "shunt" path which set 1 to R3 (failure) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + + // if r3 is not zero (failed) then branch to finish + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + + // stomic store shunt address in R5 into CEECAADMC + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + + // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above + BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10) + + // finish here, restore 0 into CEECAADMC + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R3, ret+8(FP) // result in R3 RET -// func gettid() uint64 -TEXT ·gettid(SB), NOSPLIT, $0 - // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get CEECAATHDID - MOVD CAA(R8), R9 - MOVD 0x3D0(R9), R9 - MOVD R9, ret+0(FP) - +// +// function to test if a untptr can be loaded from a pointer +// return 1: the 8-byte content +// 2: 0 for success, 1 for failure +// +// func safeload(ptr uintptr) ( value uintptr, error uintptr) +TEXT ·safeload(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R10 // test pointer in R10 + MOVD $0x0, R6 + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R6, value+8(FP) // result in R6 + MOVD R3, error+16(FP) // error in R3 RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go new file mode 100644 index 0000000..39d647d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go @@ -0,0 +1,657 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 0000000..4bd4a17 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE 
$0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE 
$0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6ad..a086578 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a396..6fb7cb7 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a9985..d785134 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad..623a5e6 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59..bb6a64f 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f97..1ebf117 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a520265..1095fd3 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4..b9f0e27 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d..a96da71 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index cedaf7e..0000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x -// +build zos,s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? -type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. 
-var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. - if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b9912..6200876 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d4480..13b4acd 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f9..9e83d18 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index e377cc9..0000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x -// +build zos,s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := 
W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d..aca5721 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index c4fce0e..d468b7b 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo,!hurd -// +build !aix,!hurd +//go:build gccgo && !aix && !hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a..972d61b 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a5..848840a 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c08..dbe680e 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 0000000..5b0759b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. 
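// Editorial aside, not part of this patch: a minimal caller sketch for
// IoctlGetInt below, assuming an AIX or Solaris build (the only platforms
// that use this file) and an open pipe or socket fd. FIONREAD is assumed to
// be present in the platform's generated zerrors file.
//
//	n, err := unix.IoctlGetInt(fd, unix.FIONREAD)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%d bytes ready to read\n", n)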
+func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 76% rename from vendor/golang.org/x/sys/unix/ioctl.go rename to vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 1c51b0e..20f470b 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,13 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd package unix import ( - "runtime" "unsafe" ) @@ -27,7 +25,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +34,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +42,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +52,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d..c8b2a75 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix @@ -17,25 +16,23 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -49,22 +46,22 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } // IoctlGetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 8e3947c..e6f31d3 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& pwd):/build generate:$GOOS exit fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9d..4ed2e48 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -246,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -260,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -281,10 +285,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -293,14 +293,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -317,10 +309,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
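Editorial aside, not part of this patch: once mkerrors.sh is rerun, the NF_NAT defines added above are emitted as ordinary Go constants in the generated zerrors_linux*.go files, so callers combine them like any other flag set. A minimal sketch, assuming the regenerated constants:

	// Hypothetical use of the generated flags: request fully randomized
	// source ports for a persistent NAT mapping (netlink plumbing omitted).
	flags := uint32(unix.NF_NAT_RANGE_PROTO_RANDOM_FULLY | unix.NF_NAT_RANGE_PERSISTENT)
	_ = flags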
@@ -517,10 +522,12 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 == "LOOP_CONFIGURE" || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || @@ -543,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -557,7 +565,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -578,8 +586,9 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && @@ -598,6 +607,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || @@ -621,7 +633,7 @@ ccflags="$@" $2 ~ /^MEM/ || $2 ~ /^WG/ || $2 ~ /^FIB_RULE_/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -659,7 +671,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep @@ -738,7 +749,8 @@ main(void) e = errors[i].num; if(i > 0 && errors[i-1].num == e) continue; - strcpy(buf, strerror(e)); + strncpy(buf, strerror(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; @@ -757,7 +769,8 @@ main(void) e = signals[i].num; if(i > 0 && signals[i-1].num == e) continue; - strcpy(buf, strsignal(e)); + strncpy(buf, strsignal(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go new file mode 100644 index 0000000..7f602ff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos + +package unix + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go new file mode 100644 index 0000000..3a5e776 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -0,0 +1,57 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux || netbsd + +package unix + +import "unsafe" + +type mremapMmapper struct { + mmapper + mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) +} + +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, +} + +func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 { + return nil, EINVAL + } + + pOld := &oldData[cap(oldData)-1] + m.Lock() + defer m.Unlock() + bOld := m.active[pOld] + if bOld == nil || &bOld[0] != &oldData[0] { + return nil, EINVAL + } + newAddr, errno := m.mremap(uintptr(unsafe.Pointer(&bOld[0])), uintptr(len(bOld)), uintptr(newLength), flags, 0) + if errno != nil { + return nil, errno + } + bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) + pNew := &bNew[cap(bNew)-1] + if flags&mremapDontunmap == 0 { + delete(m.active, pOld) + } + m.active[pNew] = bNew + return bNew, nil +} + +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c..0482408 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294..6a09af5 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. 
+// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. 
- if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3ef..3f0975f 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a..a4d35db 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fe..714d2aa 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e132..4a9f663 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d62575..dbd2b6c 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47..b903c00 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin -// +build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943..c3a62db 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4..4a1eab3 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 0000000..3e53dbc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 0000000..3c4f33c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
+ +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c83..5ea74da 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51..67ce6ce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -410,7 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, @@ -488,8 +487,6 @@ func Fsync(fd int) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys readlen(fd int, p *byte, np int) (n int, err error) = read -//sys writelen(fd int, p *byte, np int) (n int, err error) = write //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 @@ -536,21 +533,6 @@ func Fsync(fd int) error { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be..1fdaa47 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,12 +3,10 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed17..c87f9a9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,12 +3,10 @@ // license that can be found in the LICENSE file. 
//go:build aix && ppc64 -// +build aix,ppc64 package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda4267..a00c3e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -245,8 +244,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -318,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) @@ -602,20 +600,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { // Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, behav int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 1f63382..4cc7b00 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,7 +14,6 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) @@ -230,6 +229,7 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } +func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) } //sysnb pipe(p *[2]int32) (err error) @@ -375,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. 
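Editorial aside, not part of this patch: a minimal caller sketch for the Darwin MTU wrappers touched in the next hunk, assuming a macOS build and an existing interface named "en0":

	// Query an interface MTU through the pointer-based ioctl path.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	ifr, err := unix.IoctlGetIfreqMTU(fd, "en0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("en0 MTU: %d\n", ifr.MTU)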
@@ -393,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL @@ -513,30 +510,48 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { return nil, err } - // Find size. - n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + for { + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // Read into buffer of that size. - buf := make([]KinfoProc, n/SizeofKinfoProc) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { - return nil, err - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + if err == ENOMEM { + // Process table grew. Try again. + continue + } + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil } +} + +//sys pthread_chdir_np(path string) (err error) - // The actual call may return less than the original reported required - // size so ensure we deal with that. 
- return buf[:n/SizeofKinfoProc], nil +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) @@ -616,6 +631,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -625,7 +641,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -641,190 +656,3 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// 
__semwait_signal -// Proc_info -// sendfile -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310c..0eaecf5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec99..f36c670 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c9664..2f0fa76 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
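A sketch of how the retry-on-ENOMEM SysctlKinfoProcSlice shown earlier in this file might be used. It assumes macOS, the vendored golang.org/x/sys/unix, and that the "kern.proc.all" sysctl name (one KinfoProc per process) is what the caller wants.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// The wrapper re-queries the required size and retries on ENOMEM, so a
	// process table that grows between the two sysctl calls no longer fails.
	procs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
	if err != nil {
		fmt.Println("sysctl:", err)
		return
	}
	fmt.Println("processes:", len(procs))
	for _, p := range procs {
		fmt.Println(p.Proc.P_pid)
	}
}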
-//go:build darwin && go1.12 -// +build darwin,go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a41111a..97cb916 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -325,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -343,203 +343,5 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - * TODO(jsing): Update this list for DragonFly. 
- */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d321..14bab6b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d50b9dc..2b57e0f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -161,32 +162,34 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -204,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -253,6 +256,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) @@ -267,19 +271,36 @@ func PtraceDetach(pid int) (err error) { } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { return 
ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -299,13 +320,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { return ptrace(PT_STEP, pid, 1, 0) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ @@ -402,7 +435,6 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -419,197 +451,5 @@ func PtraceSingleStep(pid int) (err error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdents -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// 
__pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b11ede8..3967bca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
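A sketch of the FreeBSD Dup3 wrapper introduced above, which is implemented via fcntl F_DUP2FD/F_DUP2FD_CLOEXEC. Assumes FreeBSD and that duplicating onto descriptor 10 is acceptable for the caller.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/etc/passwd", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	// Duplicate fd onto descriptor 10 and set close-on-exec atomically.
	// Dup3 returns EINVAL when oldfd == newfd or flags are not O_CLOEXEC.
	if err := unix.Dup3(fd, 10, unix.O_CLOEXEC); err != nil {
		fmt.Println("dup3:", err)
		return
	}
	defer unix.Close(10)
}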
//go:build 386 && freebsd -// +build 386,freebsd package unix @@ -42,6 +41,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,11 +60,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9ed8eec..eff19ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix @@ -42,6 +41,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,11 +60,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index f8ac982..4f24b51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm && freebsd -// +build arm,freebsd package unix @@ -42,6 +41,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -55,9 +58,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 8e93203..ac30759 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix @@ -42,6 +41,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,9 +58,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index cbe1222..aab725c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix @@ -42,6 +41,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,9 +58,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 4ffb648..ba46651 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix @@ -20,3 +19,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { } return } + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3..df89f9e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6..a863f70 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index d839962..5682e26 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. 
+ if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -693,10 +702,10 @@ type SockaddrALG struct { func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { // Leave room for NUL byte terminator. - if len(sa.Type) > 13 { + if len(sa.Type) > len(sa.raw.Type)-1 { return nil, 0, EINVAL } - if len(sa.Name) > 63 { + if len(sa.Name) > len(sa.raw.Name)-1 { return nil, 0, EINVAL } @@ -704,17 +713,8 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Feat = sa.Feature sa.raw.Mask = sa.Mask - typ, err := ByteSliceFromString(sa.Type) - if err != nil { - return nil, 0, err - } - name, err := ByteSliceFromString(sa.Name) - if err != nil { - return nil, 0, err - } - - copy(sa.raw.Type[:], typ) - copy(sa.raw.Name[:], name) + copy(sa.raw.Type[:], sa.Type) + copy(sa.raw.Name[:], sa.Name) return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil } @@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1311,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. 
n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1695,12 +1699,23 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data) } +// elfNT_PRSTATUS is a copy of the debug/elf.NT_PRSTATUS constant so +// x/sys/unix doesn't need to depend on debug/elf and thus +// compress/zlib, debug/dwarf, and other packages. +const elfNT_PRSTATUS = 1 + func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regsout)) + iov.SetLen(int(unsafe.Sizeof(*regsout))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regs)) + iov.SetLen(int(unsafe.Sizeof(*regs))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1709,7 +1724,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -1800,6 +1815,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) +//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) @@ -1833,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + 
+//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. 
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1868,9 +1983,8 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) -//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) @@ -1882,6 +1996,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. @@ -1964,8 +2087,6 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT -//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ -//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV @@ -1999,7 +2120,7 @@ func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { // offs2lohi splits offs into its low and high order bits. func offs2lohi(offs int64) (lo, hi uintptr) { const longBits = SizeofLong * 8 - return uintptr(offs), uintptr(uint64(offs) >> longBits) + return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet } func Readv(fd int, iovs [][]byte) (n int, err error) { @@ -2100,21 +2221,7 @@ func writevRacedetect(iovecs []Iovec, n int) { // mmap varies by architecture; see syscall_linux_*.go. 
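A sketch of the fsconfig(2) flow that the Fsconfig* helpers above wrap: fsopen, then FSCONFIG_SET_*, then FSCONFIG_CMD_CREATE, then fsmount. Assumes Linux 5.2+, CAP_SYS_ADMIN, and that a small tmpfs is what the caller wants to mount.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func mountTmpfs() (int, error) {
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		return -1, fmt.Errorf("fsopen: %w", err)
	}
	defer unix.Close(fsfd)

	// Configure the filesystem context, then create the superblock.
	if err := unix.FsconfigSetString(fsfd, "size", "16m"); err != nil {
		return -1, fmt.Errorf("fsconfig: %w", err)
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return -1, fmt.Errorf("fsconfig create: %w", err)
	}

	// The returned mount fd can be attached later, e.g. with
	// unix.MoveMount(mfd, "", unix.AT_FDCWD, "/mnt", unix.MOVE_MOUNT_F_EMPTY_PATH).
	return unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, unix.MOUNT_ATTR_NODEV)
}

func main() {
	mfd, err := mountTmpfs()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer unix.Close(mfd)
}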
//sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - +//sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2123,6 +2230,12 @@ func Munmap(b []byte) (err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) +const ( + mremapFixed = MREMAP_FIXED + mremapDontunmap = MREMAP_DONTUNMAP + mremapMaymove = MREMAP_MAYMOVE +) + // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { @@ -2153,6 +2266,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1<> 63) // see math.intSize + + // A sigset stores one bit per signal, + // offset by 1 (because signal 0 does not exist). + // So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉. + sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits) + + sigsetBytes := uintptr(sigsetWords * (wordBits / 8)) + kernelMask = &sigset_argpack{ + ss: sigmask, + ssLen: sigsetBytes, + } + } + + return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) +} + +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index ff5b589..506dafa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
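A sketch of the sched_getattr/sched_setattr wrappers added above; pid 0 means the calling thread, and SchedSetAttr fills in attr.Size itself. Assumes Linux 3.14+ and that only the nice value of the normal time-sharing policy is being changed.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Read the current scheduling attributes of this thread.
	attr, err := unix.SchedGetAttr(0, 0)
	if err != nil {
		fmt.Println("sched_getattr:", err)
		return
	}

	// Keep the normal time-sharing policy (0) and lower the priority a bit.
	attr.Policy = 0 // SCHED_OTHER / SCHED_NORMAL
	attr.Nice = 5
	if err := unix.SchedSetAttr(0, attr, 0); err != nil {
		fmt.Println("sched_setattr:", err)
	}
}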
//go:build 386 && linux -// +build 386,linux package unix @@ -97,33 +96,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac..38d5564 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9b27035..d557cf8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix @@ -40,13 +39,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3..facdb83 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 856ad1d..cd2dd79 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
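With the per-architecture Setrlimit wrappers removed in this version, resource limits go through Prlimit, which now delegates to syscall.prlimit so the limit Go caches for newly started processes stays consistent. A sketch, assuming Linux and that raising RLIMIT_NOFILE to its hard limit is the goal (pid 0 targets the current process).

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit

	// Read the current soft/hard limits for open files (newlimit nil = get).
	if err := unix.Prlimit(0, unix.RLIMIT_NOFILE, nil, &lim); err != nil {
		fmt.Println("prlimit get:", err)
		return
	}

	// Raise the soft limit to the hard limit.
	lim.Cur = lim.Max
	if err := unix.Prlimit(0, unix.RLIMIT_NOFILE, &lim, nil); err != nil {
		fmt.Println("prlimit set:", err)
	}
}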
//go:build arm && linux -// +build arm,linux package unix @@ -171,33 +170,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6422704..cf2ee6c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix @@ -33,13 +32,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -143,15 +141,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d..ffc4c2b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb4..9ebfdcf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fc..5f2b57c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af2..d1a3ad8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a122..f2f6742 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 59dab51..3d0e984 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix @@ -28,7 +27,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) @@ -126,11 +125,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - return -} - func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index bfef09a..70963a9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix @@ -31,13 +29,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index ab30250..c218ebd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix @@ -151,33 +149,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index eac1cf1..e6c4850 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix @@ -159,33 +158,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 4df5661..7286a9a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix @@ -34,7 +32,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5f4243d..6f5a288 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build riscv64 && linux -// +build riscv64,linux package unix @@ -32,13 +31,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -178,3 +176,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) + +func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { + var setSize uintptr + + if set != nil { + setSize = uintptr(unsafe.Sizeof(*set)) + } + return riscvHWProbe(pairs, setSize, set, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index d0a7d40..66f3121 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix @@ -34,7 +33,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index f5c793b..11d1f16 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build sparc64 && linux -// +build sparc64,linux package unix @@ -31,7 +30,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 35a3ad7..8816209 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -13,7 +13,6 @@ package unix import ( - "runtime" "syscall" "unsafe" ) @@ -178,13 +177,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - runtime.KeepAlive(value) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } @@ -341,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -358,267 +356,16 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) -/* - * Unimplemented - */ -// ____semctl13 -// __clone -// __fhopen40 -// __fhstat40 -// __fhstatvfs140 -// __fstat30 -// __getcwd -// __getfh30 -// __getlogin -// __lstat30 -// __mount50 -// __msgctl13 -// __msync13 -// __ntp_gettime30 -// __posix_chown -// __posix_fchown -// __posix_lchown -// __posix_rename -// __setlogin -// __shmctl13 -// __sigaction_sigtramp -// __sigaltstack14 -// __sigpending14 -// __sigprocmask14 -// __sigsuspend14 -// __sigtimedwait -// __stat30 -// __syscall -// __vfork14 -// _ksem_close -// _ksem_destroy -// _ksem_getvalue -// _ksem_init -// _ksem_open -// _ksem_post -// _ksem_trywait -// _ksem_unlink -// _ksem_wait -// _lwp_continue -// _lwp_create -// _lwp_ctl -// _lwp_detach -// _lwp_exit -// _lwp_getname -// _lwp_getprivate -// _lwp_kill -// _lwp_park -// _lwp_self -// _lwp_setname -// _lwp_setprivate -// _lwp_suspend -// _lwp_unpark -// _lwp_unpark_all -// _lwp_wait -// _lwp_wakeup -// _pset_bind -// _sched_getaffinity -// _sched_getparam -// _sched_setaffinity -// _sched_setparam -// acct -// aio_cancel -// aio_error -// aio_fsync -// aio_read -// aio_return -// aio_suspend -// aio_write -// break -// clock_getres -// clock_gettime -// 
clock_settime -// compat_09_ogetdomainname -// compat_09_osetdomainname -// compat_09_ouname -// compat_10_omsgsys -// compat_10_osemsys -// compat_10_oshmsys -// compat_12_fstat12 -// compat_12_getdirentries -// compat_12_lstat12 -// compat_12_msync -// compat_12_oreboot -// compat_12_oswapon -// compat_12_stat12 -// compat_13_sigaction13 -// compat_13_sigaltstack13 -// compat_13_sigpending13 -// compat_13_sigprocmask13 -// compat_13_sigreturn13 -// compat_13_sigsuspend13 -// compat_14___semctl -// compat_14_msgctl -// compat_14_shmctl -// compat_16___sigaction14 -// compat_16___sigreturn14 -// compat_20_fhstatfs -// compat_20_fstatfs -// compat_20_getfsstat -// compat_20_statfs -// compat_30___fhstat30 -// compat_30___fstat13 -// compat_30___lstat13 -// compat_30___stat13 -// compat_30_fhopen -// compat_30_fhstat -// compat_30_fhstatvfs1 -// compat_30_getdents -// compat_30_getfh -// compat_30_ntp_gettime -// compat_30_socket -// compat_40_mount -// compat_43_fstat43 -// compat_43_lstat43 -// compat_43_oaccept -// compat_43_ocreat -// compat_43_oftruncate -// compat_43_ogetdirentries -// compat_43_ogetdtablesize -// compat_43_ogethostid -// compat_43_ogethostname -// compat_43_ogetkerninfo -// compat_43_ogetpagesize -// compat_43_ogetpeername -// compat_43_ogetrlimit -// compat_43_ogetsockname -// compat_43_okillpg -// compat_43_olseek -// compat_43_ommap -// compat_43_oquota -// compat_43_orecv -// compat_43_orecvfrom -// compat_43_orecvmsg -// compat_43_osend -// compat_43_osendmsg -// compat_43_osethostid -// compat_43_osethostname -// compat_43_osetrlimit -// compat_43_osigblock -// compat_43_osigsetmask -// compat_43_osigstack -// compat_43_osigvec -// compat_43_otruncate -// compat_43_owait -// compat_43_stat43 -// execve -// extattr_delete_fd -// extattr_delete_file -// extattr_delete_link -// extattr_get_fd -// extattr_get_file -// extattr_get_link -// extattr_list_fd -// extattr_list_file -// extattr_list_link -// extattr_set_fd -// extattr_set_file -// extattr_set_link -// extattrctl -// fchroot -// fdatasync -// fgetxattr -// fktrace -// flistxattr -// fork -// fremovexattr -// fsetxattr -// fstatvfs1 -// fsync_range -// getcontext -// getitimer -// getvfsstat -// getxattr -// ktrace -// lchflags -// lchmod -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// lgetxattr -// lio_listio -// listxattr -// llistxattr -// lremovexattr -// lseek -// lsetxattr -// lutimes -// madvise -// mincore -// minherit -// modctl -// mq_close -// mq_getattr -// mq_notify -// mq_open -// mq_receive -// mq_send -// mq_setattr -// mq_timedreceive -// mq_timedsend -// mq_unlink -// mremap -// msgget -// msgrcv -// msgsnd -// nfssvc -// ntp_adjtime -// pmc_control -// pmc_get_info -// pollts -// preadv -// profil -// pselect -// pset_assign -// pset_create -// pset_destroy -// ptrace -// pwritev -// quotactl -// rasctl -// readv -// reboot -// removexattr -// sa_enable -// sa_preempt -// sa_register -// sa_setconcurrency -// sa_stacks -// sa_yield -// sbrk -// sched_yield -// semconfig -// semget -// semop -// setcontext -// setitimer -// setxattr -// shmat -// shmdt -// shmget -// sstk -// statvfs1 -// swapctl -// sysarch -// syscall -// timer_create -// timer_delete -// timer_getoverrun -// timer_gettime -// timer_settime -// undelete -// utrace -// uuidgen -// vadvise -// vfork -// writev +const ( + mremapFixed = MAP_FIXED + mremapDontunmap = 0 + mremapMaymove = 0 +) + +//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err 
error) = SYS_MREMAP + +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { + return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d28..7a5eb57 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52..62d8957 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942..ce6a068 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd..d46d689 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 9b67b90..b25343c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,24 +137,49 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) +} + +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. 
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -293,7 +318,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) @@ -311,80 +335,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// __getcwd -// __semctl -// __syscall -// __sysctl -// adjfreq -// break -// clock_getres -// clock_gettime -// clock_settime -// closefrom -// execve -// fhopen -// fhstat -// fhstatfs -// fork -// futimens -// getfh -// getgid -// getitimer -// getlogin -// getresgid -// getresuid -// getthrid -// ktrace -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// mincore -// minherit -// mount -// mquery -// msgctl -// msgget -// msgrcv -// msgsnd -// nfssvc -// nnpfspioctl -// preadv -// profil -// pwritev -// quotactl -// readv -// reboot -// renameat -// rfork -// sched_yield -// semget -// semop -// setgroups -// setitimer -// setsockopt -// shmat -// shmctl -// shmdt -// shmget -// sigaction -// sigaltstack -// sigpending -// sigprocmask -// sigreturn -// sigsuspend -// sysarch -// syscall -// threxit -// thrsigdivert -// thrsleep -// thrwakeup -// vfork -// writev +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcd..9ddc89f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab2536..70a3c96 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4..265caa8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde9..ac4fda1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f..0a451e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c279613..30a308c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7..ea95433 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 07ac561..21974af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true @@ -408,8 +409,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -546,22 +546,26 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func IoctlSetTermio(fd int, req uint, value *Termio) error { - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, err = ioctlPtrRet(fd, req, arg) return err } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlSetTermio(fd int, req int, value *Termio) error { + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } @@ -662,7 +666,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -696,38 +699,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - // Event Ports type fileObjCookie struct { @@ -1077,14 +1048,14 @@ func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags return retCl, retData, flags, nil } -func IoctlSetIntRetInt(fd int, req uint, arg int) 
(int, error) { +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req uint, val string) error { +func IoctlSetString(fd int, req int, val string) error { bs := make([]byte, len(val)+1) copy(bs[:len(bs)-1], val) - err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) + err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) runtime.KeepAlive(&bs[0]) return err } @@ -1117,8 +1088,8 @@ func (l *Lifreq) GetLifruUint() uint { return *(*uint)(unsafe.Pointer(&l.Lifru[0])) } -func IoctlLifreq(fd int, req uint, l *Lifreq) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(l))) +func IoctlLifreq(fd int, req int, l *Lifreq) error { + return ioctlPtr(fd, req, unsafe.Pointer(l)) } // Strioctl Helpers @@ -1128,6 +1099,6 @@ func (s *Strioctl) SetInt(i int) { s.Dp = (*int8)(unsafe.Pointer(&i)) } -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { - return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { + return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef..e02d8ce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index a386f88..4e92e5a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix @@ -147,6 +146,23 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { @@ -541,6 +557,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { @@ -578,7 +597,7 @@ func Lutimes(path string, tv []Timeval) error { return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } -// emptyIovec reports whether there are no bytes in the slice of Iovec. +// emptyIovecs reports whether there are no bytes in the slice of Iovec. func emptyIovecs(iov []Iovec) bool { for i := range iov { if iov[i].Len > 0 { @@ -587,3 +606,10 @@ func emptyIovecs(iov []Iovec) bool { } return true } + +// Setrlimit sets a resource limit. 
+func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca..05c95bc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707a..23f39b7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 68b2f3e..312ae6a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,13 +3,22 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x + +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -18,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). 
- AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? 
+func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -66,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -75,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -89,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -139,8 +355,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < int(pp.Len) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -148,7 +363,9 @@ func anyToSockaddr(_ int, rsa 
*RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -157,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -179,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -192,11 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -207,13 +468,19 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag 
int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -222,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -238,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path 
string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg 
*ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -274,23 +738,23 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - // Dummy 
function: there are no semantics for Madvise on z/OS func Madvise(b []byte, advice int) (err error) { return @@ -305,8 +769,6 @@ func Munmap(b []byte) (err error) { } //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -333,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -348,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -376,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, 
mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -419,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -536,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. 
var status _C_int @@ -587,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -616,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1120,7 +1882,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob 
[]byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { @@ -1202,67 +1964,46 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return n, nil } -func Opendir(name string) (uintptr, error) { - p, err := BytePtrFromString(name) - if err != nil { - return 0, err - } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err } - return + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) + } + return dir, err } +// clearsyscall.Errno resets the errno value to 0. 
+func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1277,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1323,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + 
runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1388,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + +//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 } - return Fstatfs(fd, stat) + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + 
return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1411,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1432,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1490,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. 
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1794,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return (*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat 
name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1912,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1926,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1972,23 +2991,127 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := 
CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, 
"") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a443..4fcd38d 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d1..672d6b0 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux -// +build darwin,!ios linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddef..8b7977a 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios -// +build darwin,!ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 3d89304..7997b19 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,13 +3,12 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix import "time" -// TimespecToNSec returns the time stored in ts as nanoseconds. +// TimespecToNsec returns the time stored in ts as nanoseconds. func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } // NsecToTimespec converts a number of nanoseconds into a Timespec. diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae..cb7e598 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. 
func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. +func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 663b377..e168793 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix @@ -36,9 +35,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) { func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { if len(dest) > idx { return unsafe.Pointer(&dest[idx]) - } else { - return unsafe.Pointer(_zero) } + if dest != nil { + // extattr_get_file and extattr_list_file treat NULL differently from + // a non-NULL pointer of length zero. Preserve the property of nilness, + // even if we can't use dest directly. + return unsafe.Pointer(&_zero) + } + return nil } // FreeBSD and NetBSD implement their own syscalls to handle extended attributes diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b..2fb219d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c2..b0e6f5c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7..e40fa85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go @@ -1270,6 +1269,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1552,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f517..bb02aa6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go @@ -1270,6 +1269,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1552,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e..c0e0f86 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c51..6c69239 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3..dd9163f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69d..493a2a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2..8b437b3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d9..67c02dd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 785d693..877a62b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -70,6 +69,7 @@ const ( ALG_SET_DRBG_ENTROPY = 0x6 ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 + ALG_SET_KEY_BY_KEY_SERIAL = 0x7 ALG_SET_OP = 0x3 ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 @@ -457,7 +457,6 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -481,23 +480,29 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_DEV_BOUND_ONLY = 0x40 BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -520,6 +525,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -563,6 +569,7 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_BUS_OFF_THRESHOLD = 0x100 CAN_CTRLMODE_3_SAMPLES = 0x4 CAN_CTRLMODE_BERR_REPORTING = 0x10 CAN_CTRLMODE_CC_LEN8_DLC = 0x100 @@ -577,9 +584,12 @@ const ( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERROR_PASSIVE_THRESHOLD = 0x80 + CAN_ERROR_WARNING_THRESHOLD = 0x60 CAN_ERR_ACK = 0x20 CAN_ERR_BUSERROR = 0x80 CAN_ERR_BUSOFF = 0x40 + CAN_ERR_CNT = 0x200 CAN_ERR_CRTL = 0x4 CAN_ERR_CRTL_ACTIVE = 0x40 CAN_ERR_CRTL_RX_OVERFLOW = 0x1 @@ -648,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -771,6 +784,10 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 + DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 + DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS = 0x3 DEVMEM_MAGIC = 0x454d444d @@ -820,9 +837,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2022-02-22)" + DM_VERSION_EXTRA = "-ioctl (2023-03-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2e + DM_VERSION_MINOR = 0x30 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1049,6 +1066,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CANXL = 0xe ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 @@ -1060,6 +1078,7 @@ const ( ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b ETH_P_DSA_8021Q = 0xdadb + ETH_P_DSA_A5PSW = 0xe001 ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1189,13 +1208,16 @@ const ( FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 + FAN_INFO = 0x20 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORE = 0x400 FAN_MARK_IGNORED_MASK = 0x20 FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_IGNORE_SURV = 0x440 
FAN_MARK_INODE = 0x0 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 @@ -1223,6 +1245,8 @@ const ( FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 + FAN_RESPONSE_INFO_AUDIT_RULE = 0x1 + FAN_RESPONSE_INFO_NONE = 0x0 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 @@ -1253,7 +1277,10 @@ const ( FSCRYPT_MODE_AES_128_CBC = 0x5 FSCRYPT_MODE_AES_128_CTS = 0x6 FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_HCTR2 = 0xa FSCRYPT_MODE_AES_256_XTS = 0x1 + FSCRYPT_MODE_SM4_CTS = 0x8 + FSCRYPT_MODE_SM4_XTS = 0x7 FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 @@ -1272,8 +1299,6 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 @@ -1318,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1430,6 +1456,7 @@ const ( IFF_NOARP = 0x80 IFF_NOFILTER = 0x1000 IFF_NOTRAILERS = 0x20 + IFF_NO_CARRIER = 0x40 IFF_NO_PI = 0x1000 IFF_ONE_QUEUE = 0x2000 IFF_PERSIST = 0x800 @@ -1605,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1631,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -1676,12 +1705,14 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1761,7 +1792,10 @@ const ( LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 + LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -1778,6 +1812,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -1800,11 +1835,13 @@ const ( LWTUNNEL_IP_OPT_GENEVE_MAX = 0x3 LWTUNNEL_IP_OPT_VXLAN_MAX = 0x1 MADV_COLD = 0x14 + MADV_COLLAPSE = 0x19 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 MADV_DONTFORK = 0xa MADV_DONTNEED = 0x4 + MADV_DONTNEED_LOCKED = 0x18 MADV_FREE = 0x8 MADV_HUGEPAGE = 0xe MADV_HWPOISON = 0x64 @@ -1845,8 +1882,9 @@ const ( MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 + MFD_EXEC = 0x10 MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 MFD_HUGE_1GB = 0x78000000 MFD_HUGE_1MB = 0x50000000 @@ -1860,6 +1898,7 @@ const ( MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f MFD_HUGE_SHIFT = 0x1a + MFD_NOEXEC_SEAL = 0x8 MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1868,6 +1907,7 @@ const ( 
MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -1883,6 +1923,9 @@ const ( MOUNT_ATTR_SIZE_VER0 = 0x20 MOUNT_ATTR_STRICTATIME = 0x20 MOUNT_ATTR__ATIME = 0x70 + MREMAP_DONTUNMAP = 0x4 + MREMAP_FIXED = 0x2 + MREMAP_MAYMOVE = 0x1 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -2096,6 +2139,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x7 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2153,6 +2250,7 @@ const ( PACKET_FANOUT_DATA = 0x16 PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_IGNORE_OUTGOING = 0x4000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 @@ -2188,6 +2286,7 @@ const ( PACKET_USER = 0x6 PACKET_VERSION = 0xa PACKET_VNET_HDR = 0xf + PACKET_VNET_HDR_SZ = 0x18 PARITY_CRC16_PR0 = 0x2 PARITY_CRC16_PR0_CCITT = 0x4 PARITY_CRC16_PR1 = 0x3 @@ -2205,6 +2304,7 @@ const ( PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 PERF_ATTR_SIZE_VER7 = 0x80 + PERF_ATTR_SIZE_VER8 = 0x88 PERF_AUX_FLAG_COLLISION = 0x8 PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 @@ -2212,6 +2312,12 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 + PERF_BR_ARM64_DEBUG_DATA = 0x7 + PERF_BR_ARM64_DEBUG_EXIT = 0x5 + PERF_BR_ARM64_DEBUG_HALT = 0x4 + PERF_BR_ARM64_DEBUG_INST = 0x6 + PERF_BR_ARM64_FIQ = 0x3 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 @@ -2232,6 +2338,8 @@ const ( PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_CXL = 0x9 + PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 PERF_MEM_LVLNUM_L3 = 0x3 @@ -2241,6 +2349,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd 
PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -2265,6 +2374,7 @@ const ( PERF_MEM_REMOTE_REMOTE = 0x1 PERF_MEM_REMOTE_SHIFT = 0x25 PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_PEER = 0x2 PERF_MEM_SNOOPX_SHIFT = 0x26 PERF_MEM_SNOOP_HIT = 0x4 PERF_MEM_SNOOP_HITM = 0x10 @@ -2300,8 +2410,8 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 - PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e PRIO_PGRP = 0x1 @@ -2338,6 +2448,7 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 PR_GET_ENDIAN = 0x13 @@ -2346,6 +2457,8 @@ const ( PR_GET_FP_MODE = 0x2e PR_GET_IO_FLUSHER = 0x3a PR_GET_KEEPCAPS = 0x7 + PR_GET_MDWE = 0x42 + PR_GET_MEMORY_MERGE = 0x44 PR_GET_NAME = 0x10 PR_GET_NO_NEW_PRIVS = 0x27 PR_GET_PDEATHSIG = 0x2 @@ -2366,6 +2479,8 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 + PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b PR_MTE_TAG_MASK = 0x7fff8 @@ -2383,6 +2498,15 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_RISCV_V_GET_CONTROL = 0x46 + PR_RISCV_V_SET_CONTROL = 0x45 + PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 + PR_RISCV_V_VSTATE_CTRL_DEFAULT = 0x0 + PR_RISCV_V_VSTATE_CTRL_INHERIT = 0x10 + PR_RISCV_V_VSTATE_CTRL_MASK = 0x1f + PR_RISCV_V_VSTATE_CTRL_NEXT_MASK = 0xc + PR_RISCV_V_VSTATE_CTRL_OFF = 0x1 + PR_RISCV_V_VSTATE_CTRL_ON = 0x2 PR_SCHED_CORE = 0x3e PR_SCHED_CORE_CREATE = 0x1 PR_SCHED_CORE_GET = 0x0 @@ -2400,6 +2524,8 @@ const ( PR_SET_FP_MODE = 0x2d PR_SET_IO_FLUSHER = 0x39 PR_SET_KEEPCAPS = 0x8 + PR_SET_MDWE = 0x41 + PR_SET_MEMORY_MERGE = 0x43 PR_SET_MM = 0x23 PR_SET_MM_ARG_END = 0x9 PR_SET_MM_ARG_START = 0x8 @@ -2483,6 +2609,7 @@ const ( PTRACE_GETSIGMASK = 0x420a PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e + PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG = 0x4211 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -2513,6 +2640,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_SYSCALL_INFO_ENTRY = 0x1 @@ -2556,8 +2684,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2775,17 +2904,66 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 
SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -2884,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -2904,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -2944,6 +3126,8 @@ const ( SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a + SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -2999,9 +3183,11 @@ const ( STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 + STATX_DIOALIGN = 0x2000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3047,7 +3233,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xd + TASKSTATS_VERSION = 0xe TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3089,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 @@ -3213,6 +3400,7 @@ const ( TP_STATUS_COPY = 0x2 TP_STATUS_CSUMNOTREADY = 0x8 TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_GSO_TCP = 0x100 TP_STATUS_KERNEL = 0x0 TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 @@ -3227,6 +3415,19 @@ const ( TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 UDF_SUPER_MAGIC = 0x15013346 + UDP_CORK = 0x1 + UDP_ENCAP = 0x64 + UDP_ENCAP_ESPINUDP = 0x2 + UDP_ENCAP_ESPINUDP_NON_IKE = 0x1 + UDP_ENCAP_GTP0 = 0x4 + UDP_ENCAP_GTP1U = 0x5 + UDP_ENCAP_L2TPINUDP = 0x3 + UDP_GRO = 0x68 + UDP_NO_CHECK6_RX = 0x66 + UDP_NO_CHECK6_TX = 0x65 + UDP_SEGMENT = 0x67 + UDP_V4_FLOW = 0x2 + UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3377,24 +3578,28 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 
0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 - Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 - ZSMALLOC_MAGIC = 0x58295829 _HIDIOCGRAWNAME_LEN = 0x80 _HIDIOCGRAWPHYS_LEN = 0x40 _HIDIOCGRAWUNIQ_LEN = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 36c0dfc..e4bc0bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -110,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -133,6 +142,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc03c4d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -272,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -316,10 +329,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4ff9427..689317a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -110,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -133,6 +142,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -273,6 +283,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -317,10 +330,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3eaa0fb..5cca668 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -279,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -323,10 +335,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d7995bd..1427050 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -79,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 @@ -134,6 +143,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -269,6 +279,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -313,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 @@ -442,6 +457,7 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 + TPIDR2_MAGIC = 0x54504902 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETDEVNETNS = 0x54e3 @@ -514,6 +530,7 @@ const ( XCASE = 0x4 XTABS = 0x1800 ZA_MAGIC = 0x54366345 + ZT_MAGIC = 0x5a544e01 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 928e24c..28e39af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -109,6 +117,9 @@ const ( IUCLC = 0x200 IXOFF = 0x1000 IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 + LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -132,6 +143,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -263,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -307,10 +322,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 179bffb..cd66e92 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -272,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 @@ -316,10 +328,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 1fba17b..c1595eb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -272,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 @@ -316,10 +328,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index b77dde3..ee9456b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -272,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 @@ -316,10 +328,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 78c6c75..8cfca81 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -272,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 @@ -316,10 +328,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1c0d31f..60b0deb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -327,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -371,10 +383,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 959dd9b..f90aa72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -331,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -375,10 +387,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5a873cd..ba9e015 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -331,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -375,10 +387,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e336d14..07cdfd6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -218,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 @@ -260,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -304,10 +319,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 390c01d..2f1dd21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go @@ -27,22 +26,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -131,6 +139,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -335,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 @@ -379,10 +391,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 98a6e5f..f40519d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go @@ -30,22 +29,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 @@ -136,6 +144,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -326,8 +335,59 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 + SF_FP = 0x38 + SF_I0 = 0x20 + SF_I1 = 0x24 + SF_I2 = 0x28 + SF_I3 = 0x2c + SF_I4 = 0x30 + SF_I5 = 0x34 + SF_L0 = 0x0 + SF_L1 = 0x4 + SF_L2 = 0x8 + SF_L3 = 0xc + SF_L4 = 0x10 + SF_L5 = 0x14 + SF_L6 = 0x18 + SF_L7 = 0x1c + SF_PC = 0x3c + SF_RETP = 0x40 + SF_V9_FP = 0x70 + SF_V9_I0 = 0x40 + SF_V9_I1 = 0x48 + SF_V9_I2 = 0x50 + SF_V9_I3 = 0x58 + SF_V9_I4 = 0x60 + SF_V9_I5 = 0x68 + SF_V9_L0 = 0x0 + SF_V9_L1 = 0x8 + SF_V9_L2 = 0x10 + SF_V9_L3 = 0x18 + SF_V9_L4 = 0x20 + SF_V9_L5 = 0x28 + SF_V9_L6 = 0x30 + SF_V9_L7 = 0x38 + SF_V9_PC = 0x78 + SF_V9_RETP = 0x80 + SF_V9_XARG0 = 0x88 + SF_V9_XARG1 = 0x90 + SF_V9_XARG2 = 0x98 + SF_V9_XARG3 = 0xa0 + SF_V9_XARG4 = 0xa8 + SF_V9_XARG5 = 0xb0 + SF_V9_XXARG = 0xb8 + SF_XARG0 = 0x44 + SF_XARG1 = 0x48 + SF_XARG2 = 0x4c + SF_XARG3 = 0x50 + SF_XARG4 = 0x54 + SF_XARG5 = 0x58 + SF_XXARG = 0x5c SIOCATMARK = 0x8905 SIOCGPGRP = 0x8904 SIOCGSTAMPNS_NEW = 0x40108907 @@ -370,10 +430,12 @@ const ( SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 + SO_PASSPIDFD = 0x55 SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d + SO_PEERPIDFD = 0x56 SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420..130085d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c..84769a1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef974..602ded0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba1..efc0406 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e47..5a6500f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb..a5aeeb9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955..0e9748a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe7..4f4449a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe..76a363f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b..43ca0cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d4030..b1b8bb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a..d2ddd31 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d050..da08b2a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. 
@@ -11,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -153,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -166,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // 
Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -183,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -206,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -249,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -286,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -379,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -428,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -453,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + 
PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -477,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -701,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -744,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index bd001a6..586317c 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix @@ -15,12 +13,12 @@ type PtraceRegsArm struct { // PtraceGetRegsArm fetches the registers used by arm binaries. func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsArm sets the registers used by arm binaries. func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsArm64 is the registers used by arm64 binaries. @@ -33,10 +31,10 @@ type PtraceRegsArm64 struct { // PtraceGetRegsArm64 fetches the registers used by arm64 binaries. func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsArm64 sets the registers used by arm64 binaries. 
func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go index 6cb6d68..834d285 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go @@ -7,11 +7,11 @@ import "unsafe" // PtraceGetRegSetArm64 fetches the registers used by arm64 binaries. func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error { iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))} - return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec)) } // PtraceSetRegSetArm64 sets the registers used by arm64 binaries. func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error { iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))} - return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index c34d063..d7c881b 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix @@ -21,12 +19,12 @@ type PtraceRegsMips struct { // PtraceGetRegsMips fetches the registers used by mips binaries. func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips sets the registers used by mips binaries. func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsMips64 is the registers used by mips64 binaries. @@ -42,10 +40,10 @@ type PtraceRegsMips64 struct { // PtraceGetRegsMips64 fetches the registers used by mips64 binaries. func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips64 sets the registers used by mips64 binaries. func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 3ccf0c0..2d2de5d 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. 
//go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix @@ -21,12 +19,12 @@ type PtraceRegsMipsle struct { // PtraceGetRegsMipsle fetches the registers used by mipsle binaries. func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMipsle sets the registers used by mipsle binaries. func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsMips64le is the registers used by mips64le binaries. @@ -42,10 +40,10 @@ type PtraceRegsMips64le struct { // PtraceGetRegsMips64le fetches the registers used by mips64le binaries. func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips64le sets the registers used by mips64le binaries. func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 7d65857..5adc79f 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix @@ -31,12 +29,12 @@ type PtraceRegs386 struct { // PtraceGetRegs386 fetches the registers used by 386 binaries. func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegs386 sets the registers used by 386 binaries. func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsAmd64 is the registers used by amd64 binaries. @@ -72,10 +70,10 @@ type PtraceRegsAmd64 struct { // PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsAmd64 sets the registers used by amd64 binaries. func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 0000000..b77ff5d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), 
NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD 
$·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 870215d..6ea64a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc -// +build aix,ppc package unix @@ -124,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +211,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,6 +221,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) r = int(r0) @@ -808,28 +816,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { r0, er := C.dup2(C.int(oldfd), C.int(newfd)) if r0 == -1 && er != nil { @@ -1454,16 +1440,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index a89b0bf..99ee439 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc64 -// +build aix,ppc64 package unix @@ -93,8 +92,18 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -752,28 +761,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { _, e1 := calldup2(oldfd, newfd) if e1 != 0 { @@ -1412,16 +1399,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 2caa5ad..b68a783 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix @@ -124,7 +123,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +240,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +360,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -423,6 +419,13 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, fd, uintptr(cmd), arg, 0, 0, 0) return @@ -1172,13 +1175,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 944a714..0a87450 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix @@ -123,7 +122,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +129,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -191,6 +190,14 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg)))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg))) e1 = syscall.GetErrno() @@ -1047,14 +1054,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c2461c4..07642c3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build darwin && amd64 -// +build darwin,amd64 package unix @@ -731,6 +730,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -751,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -1984,6 +2026,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2182,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := 
syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2391,28 +2444,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0..923e08c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -5,900 +5,760 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, 
$libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA 
·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT 
libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) - GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) - GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) - GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) - GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) - GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) - GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) - GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 26a0fdc..7d73dda 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. 
DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix @@ -731,6 +730,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -751,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -1984,6 +2026,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2182,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, 
_, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2391,28 +2444,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c..0577001 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -5,900 +5,760 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, 
$libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA 
·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT 
libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) - GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 54749f9..aad65fc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix @@ -436,6 +435,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1400,16 +1409,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1642,28 +1641,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 77479d4..c009639 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && 386 -// +build freebsd,386 package unix @@ -388,6 +387,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +423,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1644,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1852,28 +1861,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2e966d4..7664df7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && amd64 -// +build freebsd,amd64 package unix @@ -388,6 +387,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +423,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1644,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1852,28 +1861,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d65a7c0..ae09918 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && arm -// +build freebsd,arm package unix @@ -388,6 +387,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +423,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1644,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1852,28 +1861,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 6f0b97c..11fd5d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && arm64 -// +build freebsd,arm64 package unix @@ -388,6 +387,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +423,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1644,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1852,28 +1861,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index e1c23b5..c3d2d65 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix @@ -388,6 +387,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +423,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1644,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1852,28 +1861,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index b57c705..c698cbc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build illumos && amd64 -// +build illumos,amd64 package unix @@ -40,7 +39,7 @@ func readv(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -55,7 +54,7 @@ func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -70,7 +69,7 @@ func writev(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -85,7 +84,7 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -96,7 +95,7 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 293cf36..87d8612 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -379,6 +393,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) @@ -537,6 +561,17 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockAdjtime(clockid int32, buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_CLOCK_ADJTIME, uintptr(clockid), uintptr(unsafe.Pointer(buf)), 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGetres(clockid int32, res *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) if e1 != 0 { @@ -871,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1325,16 +1370,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { @@ -1345,7 +1380,7 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +func pselect6(nfd int, r 
*FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) { r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) if e1 != 0 { @@ -1723,28 +1758,6 @@ func exitThread(code int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func readv(fd int, iovs []Iovec) (n int, err error) { var _p0 unsafe.Pointer if len(iovs) > 0 { @@ -1857,6 +1870,17 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldaddr), uintptr(oldlength), uintptr(newlength), uintptr(flags), uintptr(newaddr), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, advice int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { @@ -2161,3 +2185,47 @@ func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + RawSyscallNoError(SYS_GETRESUID, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad..4def3e9 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix @@ -411,16 +410,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce..fef2bc8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix @@ -334,16 +333,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39..a9fd76a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix @@ -578,16 +577,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f..4600650 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && arm64 -// +build linux,arm64 package unix @@ -289,16 +288,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1..c8987d2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cb..921f430 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix @@ -644,16 +643,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380..44f0678 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix @@ -278,16 +277,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4d..e7fa0ab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && mips64le -// +build linux,mips64le package unix @@ -278,16 +277,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15..8c51256 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix @@ -644,16 +643,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432a..7392fd4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix @@ -624,16 +623,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7..4118043 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64 -// +build linux,ppc64 package unix @@ -349,16 +348,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0f..40c6ce7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le -// +build linux,ppc64le package unix @@ -349,16 +348,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1..2cfe34a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix @@ -269,16 +268,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -541,3 +530,19 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(pairs) > 0 { + _p0 = unsafe.Pointer(&pairs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfe..61e6f07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && s390x -// +build linux,s390x package unix @@ -319,16 +318,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f..834b842 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix @@ -329,16 +328,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 79f7389..e91ebc1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && 386 -// +build netbsd,386 package unix @@ -405,6 +404,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1606,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1824,20 +1823,13 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1838,9 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fb161f3..be28bab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && amd64 -// +build netbsd,amd64 package unix @@ -405,6 +404,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1606,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1824,20 +1823,13 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1838,9 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 4c8ac99..fb587e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && arm -// +build netbsd,arm package unix @@ -405,6 +404,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1606,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1824,20 +1823,13 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1838,9 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 76dd8ec..d576438 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && arm64 -// +build netbsd,arm64 package unix @@ -405,6 +404,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1606,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1824,20 +1823,13 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1846,13 +1838,9 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index caeb807..9dc4241 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 0874442..41b5617 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index a05e5f4..0d3a075 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd1..4019a65 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index b2da8e5..c39f777 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf31042..ac4af24 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 048b265..57571d0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42..f77d532 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 6f33e37..e62963e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af272..fae140b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 330cf7f..0083135 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255..9d1e0ff 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -189,6 +189,18 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresuid(SB) + RET +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresgid(SB) + RET +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ioctl(SB) RET @@ -201,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -687,12 +705,6 @@ TEXT 
libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET @@ -795,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 5f24de0..79029ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -519,6 +518,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -533,6 +554,16 @@ var libc_ioctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -553,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -1886,20 +1943,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { @@ -2195,8 +2238,8 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2204,16 +2247,9 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_getfsstat_trampoline_addr uintptr -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2233,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4d..da115f9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -168,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL 
·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -573,11 +588,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 @@ -663,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 78d4a42..829b87f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build solaris && amd64 -// +build solaris,amd64 package unix @@ -110,7 +109,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +248,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +388,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -439,7 +435,7 @@ func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -449,7 +445,7 @@ func pipe(p *[2]_C_int) (n int, err error) { func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -459,7 +455,7 @@ func pipe2(p *[2]_C_int, flags int) (err error) { func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -474,7 +470,7 @@ func Getcwd(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -485,7 +481,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -495,7 +491,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -506,7 +502,7 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -521,7 +517,7 @@ func gethostname(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -536,7 +532,7 @@ func utimes(path string, times *[2]Timeval) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = 
errnoErr(e1) } return } @@ -551,7 +547,7 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -562,7 +558,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -572,7 +568,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -583,7 +579,7 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -594,7 +590,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -605,7 +601,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -615,7 +611,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -646,11 +642,22 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) } return } @@ -661,7 +668,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -676,7 +683,7 @@ func Access(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -686,7 +693,7 @@ func Access(path string, mode uint32) 
(err error) { func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -701,7 +708,7 @@ func Chdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -716,7 +723,7 @@ func Chmod(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -731,7 +738,7 @@ func Chown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -746,7 +753,7 @@ func Chroot(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -756,7 +763,7 @@ func Chroot(path string) (err error) { func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -766,7 +773,7 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -782,7 +789,7 @@ func Creat(path string, mode uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -793,7 +800,7 @@ func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -803,7 +810,7 @@ func Dup(fd int) (nfd int, err error) { func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -825,7 +832,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -835,7 +842,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -845,7 +852,7 @@ func Fchdir(fd int) (err error) { func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -860,7 +867,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -870,7 +877,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -885,7 +892,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -895,7 +902,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -905,7 +912,7 @@ func Fdatasync(fd int) (err error) { func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -916,7 +923,7 @@ func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -926,7 +933,7 @@ func Fpathconf(fd int, name int) (val int, err error) { func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -941,7 +948,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -951,7 +958,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -966,7 +973,7 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -993,7 +1000,7 @@ func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1004,7 +1011,7 @@ func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1039,7 +1046,7 @@ func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) 
if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1049,7 +1056,7 @@ func Getpriority(which int, who int) (n int, err error) { func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1059,7 +1066,7 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1070,7 +1077,7 @@ func Getsid(pid int) (sid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) sid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1080,7 +1087,7 @@ func Getsid(pid int) (sid int, err error) { func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1098,7 +1105,7 @@ func Getuid() (uid int) { func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1113,7 +1120,7 @@ func Lchown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1133,7 +1140,7 @@ func Link(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1143,7 +1150,7 @@ func Link(path string, link string) (err error) { func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1158,7 +1165,7 @@ func Lstat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1172,7 +1179,7 @@ func Madvise(b []byte, advice int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1187,7 +1194,7 @@ func Mkdir(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1202,7 +1209,7 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1217,7 +1224,7 @@ func Mkfifo(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if 
e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1232,7 +1239,7 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1247,7 +1254,7 @@ func Mknod(path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1262,7 +1269,7 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1276,7 +1283,7 @@ func Mlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1286,7 +1293,7 @@ func Mlock(b []byte) (err error) { func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1300,7 +1307,7 @@ func Mprotect(b []byte, prot int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1314,7 +1321,7 @@ func Msync(b []byte, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1328,7 +1335,7 @@ func Munlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1338,7 +1345,7 @@ func Munlock(b []byte) (err error) { func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1348,7 +1355,7 @@ func Munlockall() (err error) { func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1364,7 +1371,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1380,7 +1387,7 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1396,7 +1403,7 @@ func Pathconf(path string, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - 
err = e1 + err = errnoErr(e1) } return } @@ -1406,7 +1413,7 @@ func Pathconf(path string, name int) (val int, err error) { func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1421,7 +1428,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1436,7 +1443,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1451,7 +1458,7 @@ func read(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1471,7 +1478,7 @@ func Readlink(path string, buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1491,7 +1498,7 @@ func Rename(from string, to string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1511,7 +1518,7 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1526,7 +1533,7 @@ func Rmdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1537,7 +1544,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1548,7 +1555,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1558,7 +1565,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1568,7 +1575,7 @@ func Setegid(egid int) (err error) { func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1578,7 +1585,7 @@ func Seteuid(euid int) (err 
error) { func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1592,7 +1599,7 @@ func Sethostname(p []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1602,7 +1609,7 @@ func Sethostname(p []byte) (err error) { func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1612,7 +1619,7 @@ func Setpgid(pid int, pgid int) (err error) { func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1622,7 +1629,7 @@ func Setpriority(which int, who int, prio int) (err error) { func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1632,17 +1639,7 @@ func Setregid(rgid int, egid int) (err error) { func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1653,7 +1650,7 @@ func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1663,7 +1660,7 @@ func Setsid() (pid int, err error) { func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1673,7 +1670,7 @@ func Setuid(uid int) (err error) { func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1688,7 +1685,7 @@ func Stat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1703,7 +1700,7 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1723,7 +1720,7 @@ func Symlink(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1733,7 +1730,7 @@ func Symlink(path string, link string) (err error) { func Sync() (err error) { _, _, 
e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1744,7 +1741,7 @@ func Sysconf(which int) (n int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) n = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1755,7 +1752,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1770,7 +1767,7 @@ func Truncate(path string, length int64) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1780,7 +1777,7 @@ func Truncate(path string, length int64) (err error) { func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1790,7 +1787,7 @@ func Fsync(fd int) (err error) { func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1808,7 +1805,7 @@ func Umask(mask int) (oldmask int) { func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1823,7 +1820,7 @@ func Unmount(target string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1838,7 +1835,7 @@ func Unlink(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1853,7 +1850,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1863,7 +1860,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1878,7 +1875,7 @@ func Utime(path string, buf *Utimbuf) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1888,7 +1885,7 @@ func Utime(path string, buf *Utimbuf) (err error) { func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1898,7 +1895,7 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1909,7 +1906,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1919,7 +1916,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1930,7 +1927,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1944,7 +1941,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1955,7 +1952,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1965,7 +1962,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1980,7 +1977,7 @@ func write(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1990,7 +1987,7 @@ func write(fd int, p []byte) (n int, err error) { func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2000,7 +1997,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2010,7 +2007,7 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 
0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2025,7 +2022,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2036,7 +2033,7 @@ func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2047,7 +2044,7 @@ func port_associate(port int, source int, object uintptr, events int, user *byte r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2058,7 +2055,7 @@ func port_dissociate(port int, source int, object uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2069,7 +2066,7 @@ func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2080,7 +2077,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2090,7 +2087,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2100,7 +2097,7 @@ func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f207945..7ccf66b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,50 +1,123 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x -// +build zos,s390x package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func read(fd int, p []byte) (n int, err error) { +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr + } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -58,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := 
syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), 
uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -90,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -100,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -111,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -121,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ 
-131,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -141,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -152,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -162,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -172,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) 
== -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr + } + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -188,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -205,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -215,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -226,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -237,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), 
uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -248,19 +507,86 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req int, arg uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -273,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +616,11 @@ func Chdir(path string) (err 
error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -303,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -318,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -333,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -344,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -355,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref = error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, 
sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + 
runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - var _p3 *byte - _p3, err = BytePtrFromString(parm) + return +} + +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func unmount(filesystem string, mtm int) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) (err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path 
string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func 
get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) + +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func 
Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit + } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + 
sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + 
runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + 
funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = 
int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref 
= error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -973,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -983,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -993,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1003,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1013,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1024,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1034,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1044,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1059,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1079,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1101,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1111,9 +3104,11 @@ func Truncate(path string, length 
int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1121,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1131,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1144,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1159,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1174,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1190,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1200,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) 
(wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1211,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1221,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1236,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e0484..3a58ae8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf..dcb7a0e 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51b..db5a7bf 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a..7be575a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1..d6e3174 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e440544..ee97157 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82f..35c3b91 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff..5edda76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433b..0dc9e8b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c..308ddf3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d961..418664e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b8..34d0b86 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc1..b71cf45 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc42..e32df1c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa..15ad611 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c9c4ad0..53aef5d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -447,4 +446,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 12ff341..71d5247 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -369,4 +368,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c3fb5e7..c747706 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -411,4 +410,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 358c847..f96e214 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux -// +build arm64,linux package unix @@ -314,4 +313,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 81c4849..2842534 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -308,4 +307,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 202a57e..d095301 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -431,4 +430,15 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 1fbceb5..295c7f4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -361,4 +360,15 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b4ffb7a..d1a9eac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64le && linux -// +build mips64le,linux package unix @@ -361,4 +360,15 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 867985f..bec157c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -431,4 +430,15 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index a8cce69..7ee7bdc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -438,4 +437,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index d44c5b3..fad1f25 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -410,4 +409,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4214dd9..7d3e163 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -410,4 +409,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 3e594a8..0ed53ad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -251,6 +250,8 @@ const ( SYS_ACCEPT4 = 242 SYS_RECVMMSG = 243 SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_RISCV_HWPROBE = 258 + SYS_RISCV_FLUSH_ICACHE = 259 SYS_WAIT4 = 260 SYS_PRLIMIT64 = 261 SYS_FANOTIFY_INIT = 262 @@ -313,4 +314,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 7ea4652..2fba04a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -372,7 +371,19 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 92f628e..621d00d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux -// +build sparc64,linux package unix @@ -389,4 +388,15 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699e..b2aa8cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4..524a1b1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6..d59b943 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952e..31e771d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 5977338..9fd77c6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af291..af10af2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a..cc2028a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef59..c06dd44 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a0..9ddbf3e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa..19a6ee4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0..05192a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad..5e8c263 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2670 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix -// TODO: auto-generate. 
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 
0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C - SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - 
SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT = 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - 
SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - 
SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 0x6B9 - SYS___LOCALE_INIT_A = 0x6B7 - SYS___LSTAT_A = 0x6EE - SYS___LSTAT_O_A = 0x6EF - SYS___MKDIR_A = 0x6E9 - SYS___MKFIFO_A = 0x6EC - SYS___MKNOD_A = 0x6F0 - SYS___MONETARY_INIT_A = 0x6BC - SYS___MOUNT_A = 0x6F1 - SYS___NL_CSINFO_A = 0x6D6 - SYS___NL_LANGINFO_A = 0x6BA - SYS___NL_LNAGINFO_STD_A = 0x6BB - SYS___NL_MONINFO_A = 0x6D7 - SYS___NL_NUMINFO_A = 0x6D8 - SYS___NL_RESPINFO_A = 0x6D9 - SYS___NL_TIMINFO_A = 0x6DA - SYS___NUMERIC_INIT_A = 0x6C6 - SYS___OPEN_A = 0x6F7 - SYS___PRINTF_A = 0x6DD - SYS___RESP_INIT_A = 0x6C7 - SYS___RPMATCH_A = 0x6C8 - SYS___RPMATCH_C_A = 0x6C9 - SYS___RPMATCH_STD_A = 0x6CA - SYS___SETLOCALE_A = 0x6F9 - SYS___SPAWNP_A = 0x6C5 - SYS___SPAWN_A = 0x6C3 - SYS___SPRINTF_A = 0x6FB - SYS___STAT_A = 0x6EA - SYS___STAT_O_A = 0x6EB - SYS___STRCOLL_STD_A = 0x6A1 - SYS___STRFMON_A = 0x6BD - 
SYS___STRFMON_STD_A = 0x6BE - SYS___STRFTIME_A = 0x6CC - SYS___STRFTIME_STD_A = 0x6CD - SYS___STRPTIME_A = 0x6CE - SYS___STRPTIME_STD_A = 0x6CF - SYS___STRXFRM_A = 0x6A2 - SYS___STRXFRM_C_A = 0x6A3 - SYS___STRXFRM_STD_A = 0x6A5 - SYS___SYNTAX_INIT_A = 0x6D4 - SYS___TIME_INIT_A = 0x6CB - SYS___TOD_INIT_A = 0x6D5 - SYS___TOWLOWER_A = 0x6B3 - SYS___TOWLOWER_STD_A = 0x6B4 - SYS___TOWUPPER_A = 0x6B5 - SYS___TOWUPPER_STD_A = 0x6B6 - SYS___UMOUNT_A = 0x6F2 - SYS___VFPRINTF_A = 0x6FC - SYS___VPRINTF_A = 0x6FD - SYS___VSPRINTF_A = 0x6FE - SYS___VSWPRINTF_A = 0x6FF - SYS___WCSCOLL_A = 0x6A6 - SYS___WCSCOLL_C_A = 0x6A7 - SYS___WCSCOLL_STD_A = 0x6A8 - SYS___WCSFTIME_A = 0x6D0 - SYS___WCSFTIME_STD_A = 0x6D1 - SYS___WCSXFRM_A = 0x6A9 - SYS___WCSXFRM_C_A = 0x6AA - SYS___WCSXFRM_STD_A = 0x6AB - SYS___WCTYPE_A = 0x6AD - SYS___W_GETMNTENT_A = 0x6F5 - SYS_____CCSIDTYPE_A = 0x6E6 - SYS_____CHATTR_A = 0x6E2 - SYS_____CSNAMETYPE_A = 0x6E7 - SYS_____OPEN_STAT_A = 0x6ED - SYS_____SPAWN2_A = 0x6D2 - SYS_____SPAWNP2_A = 0x6D3 - SYS_____TOCCSID_A = 0x6E4 - SYS_____TOCSNAME_A = 0x6E5 - SYS_ACL_FREE = 0x7FF - SYS_ACL_INIT = 0x7FE - SYS_FWIDE = 0x7DF - SYS_FWPRINTF = 0x7D1 - SYS_FWRITE = 0x07E - SYS_FWSCANF = 0x7D5 - SYS_GETCHAR = 0x07B - SYS_GETS = 0x07C - SYS_M_CREATE_LAYOUT = 0x7C9 - SYS_M_DESTROY_LAYOUT = 0x7CA - SYS_M_GETVALUES_LAYOUT = 0x7CB - SYS_M_SETVALUES_LAYOUT = 0x7CC - SYS_M_TRANSFORM_LAYOUT = 0x7CD - SYS_M_WTRANSFORM_LAYOUT = 0x7CE - SYS_PREAD = 0x7C7 - SYS_PUTC = 0x07D - SYS_PUTCHAR = 0x07A - SYS_PUTS = 0x07F - SYS_PWRITE = 0x7C8 - SYS_TOWCTRAN = 0x7D8 - SYS_TOWCTRANS = 0x7D8 - SYS_UNATEXIT = 0x7B5 - SYS_VFWPRINT = 0x7D3 - SYS_VFWPRINTF = 0x7D3 - SYS_VWPRINTF = 0x7D4 - SYS_WCTRANS = 0x7D7 - SYS_WPRINTF = 0x7D2 - SYS_WSCANF = 0x7D6 - SYS___ASCTIME_R_A = 0x7A1 - SYS___BASENAME_A = 0x7DC - SYS___BTOWC_A = 0x7E4 - SYS___CDUMP_A = 0x7B7 - SYS___CEE3DMP_A = 0x7B6 - SYS___CEILF_H = 0x7F4 - SYS___CEILL_H = 0x7F5 - SYS___CEIL_H = 0x7EA - SYS___CRYPT_A = 0x7BE - SYS___CSNAP_A = 0x7B8 - SYS___CTEST_A = 0x7B9 - SYS___CTIME_R_A = 0x7A2 - SYS___CTRACE_A = 0x7BA - SYS___DBM_OPEN_A = 0x7E6 - SYS___DIRNAME_A = 0x7DD - SYS___FABSF_H = 0x7FA - SYS___FABSL_H = 0x7FB - SYS___FABS_H = 0x7ED - SYS___FGETWC_A = 0x7AA - SYS___FGETWS_A = 0x7AD - SYS___FLOORF_H = 0x7F6 - SYS___FLOORL_H = 0x7F7 - SYS___FLOOR_H = 0x7EB - SYS___FPUTWC_A = 0x7A5 - SYS___FPUTWS_A = 0x7A8 - SYS___GETTIMEOFDAY_A = 0x7AE - SYS___GETWCHAR_A = 0x7AC - SYS___GETWC_A = 0x7AB - SYS___GLOB_A = 0x7DE - SYS___GMTIME_A = 0x7AF - SYS___GMTIME_R_A = 0x7B0 - SYS___INET_PTON_A = 0x7BC - SYS___J0_H = 0x7EE - SYS___J1_H = 0x7EF - SYS___JN_H = 0x7F0 - SYS___LOCALTIME_A = 0x7B1 - SYS___LOCALTIME_R_A = 0x7B2 - SYS___MALLOC24 = 0x7FC - SYS___MALLOC31 = 0x7FD - SYS___MKTIME_A = 0x7B3 - SYS___MODFF_H = 0x7F8 - SYS___MODFL_H = 0x7F9 - SYS___MODF_H = 0x7EC - SYS___OPENDIR_A = 0x7C2 - SYS___OSNAME = 0x7E0 - SYS___PUTWCHAR_A = 0x7A7 - SYS___PUTWC_A = 0x7A6 - SYS___READDIR_A = 0x7C3 - SYS___STRTOLL_A = 0x7A3 - SYS___STRTOULL_A = 0x7A4 - SYS___SYSLOG_A = 0x7BD - SYS___TZZNA = 0x7B4 - SYS___UNGETWC_A = 0x7A9 - SYS___UTIME_A = 0x7A0 - SYS___VFPRINTF2_A = 0x7E7 - SYS___VPRINTF2_A = 0x7E8 - SYS___VSPRINTF2_A = 0x7E9 - SYS___VSWPRNTF2_A = 0x7BB - SYS___WCSTOD_A = 0x7D9 - SYS___WCSTOL_A = 0x7DA - SYS___WCSTOUL_A = 0x7DB - SYS___WCTOB_A = 0x7E5 - SYS___Y0_H = 0x7F1 - SYS___Y1_H = 0x7F2 - SYS___YN_H = 0x7F3 - SYS_____OPENDIR2_A = 0x7BF - SYS_____OSNAME_A = 0x7E1 - SYS_____READDIR2_A = 0x7C0 - SYS_DLCLOSE = 0x8DF - SYS_DLERROR = 0x8E0 - SYS_DLOPEN = 0x8DD - SYS_DLSYM = 0x8DE - SYS_FLOCKFILE 
= 0x8D3 - SYS_FTRYLOCKFILE = 0x8D4 - SYS_FUNLOCKFILE = 0x8D5 - SYS_GETCHAR_UNLOCKED = 0x8D7 - SYS_GETC_UNLOCKED = 0x8D6 - SYS_PUTCHAR_UNLOCKED = 0x8D9 - SYS_PUTC_UNLOCKED = 0x8D8 - SYS_SNPRINTF = 0x8DA - SYS_VSNPRINTF = 0x8DB - SYS_WCSCSPN = 0x08B - SYS_WCSLEN = 0x08C - SYS_WCSNCAT = 0x08D - SYS_WCSNCMP = 0x08A - SYS_WCSNCPY = 0x08F - SYS_WCSSPN = 0x08E - SYS___ABSF_H = 0x8E7 - SYS___ABSL_H = 0x8E8 - SYS___ABS_H = 0x8E6 - SYS___ACOSF_H = 0x8EA - SYS___ACOSH_H = 0x8EC - SYS___ACOSL_H = 0x8EB - SYS___ACOS_H = 0x8E9 - SYS___ASINF_H = 0x8EE - SYS___ASINH_H = 0x8F0 - SYS___ASINL_H = 0x8EF - SYS___ASIN_H = 0x8ED - SYS___ATAN2F_H = 0x8F8 - SYS___ATAN2L_H = 0x8F9 - SYS___ATAN2_H = 0x8F7 - SYS___ATANF_H = 0x8F2 - SYS___ATANHF_H = 0x8F5 - SYS___ATANHL_H = 0x8F6 - SYS___ATANH_H = 0x8F4 - SYS___ATANL_H = 0x8F3 - SYS___ATAN_H = 0x8F1 - SYS___CBRT_H = 0x8FA - SYS___COPYSIGNF_H = 0x8FB - SYS___COPYSIGNL_H = 0x8FC - SYS___COSF_H = 0x8FE - SYS___COSL_H = 0x8FF - SYS___COS_H = 0x8FD - SYS___DLERROR_A = 0x8D2 - SYS___DLOPEN_A = 0x8D0 - SYS___DLSYM_A = 0x8D1 - SYS___GETUTXENT_A = 0x8C6 - SYS___GETUTXID_A = 0x8C7 - SYS___GETUTXLINE_A = 0x8C8 - SYS___ITOA = 0x8AA - SYS___ITOA_A = 0x8B0 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 - SYS___LE_MSG_ADD_INSERT = 0x8A6 - SYS___LE_MSG_GET = 0x8A7 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 - SYS___LE_MSG_WRITE = 0x8A9 - SYS___LLTOA = 0x8AE - SYS___LLTOA_A = 0x8B4 - SYS___LTOA = 0x8AC - SYS___LTOA_A = 0x8B2 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC - SYS___PUTC_UNLOCKED_A = 0x8CB - SYS___PUTUTXLINE_A = 0x8C9 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 - SYS___REXEC_A = 0x8C4 - SYS___REXEC_AF_A = 0x8C5 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 - SYS___SNPRINTF_A = 0x8CD - SYS___SUPERKILL = 0x8A4 - SYS___TCGETATTR_A = 0x8A1 - SYS___TCSETATTR_A = 0x8A2 - SYS___ULLTOA = 0x8AF - SYS___ULLTOA_A = 0x8B5 - SYS___ULTOA = 0x8AD - SYS___ULTOA_A = 0x8B3 - SYS___UTOA = 0x8AB - SYS___UTOA_A = 0x8B1 - SYS___VHM_EVENT = 0x8E4 - SYS___VSNPRINTF_A = 0x8CE - SYS_____GETENV_A = 0x8C3 - SYS_____UTMPXNAME_A = 0x8CA - SYS_CACOSH = 0x9A0 - SYS_CACOSHF = 0x9A3 - SYS_CACOSHL = 0x9A6 - SYS_CARG = 0x9A9 - SYS_CARGF = 0x9AC - SYS_CARGL = 0x9AF - SYS_CASIN = 0x9B2 - SYS_CASINF = 0x9B5 - SYS_CASINH = 0x9BB - SYS_CASINHF = 0x9BE - SYS_CASINHL = 0x9C1 - SYS_CASINL = 0x9B8 - SYS_CATAN = 0x9C4 - SYS_CATANF = 0x9C7 - SYS_CATANH = 0x9CD - SYS_CATANHF = 0x9D0 - SYS_CATANHL = 0x9D3 - SYS_CATANL = 0x9CA - SYS_CCOS = 0x9D6 - SYS_CCOSF = 0x9D9 - SYS_CCOSH = 0x9DF - SYS_CCOSHF = 0x9E2 - SYS_CCOSHL = 0x9E5 - SYS_CCOSL = 0x9DC - SYS_CEXP = 0x9E8 - SYS_CEXPF = 0x9EB - SYS_CEXPL = 0x9EE - SYS_CIMAG = 0x9F1 - SYS_CIMAGF = 0x9F4 - SYS_CIMAGL = 0x9F7 - SYS_CLOGF = 0x9FD - SYS_MEMCHR = 0x09B - SYS_MEMCMP = 0x09A - SYS_STRCOLL = 0x09C - SYS_STRNCMP = 0x09D - SYS_STRRCHR = 0x09F - SYS_STRXFRM = 0x09E - SYS___CACOSHF_B = 0x9A4 - SYS___CACOSHF_H = 0x9A5 - SYS___CACOSHL_B = 0x9A7 - SYS___CACOSHL_H = 0x9A8 - SYS___CACOSH_B = 0x9A1 - SYS___CACOSH_H = 0x9A2 - SYS___CARGF_B = 0x9AD - SYS___CARGF_H = 0x9AE - SYS___CARGL_B = 0x9B0 - SYS___CARGL_H = 0x9B1 - SYS___CARG_B = 0x9AA - SYS___CARG_H = 0x9AB - SYS___CASINF_B = 0x9B6 - SYS___CASINF_H = 0x9B7 - SYS___CASINHF_B = 0x9BF - SYS___CASINHF_H = 0x9C0 - SYS___CASINHL_B = 0x9C2 - SYS___CASINHL_H = 0x9C3 - SYS___CASINH_B = 0x9BC - SYS___CASINH_H = 0x9BD - SYS___CASINL_B = 0x9B9 - SYS___CASINL_H = 0x9BA - SYS___CASIN_B = 0x9B3 - SYS___CASIN_H = 0x9B4 - SYS___CATANF_B = 0x9C8 - SYS___CATANF_H = 0x9C9 - SYS___CATANHF_B = 0x9D1 - SYS___CATANHF_H = 0x9D2 - SYS___CATANHL_B = 0x9D4 - SYS___CATANHL_H = 0x9D5 - 
SYS___CATANH_B = 0x9CE - SYS___CATANH_H = 0x9CF - SYS___CATANL_B = 0x9CB - SYS___CATANL_H = 0x9CC - SYS___CATAN_B = 0x9C5 - SYS___CATAN_H = 0x9C6 - SYS___CCOSF_B = 0x9DA - SYS___CCOSF_H = 0x9DB - SYS___CCOSHF_B = 0x9E3 - SYS___CCOSHF_H = 0x9E4 - SYS___CCOSHL_B = 0x9E6 - SYS___CCOSHL_H = 0x9E7 - SYS___CCOSH_B = 0x9E0 - SYS___CCOSH_H = 0x9E1 - SYS___CCOSL_B = 0x9DD - SYS___CCOSL_H = 0x9DE - SYS___CCOS_B = 0x9D7 - SYS___CCOS_H = 0x9D8 - SYS___CEXPF_B = 0x9EC - SYS___CEXPF_H = 0x9ED - SYS___CEXPL_B = 0x9EF - SYS___CEXPL_H = 0x9F0 - SYS___CEXP_B = 0x9E9 - SYS___CEXP_H = 0x9EA - SYS___CIMAGF_B = 0x9F5 - SYS___CIMAGF_H = 0x9F6 - SYS___CIMAGL_B = 0x9F8 - SYS___CIMAGL_H = 0x9F9 - SYS___CIMAG_B = 0x9F2 - SYS___CIMAG_H = 0x9F3 - SYS___CLOG = 0x9FA - SYS___CLOGF_B = 0x9FE - SYS___CLOGF_H = 0x9FF - SYS___CLOG_B = 0x9FB - SYS___CLOG_H = 0x9FC - SYS_ISWCTYPE = 0x10C - SYS_ISWXDIGI = 0x10A - SYS_ISWXDIGIT = 0x10A - SYS_MBSINIT = 0x10F - SYS_TOWLOWER = 0x10D - SYS_TOWUPPER = 0x10E - SYS_WCTYPE = 0x10B - SYS_WCSSTR = 0x11B - SYS___RPMTCH = 0x11A - SYS_WCSTOD = 0x12E - SYS_WCSTOK = 0x12C - SYS_WCSTOL = 0x12D - SYS_WCSTOUL = 0x12F - SYS_FGETWC = 0x13C - SYS_FGETWS = 0x13D - SYS_FPUTWC = 0x13E - SYS_FPUTWS = 0x13F - SYS_REGERROR = 0x13B - SYS_REGFREE = 0x13A - SYS_COLLEQUIV = 0x14F - SYS_COLLTOSTR = 0x14E - SYS_ISMCCOLLEL = 0x14C - SYS_STRTOCOLL = 0x14D - SYS_DLLFREE = 0x16F - SYS_DLLQUERYFN = 0x16D - SYS_DLLQUERYVAR = 0x16E - SYS_GETMCCOLL = 0x16A - SYS_GETWMCCOLL = 0x16B - SYS___ERR2AD = 0x16C - SYS_CFSETOSPEED = 0x17A - SYS_CHDIR = 0x17B - SYS_CHMOD = 0x17C - SYS_CHOWN = 0x17D - SYS_CLOSE = 0x17E - SYS_CLOSEDIR = 0x17F - SYS_LOG = 0x017 - SYS_COSH = 0x018 - SYS_FCHMOD = 0x18A - SYS_FCHOWN = 0x18B - SYS_FCNTL = 0x18C - SYS_FILENO = 0x18D - SYS_FORK = 0x18E - SYS_FPATHCONF = 0x18F - SYS_GETLOGIN = 0x19A - SYS_GETPGRP = 0x19C - SYS_GETPID = 0x19D - SYS_GETPPID = 0x19E - SYS_GETPWNAM = 0x19F - SYS_TANH = 0x019 - SYS_W_GETMNTENT = 0x19B - SYS_POW = 0x020 - SYS_PTHREAD_SELF = 0x20A - SYS_PTHREAD_SETINTR = 0x20B - SYS_PTHREAD_SETINTRTYPE = 0x20C - SYS_PTHREAD_SETSPECIFIC = 0x20D - SYS_PTHREAD_TESTINTR = 0x20E - SYS_PTHREAD_YIELD = 0x20F - SYS_SQRT = 0x021 - SYS_FLOOR = 0x022 - SYS_J1 = 0x023 - SYS_WCSPBRK = 0x23F - SYS_BSEARCH = 0x24C - SYS_FABS = 0x024 - SYS_GETENV = 0x24A - SYS_LDIV = 0x24D - SYS_SYSTEM = 0x24B - SYS_FMOD = 0x025 - SYS___RETHROW = 0x25F - SYS___THROW = 0x25E - SYS_J0 = 0x026 - SYS_PUTENV = 0x26A - SYS___GETENV = 0x26F - SYS_SEMCTL = 0x27A - SYS_SEMGET = 0x27B - SYS_SEMOP = 0x27C - SYS_SHMAT = 0x27D - SYS_SHMCTL = 0x27E - SYS_SHMDT = 0x27F - SYS_YN = 0x027 - SYS_JN = 0x028 - SYS_SIGALTSTACK = 0x28A - SYS_SIGHOLD = 0x28B - SYS_SIGIGNORE = 0x28C - SYS_SIGINTERRUPT = 0x28D - SYS_SIGPAUSE = 0x28E - SYS_SIGRELSE = 0x28F - SYS_GETOPT = 0x29A - SYS_GETSUBOPT = 0x29D - SYS_LCHOWN = 0x29B - SYS_SETPGRP = 0x29E - SYS_TRUNCATE = 0x29C - SYS_Y0 = 0x029 - SYS___GDERR = 0x29F - SYS_ISALPHA = 0x030 - SYS_VFORK = 0x30F - SYS__LONGJMP = 0x30D - SYS__SETJMP = 0x30E - SYS_GLOB = 0x31A - SYS_GLOBFREE = 0x31B - SYS_ISALNUM = 0x031 - SYS_PUTW = 0x31C - SYS_SEEKDIR = 0x31D - SYS_TELLDIR = 0x31E - SYS_TEMPNAM = 0x31F - SYS_GETTIMEOFDAY_R = 0x32E - SYS_ISLOWER = 0x032 - SYS_LGAMMA = 0x32C - SYS_REMAINDER = 0x32A - SYS_SCALB = 0x32B - SYS_SYNC = 0x32F - SYS_TTYSLOT = 0x32D - SYS_ENDPROTOENT = 0x33A - SYS_ENDSERVENT = 0x33B - SYS_GETHOSTBYADDR = 0x33D - SYS_GETHOSTBYADDR_R = 0x33C - SYS_GETHOSTBYNAME = 0x33F - SYS_GETHOSTBYNAME_R = 0x33E - SYS_ISCNTRL = 0x033 - SYS_GETSERVBYNAME = 0x34A - SYS_GETSERVBYPORT = 0x34B - 
SYS_GETSERVENT = 0x34C - SYS_GETSOCKNAME = 0x34D - SYS_GETSOCKOPT = 0x34E - SYS_INET_ADDR = 0x34F - SYS_ISDIGIT = 0x034 - SYS_ISGRAPH = 0x035 - SYS_SELECT = 0x35B - SYS_SELECTEX = 0x35C - SYS_SEND = 0x35D - SYS_SENDTO = 0x35F - SYS_CHROOT = 0x36A - SYS_ISNAN = 0x36D - SYS_ISUPPER = 0x036 - SYS_ULIMIT = 0x36C - SYS_UTIMES = 0x36E - SYS_W_STATVFS = 0x36B - SYS___H_ERRNO = 0x36F - SYS_GRANTPT = 0x37A - SYS_ISPRINT = 0x037 - SYS_TCGETSID = 0x37C - SYS_UNLOCKPT = 0x37B - SYS___TCGETCP = 0x37D - SYS___TCSETCP = 0x37E - SYS___TCSETTABLES = 0x37F - SYS_ISPUNCT = 0x038 - SYS_NLIST = 0x38C - SYS___IPDBCS = 0x38D - SYS___IPDSPX = 0x38E - SYS___IPMSGC = 0x38F - SYS___STHOSTENT = 0x38B - SYS___STSERVENT = 0x38A - SYS_ISSPACE = 0x039 - SYS_COS = 0x040 - SYS_T_ALLOC = 0x40A - SYS_T_BIND = 0x40B - SYS_T_CLOSE = 0x40C - SYS_T_CONNECT = 0x40D - SYS_T_ERROR = 0x40E - SYS_T_FREE = 0x40F - SYS_TAN = 0x041 - SYS_T_RCVREL = 0x41A - SYS_T_RCVUDATA = 0x41B - SYS_T_RCVUDERR = 0x41C - SYS_T_SND = 0x41D - SYS_T_SNDDIS = 0x41E - SYS_T_SNDREL = 0x41F - SYS_GETPMSG = 0x42A - SYS_ISASTREAM = 0x42B - SYS_PUTMSG = 0x42C - SYS_PUTPMSG = 0x42D - SYS_SINH = 0x042 - SYS___ISPOSIXON = 0x42E - SYS___OPENMVSREL = 0x42F - SYS_ACOS = 0x043 - SYS_ATAN = 0x044 - SYS_ATAN2 = 0x045 - SYS_FTELL = 0x046 - SYS_FGETPOS = 0x047 - SYS_SOCK_DEBUG = 0x47A - SYS_SOCK_DO_TESTSTOR = 0x47D - SYS_TAKESOCKET = 0x47E - SYS___SERVER_INIT = 0x47F - SYS_FSEEK = 0x048 - SYS___IPHOST = 0x48B - SYS___IPNODE = 0x48C - SYS___SERVER_CLASSIFY_CREATE = 0x48D - SYS___SERVER_CLASSIFY_DESTROY = 0x48E - SYS___SERVER_CLASSIFY_RESET = 0x48F - SYS___SMF_RECORD = 0x48A - SYS_FSETPOS = 0x049 - SYS___FNWSA = 0x49B - SYS___SPAWN2 = 0x49D - SYS___SPAWNP2 = 0x49E - SYS_ATOF = 0x050 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C - SYS_PTHREAD_RWLOCK_INIT = 0x50D - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F - SYS_ATOI = 0x051 - SYS___FP_CLASS = 0x51D - SYS___FP_CLR_FLAG = 0x51A - SYS___FP_FINITE = 0x51E - SYS___FP_ISNAN = 0x51F - SYS___FP_RAISE_XCP = 0x51C - SYS___FP_READ_FLAG = 0x51B - SYS_RAND = 0x052 - SYS_SIGTIMEDWAIT = 0x52D - SYS_SIGWAITINFO = 0x52E - SYS___CHKBFP = 0x52F - SYS___FPC_RS = 0x52C - SYS___FPC_RW = 0x52A - SYS___FPC_SM = 0x52B - SYS_STRTOD = 0x053 - SYS_STRTOL = 0x054 - SYS_STRTOUL = 0x055 - SYS_MALLOC = 0x056 - SYS_SRAND = 0x057 - SYS_CALLOC = 0x058 - SYS_FREE = 0x059 - SYS___OSENV = 0x59F - SYS___W_PIOCTL = 0x59E - SYS_LONGJMP = 0x060 - SYS___FLOORF_B = 0x60A - SYS___FLOORL_B = 0x60B - SYS___FREXPF_B = 0x60C - SYS___FREXPL_B = 0x60D - SYS___LDEXPF_B = 0x60E - SYS___LDEXPL_B = 0x60F - SYS_SIGNAL = 0x061 - SYS___ATAN2F_B = 0x61A - SYS___ATAN2L_B = 0x61B - SYS___COSHF_B = 0x61C - SYS___COSHL_B = 0x61D - SYS___EXPF_B = 0x61E - SYS___EXPL_B = 0x61F - SYS_TMPNAM = 0x062 - SYS___ABSF_B = 0x62A - SYS___ABSL_B = 0x62C - SYS___ABS_B = 0x62B - SYS___FMODF_B = 0x62D - SYS___FMODL_B = 0x62E - SYS___MODFF_B = 0x62F - SYS_ATANL = 0x63A - SYS_CEILF = 0x63B - SYS_CEILL = 0x63C - SYS_COSF = 0x63D - SYS_COSHF = 0x63F - SYS_COSL = 0x63E - SYS_REMOVE = 0x063 - SYS_POWL = 0x64A - SYS_RENAME = 0x064 - SYS_SINF = 0x64B - SYS_SINHF = 0x64F - SYS_SINL = 0x64C - SYS_SQRTF = 0x64D - SYS_SQRTL = 0x64E - SYS_BTOWC = 0x65F - SYS_FREXPL = 0x65A - SYS_LDEXPF = 0x65B - SYS_LDEXPL = 0x65C - SYS_MODFF = 0x65D - SYS_MODFL = 0x65E - SYS_TMPFILE = 0x065 - SYS_FREOPEN = 0x066 - SYS___CHARMAP_INIT_A = 0x66E - SYS___GETHOSTBYADDR_R_A = 0x66C - SYS___GETHOSTBYNAME_A = 0x66A - 
SYS___GETHOSTBYNAME_R_A = 0x66D - SYS___MBLEN_A = 0x66F - SYS___RES_INIT_A = 0x66B - SYS_FCLOSE = 0x067 - SYS___GETGRGID_R_A = 0x67D - SYS___WCSTOMBS_A = 0x67A - SYS___WCSTOMBS_STD_A = 0x67B - SYS___WCSWIDTH_A = 0x67C - SYS___WCSWIDTH_ASIA = 0x67F - SYS___WCSWIDTH_STD_A = 0x67E - SYS_FFLUSH = 0x068 - SYS___GETLOGIN_R_A = 0x68E - SYS___GETPWNAM_R_A = 0x68C - SYS___GETPWUID_R_A = 0x68D - SYS___TTYNAME_R_A = 0x68F - SYS___WCWIDTH_ASIA = 0x68B - SYS___WCWIDTH_STD_A = 0x68A - SYS_FOPEN = 0x069 - SYS___REGEXEC_A = 0x69A - SYS___REGEXEC_STD_A = 0x69B - SYS___REGFREE_A = 0x69C - SYS___REGFREE_STD_A = 0x69D - SYS___STRCOLL_A = 0x69E - SYS___STRCOLL_C_A = 0x69F - SYS_SCANF = 0x070 - SYS___A64L_A = 0x70C - SYS___ECVT_A = 0x70D - SYS___FCVT_A = 0x70E - SYS___GCVT_A = 0x70F - SYS___STRTOUL_A = 0x70A - SYS_____AE_CORRESTBL_QUERY_A = 0x70B - SYS_SPRINTF = 0x071 - SYS___ACCESS_A = 0x71F - SYS___CATOPEN_A = 0x71E - SYS___GETOPT_A = 0x71D - SYS___REALPATH_A = 0x71A - SYS___SETENV_A = 0x71B - SYS___SYSTEM_A = 0x71C - SYS_FGETC = 0x072 - SYS___GAI_STRERROR_A = 0x72F - SYS___RMDIR_A = 0x72A - SYS___STATVFS_A = 0x72B - SYS___SYMLINK_A = 0x72C - SYS___TRUNCATE_A = 0x72D - SYS___UNLINK_A = 0x72E - SYS_VFPRINTF = 0x073 - SYS___ISSPACE_A = 0x73A - SYS___ISUPPER_A = 0x73B - SYS___ISWALNUM_A = 0x73F - SYS___ISXDIGIT_A = 0x73C - SYS___TOLOWER_A = 0x73D - SYS___TOUPPER_A = 0x73E - SYS_VPRINTF = 0x074 - SYS___CONFSTR_A = 0x74B - SYS___FDOPEN_A = 0x74E - SYS___FLDATA_A = 0x74F - SYS___FTOK_A = 0x74C - SYS___ISWXDIGIT_A = 0x74A - SYS___MKTEMP_A = 0x74D - SYS_VSPRINTF = 0x075 - SYS___GETGRGID_A = 0x75A - SYS___GETGRNAM_A = 0x75B - SYS___GETGROUPSBYNAME_A = 0x75C - SYS___GETHOSTENT_A = 0x75D - SYS___GETHOSTNAME_A = 0x75E - SYS___GETLOGIN_A = 0x75F - SYS_GETC = 0x076 - SYS___CREATEWORKUNIT_A = 0x76A - SYS___CTERMID_A = 0x76B - SYS___FMTMSG_A = 0x76C - SYS___INITGROUPS_A = 0x76D - SYS___MSGRCV_A = 0x76F - SYS_____LOGIN_A = 0x76E - SYS_FGETS = 0x077 - SYS___STRCASECMP_A = 0x77B - SYS___STRNCASECMP_A = 0x77C - SYS___TTYNAME_A = 0x77D - SYS___UNAME_A = 0x77E - SYS___UTIMES_A = 0x77F - SYS_____SERVER_PWU_A = 0x77A - SYS_FPUTC = 0x078 - SYS___CREAT_O_A = 0x78E - SYS___ENVNA = 0x78F - SYS___FREAD_A = 0x78A - SYS___FWRITE_A = 0x78B - SYS___ISASCII = 0x78D - SYS___OPEN_O_A = 0x78C - SYS_FPUTS = 0x079 - SYS___ASCTIME_A = 0x79C - SYS___CTIME_A = 0x79D - SYS___GETDATE_A = 0x79E - SYS___GETSERVBYPORT_A = 0x79A - SYS___GETSERVENT_A = 0x79B - SYS___TZSET_A = 0x79F - SYS_ACL_FROM_TEXT = 0x80C - SYS_ACL_SET_FD = 0x80A - SYS_ACL_SET_FILE = 0x80B - SYS_ACL_SORT = 0x80E - SYS_ACL_TO_TEXT = 0x80D - SYS_UNGETC = 0x080 - SYS___SHUTDOWN_REGISTRATION = 0x80F - SYS_FREAD = 0x081 - SYS_FREEADDRINFO = 0x81A - SYS_GAI_STRERROR = 0x81B - SYS_REXEC_AF = 0x81C - SYS___DYNALLOC_A = 0x81F - SYS___POE = 0x81D - SYS_WCSTOMBS = 0x082 - SYS___INET_ADDR_A = 0x82F - SYS___NLIST_A = 0x82A - SYS_____TCGETCP_A = 0x82B - SYS_____TCSETCP_A = 0x82C - SYS_____W_PIOCTL_A = 0x82E - SYS_MBTOWC = 0x083 - SYS___CABEND = 0x83D - SYS___LE_CIB_GET = 0x83E - SYS___RECVMSG_A = 0x83B - SYS___SENDMSG_A = 0x83A - SYS___SET_LAA_FOR_JIT = 0x83F - SYS_____LCHATTR_A = 0x83C - SYS_WCTOMB = 0x084 - SYS___CBRTL_B = 0x84A - SYS___COPYSIGNF_B = 0x84B - SYS___COPYSIGNL_B = 0x84C - SYS___COTANF_B = 0x84D - SYS___COTANL_B = 0x84F - SYS___COTAN_B = 0x84E - SYS_MBSTOWCS = 0x085 - SYS___LOG1PL_B = 0x85A - SYS___LOG2F_B = 0x85B - SYS___LOG2L_B = 0x85D - SYS___LOG2_B = 0x85C - SYS___REMAINDERF_B = 0x85E - SYS___REMAINDERL_B = 0x85F - SYS_ACOSHF = 0x86E - SYS_ACOSHL = 0x86F - SYS_WCSCPY = 0x086 - 
SYS___ERFCF_B = 0x86D - SYS___ERFF_B = 0x86C - SYS___LROUNDF_B = 0x86A - SYS___LROUND_B = 0x86B - SYS_COTANL = 0x87A - SYS_EXP2F = 0x87B - SYS_EXP2L = 0x87C - SYS_EXPM1F = 0x87D - SYS_EXPM1L = 0x87E - SYS_FDIMF = 0x87F - SYS_WCSCAT = 0x087 - SYS___COTANL = 0x87A - SYS_REMAINDERF = 0x88A - SYS_REMAINDERL = 0x88B - SYS_REMAINDF = 0x88A - SYS_REMAINDL = 0x88B - SYS_REMQUO = 0x88D - SYS_REMQUOF = 0x88C - SYS_REMQUOL = 0x88E - SYS_TGAMMAF = 0x88F - SYS_WCSCHR = 0x088 - SYS_ERFCF = 0x89B - SYS_ERFCL = 0x89C - SYS_ERFL = 0x89A - SYS_EXP2 = 0x89E - SYS_WCSCMP = 0x089 - SYS___EXP2_B = 0x89D - SYS___FAR_JUMP = 0x89F - SYS_ABS = 0x090 - SYS___ERFCL_H = 0x90A - SYS___EXPF_H = 0x90C - SYS___EXPL_H = 0x90D - SYS___EXPM1_H = 0x90E - SYS___EXP_H = 0x90B - SYS___FDIM_H = 0x90F - SYS_DIV = 0x091 - SYS___LOG2F_H = 0x91F - SYS___LOG2_H = 0x91E - SYS___LOGB_H = 0x91D - SYS___LOGF_H = 0x91B - SYS___LOGL_H = 0x91C - SYS___LOG_H = 0x91A - SYS_LABS = 0x092 - SYS___POWL_H = 0x92A - SYS___REMAINDER_H = 0x92B - SYS___RINT_H = 0x92C - SYS___SCALB_H = 0x92D - SYS___SINF_H = 0x92F - SYS___SIN_H = 0x92E - SYS_STRNCPY = 0x093 - SYS___TANHF_H = 0x93B - SYS___TANHL_H = 0x93C - SYS___TANH_H = 0x93A - SYS___TGAMMAF_H = 0x93E - SYS___TGAMMA_H = 0x93D - SYS___TRUNC_H = 0x93F - SYS_MEMCPY = 0x094 - SYS_VFWSCANF = 0x94A - SYS_VSWSCANF = 0x94E - SYS_VWSCANF = 0x94C - SYS_INET6_RTH_ADD = 0x95D - SYS_INET6_RTH_INIT = 0x95C - SYS_INET6_RTH_REVERSE = 0x95E - SYS_INET6_RTH_SEGMENTS = 0x95F - SYS_INET6_RTH_SPACE = 0x95B - SYS_MEMMOVE = 0x095 - SYS_WCSTOLD = 0x95A - SYS_STRCPY = 0x096 - SYS_STRCMP = 0x097 - SYS_CABS = 0x98E - SYS_STRCAT = 0x098 - SYS___CABS_B = 0x98F - SYS___POW_II = 0x98A - SYS___POW_II_B = 0x98B - SYS___POW_II_H = 0x98C - SYS_CACOSF = 0x99A - SYS_CACOSL = 0x99D - SYS_STRNCAT = 0x099 - SYS___CACOSF_B = 0x99B - SYS___CACOSF_H = 0x99C - SYS___CACOSL_B = 0x99E - SYS___CACOSL_H = 0x99F - SYS_ISWALPHA = 0x100 - SYS_ISWBLANK = 0x101 - SYS___ISWBLK = 0x101 - SYS_ISWCNTRL = 0x102 - SYS_ISWDIGIT = 0x103 - SYS_ISWGRAPH = 0x104 - SYS_ISWLOWER = 0x105 - SYS_ISWPRINT = 0x106 - SYS_ISWPUNCT = 0x107 - SYS_ISWSPACE = 0x108 - SYS_ISWUPPER = 0x109 - SYS_WCTOB = 0x110 - SYS_MBRLEN = 0x111 - SYS_MBRTOWC = 0x112 - SYS_MBSRTOWC = 0x113 - SYS_MBSRTOWCS = 0x113 - SYS_WCRTOMB = 0x114 - SYS_WCSRTOMB = 0x115 - SYS_WCSRTOMBS = 0x115 - SYS___CSID = 0x116 - SYS___WCSID = 0x117 - SYS_STRPTIME = 0x118 - SYS___STRPTM = 0x118 - SYS_STRFMON = 0x119 - SYS_WCSCOLL = 0x130 - SYS_WCSXFRM = 0x131 - SYS_WCSWIDTH = 0x132 - SYS_WCWIDTH = 0x133 - SYS_WCSFTIME = 0x134 - SYS_SWPRINTF = 0x135 - SYS_VSWPRINT = 0x136 - SYS_VSWPRINTF = 0x136 - SYS_SWSCANF = 0x137 - SYS_REGCOMP = 0x138 - SYS_REGEXEC = 0x139 - SYS_GETWC = 0x140 - SYS_GETWCHAR = 0x141 - SYS_PUTWC = 0x142 - SYS_PUTWCHAR = 0x143 - SYS_UNGETWC = 0x144 - SYS_ICONV_OPEN = 0x145 - SYS_ICONV = 0x146 - SYS_ICONV_CLOSE = 0x147 - SYS_COLLRANGE = 0x150 - SYS_CCLASS = 0x151 - SYS_COLLORDER = 0x152 - SYS___DEMANGLE = 0x154 - SYS_FDOPEN = 0x155 - SYS___ERRNO = 0x156 - SYS___ERRNO2 = 0x157 - SYS___TERROR = 0x158 - SYS_MAXCOLL = 0x169 - SYS_DLLLOAD = 0x170 - SYS__EXIT = 0x174 - SYS_ACCESS = 0x175 - SYS_ALARM = 0x176 - SYS_CFGETISPEED = 0x177 - SYS_CFGETOSPEED = 0x178 - SYS_CFSETISPEED = 0x179 - SYS_CREAT = 0x180 - SYS_CTERMID = 0x181 - SYS_DUP = 0x182 - SYS_DUP2 = 0x183 - SYS_EXECL = 0x184 - SYS_EXECLE = 0x185 - SYS_EXECLP = 0x186 - SYS_EXECV = 0x187 - SYS_EXECVE = 0x188 - SYS_EXECVP = 0x189 - SYS_FSTAT = 0x190 - SYS_FSYNC = 0x191 - SYS_FTRUNCATE = 0x192 - SYS_GETCWD = 0x193 - SYS_GETEGID = 0x194 - SYS_GETEUID = 0x195 - 
SYS_GETGID = 0x196 - SYS_GETGRGID = 0x197 - SYS_GETGRNAM = 0x198 - SYS_GETGROUPS = 0x199 - SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 - SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 - SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 - SYS_PTHREAD_MUTEX_INIT = 0x203 - SYS_PTHREAD_MUTEX_DESTROY = 0x204 - SYS_PTHREAD_MUTEX_LOCK = 0x205 - SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 - SYS_PTHREAD_MUTEX_UNLOCK = 0x207 - SYS_PTHREAD_ONCE = 0x209 - SYS_TW_OPEN = 0x210 - SYS_TW_FCNTL = 0x211 - SYS_PTHREAD_JOIN_D4_NP = 0x212 - SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 - SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 - SYS_EXTLINK_NP = 0x215 - SYS___PASSWD = 0x216 - SYS_SETGROUPS = 0x217 - SYS_INITGROUPS = 0x218 - SYS_WCSRCHR = 0x240 - SYS_SVC99 = 0x241 - SYS___SVC99 = 0x241 - SYS_WCSWCS = 0x242 - SYS_LOCALECO = 0x243 - SYS_LOCALECONV = 0x243 - SYS___LIBREL = 0x244 - SYS_RELEASE = 0x245 - SYS___RLSE = 0x245 - SYS_FLOCATE = 0x246 - SYS___FLOCT = 0x246 - SYS_FDELREC = 0x247 - SYS___FDLREC = 0x247 - SYS_FETCH = 0x248 - SYS___FETCH = 0x248 - SYS_QSORT = 0x249 - SYS___CLEANUPCATCH = 0x260 - SYS___CATCHMATCH = 0x261 - SYS___CLEAN2UPCATCH = 0x262 - SYS_GETPRIORITY = 0x270 - SYS_NICE = 0x271 - SYS_SETPRIORITY = 0x272 - SYS_GETITIMER = 0x273 - SYS_SETITIMER = 0x274 - SYS_MSGCTL = 0x275 - SYS_MSGGET = 0x276 - SYS_MSGRCV = 0x277 - SYS_MSGSND = 0x278 - SYS_MSGXRCV = 0x279 - SYS___MSGXR = 0x279 - SYS_SHMGET = 0x280 - SYS___GETIPC = 0x281 - SYS_SETGRENT = 0x282 - SYS_GETGRENT = 0x283 - SYS_ENDGRENT = 0x284 - SYS_SETPWENT = 0x285 - SYS_GETPWENT = 0x286 - SYS_ENDPWENT = 0x287 - SYS_BSD_SIGNAL = 0x288 - SYS_KILLPG = 0x289 - SYS_SIGSET = 0x290 - SYS_SIGSTACK = 0x291 - SYS_GETRLIMIT = 0x292 - SYS_SETRLIMIT = 0x293 - SYS_GETRUSAGE = 0x294 - SYS_MMAP = 0x295 - SYS_MPROTECT = 0x296 - SYS_MSYNC = 0x297 - SYS_MUNMAP = 0x298 - SYS_CONFSTR = 0x299 - SYS___NDMTRM = 0x300 - SYS_FTOK = 0x301 - SYS_BASENAME = 0x302 - SYS_DIRNAME = 0x303 - SYS_GETDTABLESIZE = 0x304 - SYS_MKSTEMP = 0x305 - SYS_MKTEMP = 0x306 - SYS_NFTW = 0x307 - SYS_GETWD = 0x308 - SYS_LOCKF = 0x309 - SYS_WORDEXP = 0x310 - SYS_WORDFREE = 0x311 - SYS_GETPGID = 0x312 - SYS_GETSID = 0x313 - SYS___UTMPXNAME = 0x314 - SYS_CUSERID = 0x315 - SYS_GETPASS = 0x316 - SYS_FNMATCH = 0x317 - SYS_FTW = 0x318 - SYS_GETW = 0x319 - SYS_ACOSH = 0x320 - SYS_ASINH = 0x321 - SYS_ATANH = 0x322 - SYS_CBRT = 0x323 - SYS_EXPM1 = 0x324 - SYS_ILOGB = 0x325 - SYS_LOGB = 0x326 - SYS_LOG1P = 0x327 - SYS_NEXTAFTER = 0x328 - SYS_RINT = 0x329 - SYS_SPAWN = 0x330 - SYS_SPAWNP = 0x331 - SYS_GETLOGIN_UU = 0x332 - SYS_ECVT = 0x333 - SYS_FCVT = 0x334 - SYS_GCVT = 0x335 - SYS_ACCEPT = 0x336 - SYS_BIND = 0x337 - SYS_CONNECT = 0x338 - SYS_ENDHOSTENT = 0x339 - SYS_GETHOSTENT = 0x340 - SYS_GETHOSTID = 0x341 - SYS_GETHOSTNAME = 0x342 - SYS_GETNETBYADDR = 0x343 - SYS_GETNETBYNAME = 0x344 - SYS_GETNETENT = 0x345 - SYS_GETPEERNAME = 0x346 - SYS_GETPROTOBYNAME = 0x347 - SYS_GETPROTOBYNUMBER = 0x348 - SYS_GETPROTOENT = 0x349 - SYS_INET_LNAOF = 0x350 - SYS_INET_MAKEADDR = 0x351 - SYS_INET_NETOF = 0x352 - SYS_INET_NETWORK = 0x353 - SYS_INET_NTOA = 0x354 - SYS_IOCTL = 0x355 - SYS_LISTEN = 0x356 - SYS_READV = 0x357 - SYS_RECV = 0x358 - SYS_RECVFROM = 0x359 - SYS_SETHOSTENT = 0x360 - SYS_SETNETENT = 0x361 - SYS_SETPEER = 0x362 - SYS_SETPROTOENT = 0x363 - SYS_SETSERVENT = 0x364 - SYS_SETSOCKOPT = 0x365 - SYS_SHUTDOWN = 0x366 - SYS_SOCKET = 0x367 - SYS_SOCKETPAIR = 0x368 - SYS_WRITEV = 0x369 - SYS_ENDNETENT = 0x370 - SYS_CLOSELOG = 0x371 - SYS_OPENLOG = 0x372 - SYS_SETLOGMASK = 0x373 - SYS_SYSLOG = 0x374 - SYS_PTSNAME = 0x375 - SYS_SETREUID = 0x376 - 
SYS_SETREGID = 0x377 - SYS_REALPATH = 0x378 - SYS___SIGNGAM = 0x379 - SYS_POLL = 0x380 - SYS_REXEC = 0x381 - SYS___ISASCII2 = 0x382 - SYS___TOASCII2 = 0x383 - SYS_CHPRIORITY = 0x384 - SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 - SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 - SYS_PTHREAD_SET_LIMIT_NP = 0x387 - SYS___STNETENT = 0x388 - SYS___STPROTOENT = 0x389 - SYS___SELECT1 = 0x390 - SYS_PTHREAD_SECURITY_NP = 0x391 - SYS___CHECK_RESOURCE_AUTH_NP = 0x392 - SYS___CONVERT_ID_NP = 0x393 - SYS___OPENVMREL = 0x394 - SYS_WMEMCHR = 0x395 - SYS_WMEMCMP = 0x396 - SYS_WMEMCPY = 0x397 - SYS_WMEMMOVE = 0x398 - SYS_WMEMSET = 0x399 - SYS___FPUTWC = 0x400 - SYS___PUTWC = 0x401 - SYS___PWCHAR = 0x402 - SYS___WCSFTM = 0x403 - SYS___WCSTOK = 0x404 - SYS___WCWDTH = 0x405 - SYS_T_ACCEPT = 0x409 - SYS_T_GETINFO = 0x410 - SYS_T_GETPROTADDR = 0x411 - SYS_T_GETSTATE = 0x412 - SYS_T_LISTEN = 0x413 - SYS_T_LOOK = 0x414 - SYS_T_OPEN = 0x415 - SYS_T_OPTMGMT = 0x416 - SYS_T_RCV = 0x417 - SYS_T_RCVCONNECT = 0x418 - SYS_T_RCVDIS = 0x419 - SYS_T_SNDUDATA = 0x420 - SYS_T_STRERROR = 0x421 - SYS_T_SYNC = 0x422 - SYS_T_UNBIND = 0x423 - SYS___T_ERRNO = 0x424 - SYS___RECVMSG2 = 0x425 - SYS___SENDMSG2 = 0x426 - SYS_FATTACH = 0x427 - SYS_FDETACH = 0x428 - SYS_GETMSG = 0x429 - SYS_GETCONTEXT = 0x430 - SYS_SETCONTEXT = 0x431 - SYS_MAKECONTEXT = 0x432 - SYS_SWAPCONTEXT = 0x433 - SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 - SYS_GETCLIENTID = 0x470 - SYS___GETCLIENTID = 0x471 - SYS_GETSTABLESIZE = 0x472 - SYS_GETIBMOPT = 0x473 - SYS_GETIBMSOCKOPT = 0x474 - SYS_GIVESOCKET = 0x475 - SYS_IBMSFLUSH = 0x476 - SYS_MAXDESC = 0x477 - SYS_SETIBMOPT = 0x478 - SYS_SETIBMSOCKOPT = 0x479 - SYS___SERVER_PWU = 0x480 - SYS_PTHREAD_TAG_NP = 0x481 - SYS___CONSOLE = 0x482 - SYS___WSINIT = 0x483 - SYS___IPTCPN = 0x489 - SYS___SERVER_CLASSIFY = 0x490 - SYS___HEAPRPT = 0x496 - SYS___ISBFP = 0x500 - SYS___FP_CAST = 0x501 - SYS___CERTIFICATE = 0x502 - SYS_SEND_FILE = 0x503 - SYS_AIO_CANCEL = 0x504 - SYS_AIO_ERROR = 0x505 - SYS_AIO_READ = 0x506 - SYS_AIO_RETURN = 0x507 - SYS_AIO_SUSPEND = 0x508 - SYS_AIO_WRITE = 0x509 - SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 - SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 - SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 - SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 - SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 - SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 - SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 - SYS___CTTBL = 0x517 - SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 - SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 - SYS___FP_UNORDERED = 0x520 - SYS___FP_READ_RND = 0x521 - SYS___FP_READ_RND_B = 0x522 - SYS___FP_SWAP_RND = 0x523 - SYS___FP_SWAP_RND_B = 0x524 - SYS___FP_LEVEL = 0x525 - SYS___FP_BTOH = 0x526 - SYS___FP_HTOB = 0x527 - SYS___FPC_RD = 0x528 - SYS___FPC_WR = 0x529 - SYS_PTHREAD_SETCANCELTYPE = 0x600 - SYS_PTHREAD_TESTCANCEL = 0x601 - SYS___ATANF_B = 0x602 - SYS___ATANL_B = 0x603 - SYS___CEILF_B = 0x604 - SYS___CEILL_B = 0x605 - SYS___COSF_B = 0x606 - SYS___COSL_B = 0x607 - SYS___FABSF_B = 0x608 - SYS___FABSL_B = 0x609 - SYS___SINF_B = 0x610 - SYS___SINL_B = 0x611 - SYS___TANF_B = 0x612 - SYS___TANL_B = 0x613 - SYS___TANHF_B = 0x614 - SYS___TANHL_B = 0x615 - SYS___ACOSF_B = 0x616 - SYS___ACOSL_B = 0x617 - SYS___ASINF_B = 0x618 - SYS___ASINL_B = 0x619 - SYS___LOGF_B = 0x620 - SYS___LOGL_B = 0x621 - SYS___LOG10F_B = 0x622 - SYS___LOG10L_B = 0x623 - SYS___POWF_B = 0x624 - SYS___POWL_B = 0x625 - SYS___SINHF_B = 0x626 - SYS___SINHL_B = 0x627 - SYS___SQRTF_B = 0x628 - SYS___SQRTL_B = 0x629 - SYS___MODFL_B = 0x630 - SYS_ABSF = 0x631 - SYS_ABSL = 0x632 - SYS_ACOSF = 0x633 - SYS_ACOSL = 0x634 - 
SYS_ASINF = 0x635 - SYS_ASINL = 0x636 - SYS_ATAN2F = 0x637 - SYS_ATAN2L = 0x638 - SYS_ATANF = 0x639 - SYS_COSHL = 0x640 - SYS_EXPF = 0x641 - SYS_EXPL = 0x642 - SYS_TANHF = 0x643 - SYS_TANHL = 0x644 - SYS_LOG10F = 0x645 - SYS_LOG10L = 0x646 - SYS_LOGF = 0x647 - SYS_LOGL = 0x648 - SYS_POWF = 0x649 - SYS_SINHL = 0x650 - SYS_TANF = 0x651 - SYS_TANL = 0x652 - SYS_FABSF = 0x653 - SYS_FABSL = 0x654 - SYS_FLOORF = 0x655 - SYS_FLOORL = 0x656 - SYS_FMODF = 0x657 - SYS_FMODL = 0x658 - SYS_FREXPF = 0x659 - SYS___CHATTR = 0x660 - SYS___FCHATTR = 0x661 - SYS___TOCCSID = 0x662 - SYS___CSNAMETYPE = 0x663 - SYS___TOCSNAME = 0x664 - SYS___CCSIDTYPE = 0x665 - SYS___AE_CORRESTBL_QUERY = 0x666 - SYS___AE_AUTOCONVERT_STATE = 0x667 - SYS_DN_FIND = 0x668 - SYS___GETHOSTBYADDR_A = 0x669 - SYS___MBLEN_SB_A = 0x670 - SYS___MBLEN_STD_A = 0x671 - SYS___MBLEN_UTF = 0x672 - SYS___MBSTOWCS_A = 0x673 - SYS___MBSTOWCS_STD_A = 0x674 - SYS___MBTOWC_A = 0x675 - SYS___MBTOWC_ISO1 = 0x676 - SYS___MBTOWC_SBCS = 0x677 - SYS___MBTOWC_MBCS = 0x678 - SYS___MBTOWC_UTF = 0x679 - SYS___CSID_A = 0x680 - SYS___CSID_STD_A = 0x681 - SYS___WCSID_A = 0x682 - SYS___WCSID_STD_A = 0x683 - SYS___WCTOMB_A = 0x684 - SYS___WCTOMB_ISO1 = 0x685 - SYS___WCTOMB_STD_A = 0x686 - SYS___WCTOMB_UTF = 0x687 - SYS___WCWIDTH_A = 0x688 - SYS___GETGRNAM_R_A = 0x689 - SYS___READDIR_R_A = 0x690 - SYS___E2A_S = 0x691 - SYS___FNMATCH_A = 0x692 - SYS___FNMATCH_C_A = 0x693 - SYS___EXECL_A = 0x694 - SYS___FNMATCH_STD_A = 0x695 - SYS___REGCOMP_A = 0x696 - SYS___REGCOMP_STD_A = 0x697 - SYS___REGERROR_A = 0x698 - SYS___REGERROR_STD_A = 0x699 - SYS___SWPRINTF_A = 0x700 - SYS___FSCANF_A = 0x701 - SYS___SCANF_A = 0x702 - SYS___SSCANF_A = 0x703 - SYS___SWSCANF_A = 0x704 - SYS___ATOF_A = 0x705 - SYS___ATOI_A = 0x706 - SYS___ATOL_A = 0x707 - SYS___STRTOD_A = 0x708 - SYS___STRTOL_A = 0x709 - SYS___L64A_A = 0x710 - SYS___STRERROR_A = 0x711 - SYS___PERROR_A = 0x712 - SYS___FETCH_A = 0x713 - SYS___GETENV_A = 0x714 - SYS___MKSTEMP_A = 0x717 - SYS___PTSNAME_A = 0x718 - SYS___PUTENV_A = 0x719 - SYS___CHDIR_A = 0x720 - SYS___CHOWN_A = 0x721 - SYS___CHROOT_A = 0x722 - SYS___GETCWD_A = 0x723 - SYS___GETWD_A = 0x724 - SYS___LCHOWN_A = 0x725 - SYS___LINK_A = 0x726 - SYS___PATHCONF_A = 0x727 - SYS___IF_NAMEINDEX_A = 0x728 - SYS___READLINK_A = 0x729 - SYS___EXTLINK_NP_A = 0x730 - SYS___ISALNUM_A = 0x731 - SYS___ISALPHA_A = 0x732 - SYS___A2E_S = 0x733 - SYS___ISCNTRL_A = 0x734 - SYS___ISDIGIT_A = 0x735 - SYS___ISGRAPH_A = 0x736 - SYS___ISLOWER_A = 0x737 - SYS___ISPRINT_A = 0x738 - SYS___ISPUNCT_A = 0x739 - SYS___ISWALPHA_A = 0x740 - SYS___A2E_L = 0x741 - SYS___ISWCNTRL_A = 0x742 - SYS___ISWDIGIT_A = 0x743 - SYS___ISWGRAPH_A = 0x744 - SYS___ISWLOWER_A = 0x745 - SYS___ISWPRINT_A = 0x746 - SYS___ISWPUNCT_A = 0x747 - SYS___ISWSPACE_A = 0x748 - SYS___ISWUPPER_A = 0x749 - SYS___REMOVE_A = 0x750 - SYS___RENAME_A = 0x751 - SYS___TMPNAM_A = 0x752 - SYS___FOPEN_A = 0x753 - SYS___FREOPEN_A = 0x754 - SYS___CUSERID_A = 0x755 - SYS___POPEN_A = 0x756 - SYS___TEMPNAM_A = 0x757 - SYS___FTW_A = 0x758 - SYS___GETGRENT_A = 0x759 - SYS___INET_NTOP_A = 0x760 - SYS___GETPASS_A = 0x761 - SYS___GETPWENT_A = 0x762 - SYS___GETPWNAM_A = 0x763 - SYS___GETPWUID_A = 0x764 - SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 - SYS___CHECKSCHENV_A = 0x766 - SYS___CONNECTSERVER_A = 0x767 - SYS___CONNECTWORKMGR_A = 0x768 - SYS_____CONSOLE_A = 0x769 - SYS___MSGSND_A = 0x770 - SYS___MSGXRCV_A = 0x771 - SYS___NFTW_A = 0x772 - SYS_____PASSWD_A = 0x773 - SYS___PTHREAD_SECURITY_NP_A = 0x774 - SYS___QUERYMETRICS_A = 0x775 - SYS___QUERYSCHENV = 
0x776 - SYS___READV_A = 0x777 - SYS_____SERVER_CLASSIFY_A = 0x778 - SYS_____SERVER_INIT_A = 0x779 - SYS___W_GETPSENT_A = 0x780 - SYS___WRITEV_A = 0x781 - SYS___W_STATFS_A = 0x782 - SYS___W_STATVFS_A = 0x783 - SYS___FPUTC_A = 0x784 - SYS___PUTCHAR_A = 0x785 - SYS___PUTS_A = 0x786 - SYS___FGETS_A = 0x787 - SYS___GETS_A = 0x788 - SYS___FPUTS_A = 0x789 - SYS___PUTC_A = 0x790 - SYS___AE_THREAD_SETMODE = 0x791 - SYS___AE_THREAD_SWAPMODE = 0x792 - SYS___GETNETBYADDR_A = 0x793 - SYS___GETNETBYNAME_A = 0x794 - SYS___GETNETENT_A = 0x795 - SYS___GETPROTOBYNAME_A = 0x796 - SYS___GETPROTOBYNUMBER_A = 0x797 - SYS___GETPROTOENT_A = 0x798 - SYS___GETSERVBYNAME_A = 0x799 - SYS_ACL_FIRST_ENTRY = 0x800 - SYS_ACL_GET_ENTRY = 0x801 - SYS_ACL_VALID = 0x802 - SYS_ACL_CREATE_ENTRY = 0x803 - SYS_ACL_DELETE_ENTRY = 0x804 - SYS_ACL_UPDATE_ENTRY = 0x805 - SYS_ACL_DELETE_FD = 0x806 - SYS_ACL_DELETE_FILE = 0x807 - SYS_ACL_GET_FD = 0x808 - SYS_ACL_GET_FILE = 0x809 - SYS___ERFL_B = 0x810 - SYS___ERFCL_B = 0x811 - SYS___LGAMMAL_B = 0x812 - SYS___SETHOOKEVENTS = 0x813 - SYS_IF_NAMETOINDEX = 0x814 - SYS_IF_INDEXTONAME = 0x815 - SYS_IF_NAMEINDEX = 0x816 - SYS_IF_FREENAMEINDEX = 0x817 - SYS_GETADDRINFO = 0x818 - SYS_GETNAMEINFO = 0x819 - SYS___DYNFREE_A = 0x820 - SYS___RES_QUERY_A = 0x821 - SYS___RES_SEARCH_A = 0x822 - SYS___RES_QUERYDOMAIN_A = 0x823 - SYS___RES_MKQUERY_A = 0x824 - SYS___RES_SEND_A = 0x825 - SYS___DN_EXPAND_A = 0x826 - SYS___DN_SKIPNAME_A = 0x827 - SYS___DN_COMP_A = 0x828 - SYS___DN_FIND_A = 0x829 - SYS___INET_NTOA_A = 0x830 - SYS___INET_NETWORK_A = 0x831 - SYS___ACCEPT_A = 0x832 - SYS___ACCEPT_AND_RECV_A = 0x833 - SYS___BIND_A = 0x834 - SYS___CONNECT_A = 0x835 - SYS___GETPEERNAME_A = 0x836 - SYS___GETSOCKNAME_A = 0x837 - SYS___RECVFROM_A = 0x838 - SYS___SENDTO_A = 0x839 - SYS___LCHATTR = 0x840 - SYS___WRITEDOWN = 0x841 - SYS_PTHREAD_MUTEX_INIT2 = 0x842 - SYS___ACOSHF_B = 0x843 - SYS___ACOSHL_B = 0x844 - SYS___ASINHF_B = 0x845 - SYS___ASINHL_B = 0x846 - SYS___ATANHF_B = 0x847 - SYS___ATANHL_B = 0x848 - SYS___CBRTF_B = 0x849 - SYS___EXP2F_B = 0x850 - SYS___EXP2L_B = 0x851 - SYS___EXPM1F_B = 0x852 - SYS___EXPM1L_B = 0x853 - SYS___FDIMF_B = 0x854 - SYS___FDIM_B = 0x855 - SYS___FDIML_B = 0x856 - SYS___HYPOTF_B = 0x857 - SYS___HYPOTL_B = 0x858 - SYS___LOG1PF_B = 0x859 - SYS___REMQUOF_B = 0x860 - SYS___REMQUO_B = 0x861 - SYS___REMQUOL_B = 0x862 - SYS___TGAMMAF_B = 0x863 - SYS___TGAMMA_B = 0x864 - SYS___TGAMMAL_B = 0x865 - SYS___TRUNCF_B = 0x866 - SYS___TRUNC_B = 0x867 - SYS___TRUNCL_B = 0x868 - SYS___LGAMMAF_B = 0x869 - SYS_ASINHF = 0x870 - SYS_ASINHL = 0x871 - SYS_ATANHF = 0x872 - SYS_ATANHL = 0x873 - SYS_CBRTF = 0x874 - SYS_CBRTL = 0x875 - SYS_COPYSIGNF = 0x876 - SYS_CPYSIGNF = 0x876 - SYS_COPYSIGNL = 0x877 - SYS_CPYSIGNL = 0x877 - SYS_COTANF = 0x878 - SYS___COTANF = 0x878 - SYS_COTAN = 0x879 - SYS___COTAN = 0x879 - SYS_FDIM = 0x881 - SYS_FDIML = 0x882 - SYS_HYPOTF = 0x883 - SYS_HYPOTL = 0x884 - SYS_LOG1PF = 0x885 - SYS_LOG1PL = 0x886 - SYS_LOG2F = 0x887 - SYS_LOG2 = 0x888 - SYS_LOG2L = 0x889 - SYS_TGAMMA = 0x890 - SYS_TGAMMAL = 0x891 - SYS_TRUNCF = 0x892 - SYS_TRUNC = 0x893 - SYS_TRUNCL = 0x894 - SYS_LGAMMAF = 0x895 - SYS_LGAMMAL = 0x896 - SYS_LROUNDF = 0x897 - SYS_LROUND = 0x898 - SYS_ERFF = 0x899 - SYS___COSHF_H = 0x900 - SYS___COSHL_H = 0x901 - SYS___COTAN_H = 0x902 - SYS___COTANF_H = 0x903 - SYS___COTANL_H = 0x904 - SYS___ERF_H = 0x905 - SYS___ERFF_H = 0x906 - SYS___ERFL_H = 0x907 - SYS___ERFC_H = 0x908 - SYS___ERFCF_H = 0x909 - SYS___FDIMF_H = 0x910 - SYS___FDIML_H = 0x911 - SYS___FMOD_H = 0x912 - 
SYS___FMODF_H = 0x913 - SYS___FMODL_H = 0x914 - SYS___GAMMA_H = 0x915 - SYS___HYPOT_H = 0x916 - SYS___ILOGB_H = 0x917 - SYS___LGAMMA_H = 0x918 - SYS___LGAMMAF_H = 0x919 - SYS___LOG2L_H = 0x920 - SYS___LOG1P_H = 0x921 - SYS___LOG10_H = 0x922 - SYS___LOG10F_H = 0x923 - SYS___LOG10L_H = 0x924 - SYS___LROUND_H = 0x925 - SYS___LROUNDF_H = 0x926 - SYS___NEXTAFTER_H = 0x927 - SYS___POW_H = 0x928 - SYS___POWF_H = 0x929 - SYS___SINL_H = 0x930 - SYS___SINH_H = 0x931 - SYS___SINHF_H = 0x932 - SYS___SINHL_H = 0x933 - SYS___SQRT_H = 0x934 - SYS___SQRTF_H = 0x935 - SYS___SQRTL_H = 0x936 - SYS___TAN_H = 0x937 - SYS___TANF_H = 0x938 - SYS___TANL_H = 0x939 - SYS___TRUNCF_H = 0x940 - SYS___TRUNCL_H = 0x941 - SYS___COSH_H = 0x942 - SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 - SYS_VFSCANF = 0x944 - SYS_VSCANF = 0x946 - SYS_VSSCANF = 0x948 - SYS_IMAXABS = 0x950 - SYS_IMAXDIV = 0x951 - SYS_STRTOIMAX = 0x952 - SYS_STRTOUMAX = 0x953 - SYS_WCSTOIMAX = 0x954 - SYS_WCSTOUMAX = 0x955 - SYS_ATOLL = 0x956 - SYS_STRTOF = 0x957 - SYS_STRTOLD = 0x958 - SYS_WCSTOF = 0x959 - SYS_INET6_RTH_GETADDR = 0x960 - SYS_INET6_OPT_INIT = 0x961 - SYS_INET6_OPT_APPEND = 0x962 - SYS_INET6_OPT_FINISH = 0x963 - SYS_INET6_OPT_SET_VAL = 0x964 - SYS_INET6_OPT_NEXT = 0x965 - SYS_INET6_OPT_FIND = 0x966 - SYS_INET6_OPT_GET_VAL = 0x967 - SYS___POW_I = 0x987 - SYS___POW_I_B = 0x988 - SYS___POW_I_H = 0x989 - SYS___CABS_H = 0x990 - SYS_CABSF = 0x991 - SYS___CABSF_B = 0x992 - SYS___CABSF_H = 0x993 - SYS_CABSL = 0x994 - SYS___CABSL_B = 0x995 - SYS___CABSL_H = 0x996 - SYS_CACOS = 0x997 - SYS___CACOS_B = 0x998 - SYS___CACOS_H = 0x999 + SYS_LOG = 0x17 // 23 + SYS_COSH = 0x18 // 24 + SYS_TANH = 0x19 // 25 + SYS_EXP = 0x1A // 26 + SYS_MODF = 0x1B // 27 + SYS_LOG10 = 0x1C // 28 + SYS_FREXP = 0x1D // 29 + SYS_LDEXP = 0x1E // 30 + SYS_CEIL = 0x1F // 31 + SYS_POW = 0x20 // 32 + SYS_SQRT = 0x21 // 33 + SYS_FLOOR = 0x22 // 34 + SYS_J1 = 0x23 // 35 + SYS_FABS = 0x24 // 36 + SYS_FMOD = 0x25 // 37 + SYS_J0 = 0x26 // 38 + SYS_YN = 0x27 // 39 + SYS_JN = 0x28 // 40 + SYS_Y0 = 0x29 // 41 + SYS_Y1 = 0x2A // 42 + SYS_HYPOT = 0x2B // 43 + SYS_ERF = 0x2C // 44 + SYS_ERFC = 0x2D // 45 + SYS_GAMMA = 0x2E // 46 + SYS_ISALPHA = 0x30 // 48 + SYS_ISALNUM = 0x31 // 49 + SYS_ISLOWER = 0x32 // 50 + SYS_ISCNTRL = 0x33 // 51 + SYS_ISDIGIT = 0x34 // 52 + SYS_ISGRAPH = 0x35 // 53 + SYS_ISUPPER = 0x36 // 54 + SYS_ISPRINT = 0x37 // 55 + SYS_ISPUNCT = 0x38 // 56 + SYS_ISSPACE = 0x39 // 57 + SYS_SETLOCAL = 0x3A // 58 + SYS_SETLOCALE = 0x3A // 58 + SYS_ISXDIGIT = 0x3B // 59 + SYS_TOLOWER = 0x3C // 60 + SYS_TOUPPER = 0x3D // 61 + SYS_ASIN = 0x3E // 62 + SYS_SIN = 0x3F // 63 + SYS_COS = 0x40 // 64 + SYS_TAN = 0x41 // 65 + SYS_SINH = 0x42 // 66 + SYS_ACOS = 0x43 // 67 + SYS_ATAN = 0x44 // 68 + SYS_ATAN2 = 0x45 // 69 + SYS_FTELL = 0x46 // 70 + SYS_FGETPOS = 0x47 // 71 + SYS_FSEEK = 0x48 // 72 + SYS_FSETPOS = 0x49 // 73 + SYS_FERROR = 0x4A // 74 + SYS_REWIND = 0x4B // 75 + SYS_CLEARERR = 0x4C // 76 + SYS_FEOF = 0x4D // 77 + SYS_ATOL = 0x4E // 78 + SYS_PERROR = 0x4F // 79 + SYS_ATOF = 0x50 // 80 + SYS_ATOI = 0x51 // 81 + SYS_RAND = 0x52 // 82 + SYS_STRTOD = 0x53 // 83 + SYS_STRTOL = 0x54 // 84 + SYS_STRTOUL = 0x55 // 85 + SYS_MALLOC = 0x56 // 86 + SYS_SRAND = 0x57 // 87 + SYS_CALLOC = 0x58 // 88 + SYS_FREE = 0x59 // 89 + SYS_EXIT = 0x5A // 90 + SYS_REALLOC = 0x5B // 91 + SYS_ABORT = 0x5C // 92 + SYS___ABORT = 0x5C // 92 + SYS_ATEXIT = 0x5D // 93 + SYS_RAISE = 0x5E // 94 + SYS_SETJMP = 0x5F // 95 + SYS_LONGJMP = 0x60 // 96 + SYS_SIGNAL = 0x61 // 97 + SYS_TMPNAM = 0x62 // 98 + SYS_REMOVE = 0x63 // 99 + 
SYS_RENAME = 0x64 // 100 + SYS_TMPFILE = 0x65 // 101 + SYS_FREOPEN = 0x66 // 102 + SYS_FCLOSE = 0x67 // 103 + SYS_FFLUSH = 0x68 // 104 + SYS_FOPEN = 0x69 // 105 + SYS_FSCANF = 0x6A // 106 + SYS_SETBUF = 0x6B // 107 + SYS_SETVBUF = 0x6C // 108 + SYS_FPRINTF = 0x6D // 109 + SYS_SSCANF = 0x6E // 110 + SYS_PRINTF = 0x6F // 111 + SYS_SCANF = 0x70 // 112 + SYS_SPRINTF = 0x71 // 113 + SYS_FGETC = 0x72 // 114 + SYS_VFPRINTF = 0x73 // 115 + SYS_VPRINTF = 0x74 // 116 + SYS_VSPRINTF = 0x75 // 117 + SYS_GETC = 0x76 // 118 + SYS_FGETS = 0x77 // 119 + SYS_FPUTC = 0x78 // 120 + SYS_FPUTS = 0x79 // 121 + SYS_PUTCHAR = 0x7A // 122 + SYS_GETCHAR = 0x7B // 123 + SYS_GETS = 0x7C // 124 + SYS_PUTC = 0x7D // 125 + SYS_FWRITE = 0x7E // 126 + SYS_PUTS = 0x7F // 127 + SYS_UNGETC = 0x80 // 128 + SYS_FREAD = 0x81 // 129 + SYS_WCSTOMBS = 0x82 // 130 + SYS_MBTOWC = 0x83 // 131 + SYS_WCTOMB = 0x84 // 132 + SYS_MBSTOWCS = 0x85 // 133 + SYS_WCSCPY = 0x86 // 134 + SYS_WCSCAT = 0x87 // 135 + SYS_WCSCHR = 0x88 // 136 + SYS_WCSCMP = 0x89 // 137 + SYS_WCSNCMP = 0x8A // 138 + SYS_WCSCSPN = 0x8B // 139 + SYS_WCSLEN = 0x8C // 140 + SYS_WCSNCAT = 0x8D // 141 + SYS_WCSSPN = 0x8E // 142 + SYS_WCSNCPY = 0x8F // 143 + SYS_ABS = 0x90 // 144 + SYS_DIV = 0x91 // 145 + SYS_LABS = 0x92 // 146 + SYS_STRNCPY = 0x93 // 147 + SYS_MEMCPY = 0x94 // 148 + SYS_MEMMOVE = 0x95 // 149 + SYS_STRCPY = 0x96 // 150 + SYS_STRCMP = 0x97 // 151 + SYS_STRCAT = 0x98 // 152 + SYS_STRNCAT = 0x99 // 153 + SYS_MEMCMP = 0x9A // 154 + SYS_MEMCHR = 0x9B // 155 + SYS_STRCOLL = 0x9C // 156 + SYS_STRNCMP = 0x9D // 157 + SYS_STRXFRM = 0x9E // 158 + SYS_STRRCHR = 0x9F // 159 + SYS_STRCHR = 0xA0 // 160 + SYS_STRCSPN = 0xA1 // 161 + SYS_STRPBRK = 0xA2 // 162 + SYS_MEMSET = 0xA3 // 163 + SYS_STRSPN = 0xA4 // 164 + SYS_STRSTR = 0xA5 // 165 + SYS_STRTOK = 0xA6 // 166 + SYS_DIFFTIME = 0xA7 // 167 + SYS_STRERROR = 0xA8 // 168 + SYS_STRLEN = 0xA9 // 169 + SYS_CLOCK = 0xAA // 170 + SYS_CTIME = 0xAB // 171 + SYS_MKTIME = 0xAC // 172 + SYS_TIME = 0xAD // 173 + SYS_ASCTIME = 0xAE // 174 + SYS_MBLEN = 0xAF // 175 + SYS_GMTIME = 0xB0 // 176 + SYS_LOCALTIM = 0xB1 // 177 + SYS_LOCALTIME = 0xB1 // 177 + SYS_STRFTIME = 0xB2 // 178 + SYS___GETCB = 0xB4 // 180 + SYS_FUPDATE = 0xB5 // 181 + SYS___FUPDT = 0xB5 // 181 + SYS_CLRMEMF = 0xBD // 189 + SYS___CLRMF = 0xBD // 189 + SYS_FETCHEP = 0xBF // 191 + SYS___FTCHEP = 0xBF // 191 + SYS_FLDATA = 0xC1 // 193 + SYS___FLDATA = 0xC1 // 193 + SYS_DYNFREE = 0xC2 // 194 + SYS___DYNFRE = 0xC2 // 194 + SYS_DYNALLOC = 0xC3 // 195 + SYS___DYNALL = 0xC3 // 195 + SYS___CDUMP = 0xC4 // 196 + SYS_CSNAP = 0xC5 // 197 + SYS___CSNAP = 0xC5 // 197 + SYS_CTRACE = 0xC6 // 198 + SYS___CTRACE = 0xC6 // 198 + SYS___CTEST = 0xC7 // 199 + SYS_SETENV = 0xC8 // 200 + SYS___SETENV = 0xC8 // 200 + SYS_CLEARENV = 0xC9 // 201 + SYS___CLRENV = 0xC9 // 201 + SYS___REGCOMP_STD = 0xEA // 234 + SYS_NL_LANGINFO = 0xFC // 252 + SYS_GETSYNTX = 0xFD // 253 + SYS_ISBLANK = 0xFE // 254 + SYS___ISBLNK = 0xFE // 254 + SYS_ISWALNUM = 0xFF // 255 + SYS_ISWALPHA = 0x100 // 256 + SYS_ISWBLANK = 0x101 // 257 + SYS___ISWBLK = 0x101 // 257 + SYS_ISWCNTRL = 0x102 // 258 + SYS_ISWDIGIT = 0x103 // 259 + SYS_ISWGRAPH = 0x104 // 260 + SYS_ISWLOWER = 0x105 // 261 + SYS_ISWPRINT = 0x106 // 262 + SYS_ISWPUNCT = 0x107 // 263 + SYS_ISWSPACE = 0x108 // 264 + SYS_ISWUPPER = 0x109 // 265 + SYS_ISWXDIGI = 0x10A // 266 + SYS_ISWXDIGIT = 0x10A // 266 + SYS_WCTYPE = 0x10B // 267 + SYS_ISWCTYPE = 0x10C // 268 + SYS_TOWLOWER = 0x10D // 269 + SYS_TOWUPPER = 0x10E // 270 + SYS_MBSINIT = 0x10F // 271 + SYS_WCTOB = 
0x110 // 272 + SYS_MBRLEN = 0x111 // 273 + SYS_MBRTOWC = 0x112 // 274 + SYS_MBSRTOWC = 0x113 // 275 + SYS_MBSRTOWCS = 0x113 // 275 + SYS_WCRTOMB = 0x114 // 276 + SYS_WCSRTOMB = 0x115 // 277 + SYS_WCSRTOMBS = 0x115 // 277 + SYS___CSID = 0x116 // 278 + SYS___WCSID = 0x117 // 279 + SYS_STRPTIME = 0x118 // 280 + SYS___STRPTM = 0x118 // 280 + SYS_STRFMON = 0x119 // 281 + SYS___RPMTCH = 0x11A // 282 + SYS_WCSSTR = 0x11B // 283 + SYS_WCSTOK = 0x12C // 300 + SYS_WCSTOL = 0x12D // 301 + SYS_WCSTOD = 0x12E // 302 + SYS_WCSTOUL = 0x12F // 303 + SYS_WCSCOLL = 0x130 // 304 + SYS_WCSXFRM = 0x131 // 305 + SYS_WCSWIDTH = 0x132 // 306 + SYS_WCWIDTH = 0x133 // 307 + SYS_WCSFTIME = 0x134 // 308 + SYS_SWPRINTF = 0x135 // 309 + SYS_VSWPRINT = 0x136 // 310 + SYS_VSWPRINTF = 0x136 // 310 + SYS_SWSCANF = 0x137 // 311 + SYS_REGCOMP = 0x138 // 312 + SYS_REGEXEC = 0x139 // 313 + SYS_REGFREE = 0x13A // 314 + SYS_REGERROR = 0x13B // 315 + SYS_FGETWC = 0x13C // 316 + SYS_FGETWS = 0x13D // 317 + SYS_FPUTWC = 0x13E // 318 + SYS_FPUTWS = 0x13F // 319 + SYS_GETWC = 0x140 // 320 + SYS_GETWCHAR = 0x141 // 321 + SYS_PUTWC = 0x142 // 322 + SYS_PUTWCHAR = 0x143 // 323 + SYS_UNGETWC = 0x144 // 324 + SYS_ICONV_OPEN = 0x145 // 325 + SYS_ICONV = 0x146 // 326 + SYS_ICONV_CLOSE = 0x147 // 327 + SYS_ISMCCOLLEL = 0x14C // 332 + SYS_STRTOCOLL = 0x14D // 333 + SYS_COLLTOSTR = 0x14E // 334 + SYS_COLLEQUIV = 0x14F // 335 + SYS_COLLRANGE = 0x150 // 336 + SYS_CCLASS = 0x151 // 337 + SYS_COLLORDER = 0x152 // 338 + SYS___DEMANGLE = 0x154 // 340 + SYS_FDOPEN = 0x155 // 341 + SYS___ERRNO = 0x156 // 342 + SYS___ERRNO2 = 0x157 // 343 + SYS___TERROR = 0x158 // 344 + SYS_MAXCOLL = 0x169 // 361 + SYS_GETMCCOLL = 0x16A // 362 + SYS_GETWMCCOLL = 0x16B // 363 + SYS___ERR2AD = 0x16C // 364 + SYS_DLLQUERYFN = 0x16D // 365 + SYS_DLLQUERYVAR = 0x16E // 366 + SYS_DLLFREE = 0x16F // 367 + SYS_DLLLOAD = 0x170 // 368 + SYS__EXIT = 0x174 // 372 + SYS_ACCESS = 0x175 // 373 + SYS_ALARM = 0x176 // 374 + SYS_CFGETISPEED = 0x177 // 375 + SYS_CFGETOSPEED = 0x178 // 376 + SYS_CFSETISPEED = 0x179 // 377 + SYS_CFSETOSPEED = 0x17A // 378 + SYS_CHDIR = 0x17B // 379 + SYS_CHMOD = 0x17C // 380 + SYS_CHOWN = 0x17D // 381 + SYS_CLOSE = 0x17E // 382 + SYS_CLOSEDIR = 0x17F // 383 + SYS_CREAT = 0x180 // 384 + SYS_CTERMID = 0x181 // 385 + SYS_DUP = 0x182 // 386 + SYS_DUP2 = 0x183 // 387 + SYS_EXECL = 0x184 // 388 + SYS_EXECLE = 0x185 // 389 + SYS_EXECLP = 0x186 // 390 + SYS_EXECV = 0x187 // 391 + SYS_EXECVE = 0x188 // 392 + SYS_EXECVP = 0x189 // 393 + SYS_FCHMOD = 0x18A // 394 + SYS_FCHOWN = 0x18B // 395 + SYS_FCNTL = 0x18C // 396 + SYS_FILENO = 0x18D // 397 + SYS_FORK = 0x18E // 398 + SYS_FPATHCONF = 0x18F // 399 + SYS_FSTAT = 0x190 // 400 + SYS_FSYNC = 0x191 // 401 + SYS_FTRUNCATE = 0x192 // 402 + SYS_GETCWD = 0x193 // 403 + SYS_GETEGID = 0x194 // 404 + SYS_GETEUID = 0x195 // 405 + SYS_GETGID = 0x196 // 406 + SYS_GETGRGID = 0x197 // 407 + SYS_GETGRNAM = 0x198 // 408 + SYS_GETGROUPS = 0x199 // 409 + SYS_GETLOGIN = 0x19A // 410 + SYS_W_GETMNTENT = 0x19B // 411 + SYS_GETPGRP = 0x19C // 412 + SYS_GETPID = 0x19D // 413 + SYS_GETPPID = 0x19E // 414 + SYS_GETPWNAM = 0x19F // 415 + SYS_GETPWUID = 0x1A0 // 416 + SYS_GETUID = 0x1A1 // 417 + SYS_W_IOCTL = 0x1A2 // 418 + SYS_ISATTY = 0x1A3 // 419 + SYS_KILL = 0x1A4 // 420 + SYS_LINK = 0x1A5 // 421 + SYS_LSEEK = 0x1A6 // 422 + SYS_LSTAT = 0x1A7 // 423 + SYS_MKDIR = 0x1A8 // 424 + SYS_MKFIFO = 0x1A9 // 425 + SYS_MKNOD = 0x1AA // 426 + SYS_MOUNT = 0x1AB // 427 + SYS_OPEN = 0x1AC // 428 + SYS_OPENDIR = 0x1AD // 429 + SYS_PATHCONF = 0x1AE // 430 
+ SYS_PAUSE = 0x1AF // 431 + SYS_PIPE = 0x1B0 // 432 + SYS_W_GETPSENT = 0x1B1 // 433 + SYS_READ = 0x1B2 // 434 + SYS_READDIR = 0x1B3 // 435 + SYS_READLINK = 0x1B4 // 436 + SYS_REWINDDIR = 0x1B5 // 437 + SYS_RMDIR = 0x1B6 // 438 + SYS_SETEGID = 0x1B7 // 439 + SYS_SETEUID = 0x1B8 // 440 + SYS_SETGID = 0x1B9 // 441 + SYS_SETPGID = 0x1BA // 442 + SYS_SETSID = 0x1BB // 443 + SYS_SETUID = 0x1BC // 444 + SYS_SIGACTION = 0x1BD // 445 + SYS_SIGADDSET = 0x1BE // 446 + SYS_SIGDELSET = 0x1BF // 447 + SYS_SIGEMPTYSET = 0x1C0 // 448 + SYS_SIGFILLSET = 0x1C1 // 449 + SYS_SIGISMEMBER = 0x1C2 // 450 + SYS_SIGLONGJMP = 0x1C3 // 451 + SYS_SIGPENDING = 0x1C4 // 452 + SYS_SIGPROCMASK = 0x1C5 // 453 + SYS_SIGSETJMP = 0x1C6 // 454 + SYS_SIGSUSPEND = 0x1C7 // 455 + SYS_SLEEP = 0x1C8 // 456 + SYS_STAT = 0x1C9 // 457 + SYS_W_STATFS = 0x1CA // 458 + SYS_SYMLINK = 0x1CB // 459 + SYS_SYSCONF = 0x1CC // 460 + SYS_TCDRAIN = 0x1CD // 461 + SYS_TCFLOW = 0x1CE // 462 + SYS_TCFLUSH = 0x1CF // 463 + SYS_TCGETATTR = 0x1D0 // 464 + SYS_TCGETPGRP = 0x1D1 // 465 + SYS_TCSENDBREAK = 0x1D2 // 466 + SYS_TCSETATTR = 0x1D3 // 467 + SYS_TCSETPGRP = 0x1D4 // 468 + SYS_TIMES = 0x1D5 // 469 + SYS_TTYNAME = 0x1D6 // 470 + SYS_TZSET = 0x1D7 // 471 + SYS_UMASK = 0x1D8 // 472 + SYS_UMOUNT = 0x1D9 // 473 + SYS_UNAME = 0x1DA // 474 + SYS_UNLINK = 0x1DB // 475 + SYS_UTIME = 0x1DC // 476 + SYS_WAIT = 0x1DD // 477 + SYS_WAITPID = 0x1DE // 478 + SYS_WRITE = 0x1DF // 479 + SYS_CHAUDIT = 0x1E0 // 480 + SYS_FCHAUDIT = 0x1E1 // 481 + SYS_GETGROUPSBYNAME = 0x1E2 // 482 + SYS_SIGWAIT = 0x1E3 // 483 + SYS_PTHREAD_EXIT = 0x1E4 // 484 + SYS_PTHREAD_KILL = 0x1E5 // 485 + SYS_PTHREAD_ATTR_INIT = 0x1E6 // 486 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 // 487 + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 // 488 + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 // 489 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA // 490 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB // 491 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC // 492 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED // 493 + SYS_PTHREAD_CANCEL = 0x1EE // 494 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF // 495 + SYS_PTHREAD_CLEANUP_POP = 0x1F0 // 496 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 // 497 + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 // 498 + SYS_PTHREAD_COND_INIT = 0x1F3 // 499 + SYS_PTHREAD_COND_DESTROY = 0x1F4 // 500 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 // 501 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 // 502 + SYS_PTHREAD_COND_WAIT = 0x1F7 // 503 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 // 504 + SYS_PTHREAD_CREATE = 0x1F9 // 505 + SYS_PTHREAD_DETACH = 0x1FA // 506 + SYS_PTHREAD_EQUAL = 0x1FB // 507 + SYS_PTHREAD_GETSPECIFIC = 0x1FC // 508 + SYS_PTHREAD_JOIN = 0x1FD // 509 + SYS_PTHREAD_KEY_CREATE = 0x1FE // 510 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF // 511 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 // 512 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 // 513 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 // 514 + SYS_PTHREAD_MUTEX_INIT = 0x203 // 515 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 // 516 + SYS_PTHREAD_MUTEX_LOCK = 0x205 // 517 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 // 518 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 // 519 + SYS_PTHREAD_ONCE = 0x209 // 521 + SYS_PTHREAD_SELF = 0x20A // 522 + SYS_PTHREAD_SETINTR = 0x20B // 523 + SYS_PTHREAD_SETINTRTYPE = 0x20C // 524 + SYS_PTHREAD_SETSPECIFIC = 0x20D // 525 + SYS_PTHREAD_TESTINTR = 0x20E // 526 + SYS_PTHREAD_YIELD = 0x20F // 527 + SYS_TW_OPEN = 0x210 // 528 + SYS_TW_FCNTL = 0x211 // 529 + SYS_PTHREAD_JOIN_D4_NP = 0x212 // 530 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 // 531 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 // 532 + SYS_EXTLINK_NP = 
0x215 // 533 + SYS___PASSWD = 0x216 // 534 + SYS_SETGROUPS = 0x217 // 535 + SYS_INITGROUPS = 0x218 // 536 + SYS_WCSPBRK = 0x23F // 575 + SYS_WCSRCHR = 0x240 // 576 + SYS_SVC99 = 0x241 // 577 + SYS___SVC99 = 0x241 // 577 + SYS_WCSWCS = 0x242 // 578 + SYS_LOCALECO = 0x243 // 579 + SYS_LOCALECONV = 0x243 // 579 + SYS___LIBREL = 0x244 // 580 + SYS_RELEASE = 0x245 // 581 + SYS___RLSE = 0x245 // 581 + SYS_FLOCATE = 0x246 // 582 + SYS___FLOCT = 0x246 // 582 + SYS_FDELREC = 0x247 // 583 + SYS___FDLREC = 0x247 // 583 + SYS_FETCH = 0x248 // 584 + SYS___FETCH = 0x248 // 584 + SYS_QSORT = 0x249 // 585 + SYS_GETENV = 0x24A // 586 + SYS_SYSTEM = 0x24B // 587 + SYS_BSEARCH = 0x24C // 588 + SYS_LDIV = 0x24D // 589 + SYS___THROW = 0x25E // 606 + SYS___RETHROW = 0x25F // 607 + SYS___CLEANUPCATCH = 0x260 // 608 + SYS___CATCHMATCH = 0x261 // 609 + SYS___CLEAN2UPCATCH = 0x262 // 610 + SYS_PUTENV = 0x26A // 618 + SYS___GETENV = 0x26F // 623 + SYS_GETPRIORITY = 0x270 // 624 + SYS_NICE = 0x271 // 625 + SYS_SETPRIORITY = 0x272 // 626 + SYS_GETITIMER = 0x273 // 627 + SYS_SETITIMER = 0x274 // 628 + SYS_MSGCTL = 0x275 // 629 + SYS_MSGGET = 0x276 // 630 + SYS_MSGRCV = 0x277 // 631 + SYS_MSGSND = 0x278 // 632 + SYS_MSGXRCV = 0x279 // 633 + SYS___MSGXR = 0x279 // 633 + SYS_SEMCTL = 0x27A // 634 + SYS_SEMGET = 0x27B // 635 + SYS_SEMOP = 0x27C // 636 + SYS_SHMAT = 0x27D // 637 + SYS_SHMCTL = 0x27E // 638 + SYS_SHMDT = 0x27F // 639 + SYS_SHMGET = 0x280 // 640 + SYS___GETIPC = 0x281 // 641 + SYS_SETGRENT = 0x282 // 642 + SYS_GETGRENT = 0x283 // 643 + SYS_ENDGRENT = 0x284 // 644 + SYS_SETPWENT = 0x285 // 645 + SYS_GETPWENT = 0x286 // 646 + SYS_ENDPWENT = 0x287 // 647 + SYS_BSD_SIGNAL = 0x288 // 648 + SYS_KILLPG = 0x289 // 649 + SYS_SIGALTSTACK = 0x28A // 650 + SYS_SIGHOLD = 0x28B // 651 + SYS_SIGIGNORE = 0x28C // 652 + SYS_SIGINTERRUPT = 0x28D // 653 + SYS_SIGPAUSE = 0x28E // 654 + SYS_SIGRELSE = 0x28F // 655 + SYS_SIGSET = 0x290 // 656 + SYS_SIGSTACK = 0x291 // 657 + SYS_GETRLIMIT = 0x292 // 658 + SYS_SETRLIMIT = 0x293 // 659 + SYS_GETRUSAGE = 0x294 // 660 + SYS_MMAP = 0x295 // 661 + SYS_MPROTECT = 0x296 // 662 + SYS_MSYNC = 0x297 // 663 + SYS_MUNMAP = 0x298 // 664 + SYS_CONFSTR = 0x299 // 665 + SYS_GETOPT = 0x29A // 666 + SYS_LCHOWN = 0x29B // 667 + SYS_TRUNCATE = 0x29C // 668 + SYS_GETSUBOPT = 0x29D // 669 + SYS_SETPGRP = 0x29E // 670 + SYS___GDERR = 0x29F // 671 + SYS___TZONE = 0x2A0 // 672 + SYS___DLGHT = 0x2A1 // 673 + SYS___OPARGF = 0x2A2 // 674 + SYS___OPOPTF = 0x2A3 // 675 + SYS___OPINDF = 0x2A4 // 676 + SYS___OPERRF = 0x2A5 // 677 + SYS_GETDATE = 0x2A6 // 678 + SYS_WAIT3 = 0x2A7 // 679 + SYS_WAITID = 0x2A8 // 680 + SYS___CATTRM = 0x2A9 // 681 + SYS___GDTRM = 0x2AA // 682 + SYS___RNDTRM = 0x2AB // 683 + SYS_CRYPT = 0x2AC // 684 + SYS_ENCRYPT = 0x2AD // 685 + SYS_SETKEY = 0x2AE // 686 + SYS___CNVBLK = 0x2AF // 687 + SYS___CRYTRM = 0x2B0 // 688 + SYS___ECRTRM = 0x2B1 // 689 + SYS_DRAND48 = 0x2B2 // 690 + SYS_ERAND48 = 0x2B3 // 691 + SYS_FSTATVFS = 0x2B4 // 692 + SYS_STATVFS = 0x2B5 // 693 + SYS_CATCLOSE = 0x2B6 // 694 + SYS_CATGETS = 0x2B7 // 695 + SYS_CATOPEN = 0x2B8 // 696 + SYS_BCMP = 0x2B9 // 697 + SYS_BCOPY = 0x2BA // 698 + SYS_BZERO = 0x2BB // 699 + SYS_FFS = 0x2BC // 700 + SYS_INDEX = 0x2BD // 701 + SYS_RINDEX = 0x2BE // 702 + SYS_STRCASECMP = 0x2BF // 703 + SYS_STRDUP = 0x2C0 // 704 + SYS_STRNCASECMP = 0x2C1 // 705 + SYS_INITSTATE = 0x2C2 // 706 + SYS_SETSTATE = 0x2C3 // 707 + SYS_RANDOM = 0x2C4 // 708 + SYS_SRANDOM = 0x2C5 // 709 + SYS_HCREATE = 0x2C6 // 710 + SYS_HDESTROY = 0x2C7 // 711 + SYS_HSEARCH = 0x2C8 
// 712 + SYS_LFIND = 0x2C9 // 713 + SYS_LSEARCH = 0x2CA // 714 + SYS_TDELETE = 0x2CB // 715 + SYS_TFIND = 0x2CC // 716 + SYS_TSEARCH = 0x2CD // 717 + SYS_TWALK = 0x2CE // 718 + SYS_INSQUE = 0x2CF // 719 + SYS_REMQUE = 0x2D0 // 720 + SYS_POPEN = 0x2D1 // 721 + SYS_PCLOSE = 0x2D2 // 722 + SYS_SWAB = 0x2D3 // 723 + SYS_MEMCCPY = 0x2D4 // 724 + SYS_GETPAGESIZE = 0x2D8 // 728 + SYS_FCHDIR = 0x2D9 // 729 + SYS___OCLCK = 0x2DA // 730 + SYS___ATOE = 0x2DB // 731 + SYS___ATOE_L = 0x2DC // 732 + SYS___ETOA = 0x2DD // 733 + SYS___ETOA_L = 0x2DE // 734 + SYS_SETUTXENT = 0x2DF // 735 + SYS_GETUTXENT = 0x2E0 // 736 + SYS_ENDUTXENT = 0x2E1 // 737 + SYS_GETUTXID = 0x2E2 // 738 + SYS_GETUTXLINE = 0x2E3 // 739 + SYS_PUTUTXLINE = 0x2E4 // 740 + SYS_FMTMSG = 0x2E5 // 741 + SYS_JRAND48 = 0x2E6 // 742 + SYS_LRAND48 = 0x2E7 // 743 + SYS_MRAND48 = 0x2E8 // 744 + SYS_NRAND48 = 0x2E9 // 745 + SYS_LCONG48 = 0x2EA // 746 + SYS_SRAND48 = 0x2EB // 747 + SYS_SEED48 = 0x2EC // 748 + SYS_ISASCII = 0x2ED // 749 + SYS_TOASCII = 0x2EE // 750 + SYS_A64L = 0x2EF // 751 + SYS_L64A = 0x2F0 // 752 + SYS_UALARM = 0x2F1 // 753 + SYS_USLEEP = 0x2F2 // 754 + SYS___UTXTRM = 0x2F3 // 755 + SYS___SRCTRM = 0x2F4 // 756 + SYS_FTIME = 0x2F5 // 757 + SYS_GETTIMEOFDAY = 0x2F6 // 758 + SYS_DBM_CLEARERR = 0x2F7 // 759 + SYS_DBM_CLOSE = 0x2F8 // 760 + SYS_DBM_DELETE = 0x2F9 // 761 + SYS_DBM_ERROR = 0x2FA // 762 + SYS_DBM_FETCH = 0x2FB // 763 + SYS_DBM_FIRSTKEY = 0x2FC // 764 + SYS_DBM_NEXTKEY = 0x2FD // 765 + SYS_DBM_OPEN = 0x2FE // 766 + SYS_DBM_STORE = 0x2FF // 767 + SYS___NDMTRM = 0x300 // 768 + SYS_FTOK = 0x301 // 769 + SYS_BASENAME = 0x302 // 770 + SYS_DIRNAME = 0x303 // 771 + SYS_GETDTABLESIZE = 0x304 // 772 + SYS_MKSTEMP = 0x305 // 773 + SYS_MKTEMP = 0x306 // 774 + SYS_NFTW = 0x307 // 775 + SYS_GETWD = 0x308 // 776 + SYS_LOCKF = 0x309 // 777 + SYS__LONGJMP = 0x30D // 781 + SYS__SETJMP = 0x30E // 782 + SYS_VFORK = 0x30F // 783 + SYS_WORDEXP = 0x310 // 784 + SYS_WORDFREE = 0x311 // 785 + SYS_GETPGID = 0x312 // 786 + SYS_GETSID = 0x313 // 787 + SYS___UTMPXNAME = 0x314 // 788 + SYS_CUSERID = 0x315 // 789 + SYS_GETPASS = 0x316 // 790 + SYS_FNMATCH = 0x317 // 791 + SYS_FTW = 0x318 // 792 + SYS_GETW = 0x319 // 793 + SYS_GLOB = 0x31A // 794 + SYS_GLOBFREE = 0x31B // 795 + SYS_PUTW = 0x31C // 796 + SYS_SEEKDIR = 0x31D // 797 + SYS_TELLDIR = 0x31E // 798 + SYS_TEMPNAM = 0x31F // 799 + SYS_ACOSH = 0x320 // 800 + SYS_ASINH = 0x321 // 801 + SYS_ATANH = 0x322 // 802 + SYS_CBRT = 0x323 // 803 + SYS_EXPM1 = 0x324 // 804 + SYS_ILOGB = 0x325 // 805 + SYS_LOGB = 0x326 // 806 + SYS_LOG1P = 0x327 // 807 + SYS_NEXTAFTER = 0x328 // 808 + SYS_RINT = 0x329 // 809 + SYS_REMAINDER = 0x32A // 810 + SYS_SCALB = 0x32B // 811 + SYS_LGAMMA = 0x32C // 812 + SYS_TTYSLOT = 0x32D // 813 + SYS_GETTIMEOFDAY_R = 0x32E // 814 + SYS_SYNC = 0x32F // 815 + SYS_SPAWN = 0x330 // 816 + SYS_SPAWNP = 0x331 // 817 + SYS_GETLOGIN_UU = 0x332 // 818 + SYS_ECVT = 0x333 // 819 + SYS_FCVT = 0x334 // 820 + SYS_GCVT = 0x335 // 821 + SYS_ACCEPT = 0x336 // 822 + SYS_BIND = 0x337 // 823 + SYS_CONNECT = 0x338 // 824 + SYS_ENDHOSTENT = 0x339 // 825 + SYS_ENDPROTOENT = 0x33A // 826 + SYS_ENDSERVENT = 0x33B // 827 + SYS_GETHOSTBYADDR_R = 0x33C // 828 + SYS_GETHOSTBYADDR = 0x33D // 829 + SYS_GETHOSTBYNAME_R = 0x33E // 830 + SYS_GETHOSTBYNAME = 0x33F // 831 + SYS_GETHOSTENT = 0x340 // 832 + SYS_GETHOSTID = 0x341 // 833 + SYS_GETHOSTNAME = 0x342 // 834 + SYS_GETNETBYADDR = 0x343 // 835 + SYS_GETNETBYNAME = 0x344 // 836 + SYS_GETNETENT = 0x345 // 837 + SYS_GETPEERNAME = 0x346 // 838 + SYS_GETPROTOBYNAME 
= 0x347 // 839 + SYS_GETPROTOBYNUMBER = 0x348 // 840 + SYS_GETPROTOENT = 0x349 // 841 + SYS_GETSERVBYNAME = 0x34A // 842 + SYS_GETSERVBYPORT = 0x34B // 843 + SYS_GETSERVENT = 0x34C // 844 + SYS_GETSOCKNAME = 0x34D // 845 + SYS_GETSOCKOPT = 0x34E // 846 + SYS_INET_ADDR = 0x34F // 847 + SYS_INET_LNAOF = 0x350 // 848 + SYS_INET_MAKEADDR = 0x351 // 849 + SYS_INET_NETOF = 0x352 // 850 + SYS_INET_NETWORK = 0x353 // 851 + SYS_INET_NTOA = 0x354 // 852 + SYS_IOCTL = 0x355 // 853 + SYS_LISTEN = 0x356 // 854 + SYS_READV = 0x357 // 855 + SYS_RECV = 0x358 // 856 + SYS_RECVFROM = 0x359 // 857 + SYS_SELECT = 0x35B // 859 + SYS_SELECTEX = 0x35C // 860 + SYS_SEND = 0x35D // 861 + SYS_SENDTO = 0x35F // 863 + SYS_SETHOSTENT = 0x360 // 864 + SYS_SETNETENT = 0x361 // 865 + SYS_SETPEER = 0x362 // 866 + SYS_SETPROTOENT = 0x363 // 867 + SYS_SETSERVENT = 0x364 // 868 + SYS_SETSOCKOPT = 0x365 // 869 + SYS_SHUTDOWN = 0x366 // 870 + SYS_SOCKET = 0x367 // 871 + SYS_SOCKETPAIR = 0x368 // 872 + SYS_WRITEV = 0x369 // 873 + SYS_CHROOT = 0x36A // 874 + SYS_W_STATVFS = 0x36B // 875 + SYS_ULIMIT = 0x36C // 876 + SYS_ISNAN = 0x36D // 877 + SYS_UTIMES = 0x36E // 878 + SYS___H_ERRNO = 0x36F // 879 + SYS_ENDNETENT = 0x370 // 880 + SYS_CLOSELOG = 0x371 // 881 + SYS_OPENLOG = 0x372 // 882 + SYS_SETLOGMASK = 0x373 // 883 + SYS_SYSLOG = 0x374 // 884 + SYS_PTSNAME = 0x375 // 885 + SYS_SETREUID = 0x376 // 886 + SYS_SETREGID = 0x377 // 887 + SYS_REALPATH = 0x378 // 888 + SYS___SIGNGAM = 0x379 // 889 + SYS_GRANTPT = 0x37A // 890 + SYS_UNLOCKPT = 0x37B // 891 + SYS_TCGETSID = 0x37C // 892 + SYS___TCGETCP = 0x37D // 893 + SYS___TCSETCP = 0x37E // 894 + SYS___TCSETTABLES = 0x37F // 895 + SYS_POLL = 0x380 // 896 + SYS_REXEC = 0x381 // 897 + SYS___ISASCII2 = 0x382 // 898 + SYS___TOASCII2 = 0x383 // 899 + SYS_CHPRIORITY = 0x384 // 900 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 // 901 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 // 902 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 // 903 + SYS___STNETENT = 0x388 // 904 + SYS___STPROTOENT = 0x389 // 905 + SYS___STSERVENT = 0x38A // 906 + SYS___STHOSTENT = 0x38B // 907 + SYS_NLIST = 0x38C // 908 + SYS___IPDBCS = 0x38D // 909 + SYS___IPDSPX = 0x38E // 910 + SYS___IPMSGC = 0x38F // 911 + SYS___SELECT1 = 0x390 // 912 + SYS_PTHREAD_SECURITY_NP = 0x391 // 913 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 // 914 + SYS___CONVERT_ID_NP = 0x393 // 915 + SYS___OPENVMREL = 0x394 // 916 + SYS_WMEMCHR = 0x395 // 917 + SYS_WMEMCMP = 0x396 // 918 + SYS_WMEMCPY = 0x397 // 919 + SYS_WMEMMOVE = 0x398 // 920 + SYS_WMEMSET = 0x399 // 921 + SYS___FPUTWC = 0x400 // 1024 + SYS___PUTWC = 0x401 // 1025 + SYS___PWCHAR = 0x402 // 1026 + SYS___WCSFTM = 0x403 // 1027 + SYS___WCSTOK = 0x404 // 1028 + SYS___WCWDTH = 0x405 // 1029 + SYS_T_ACCEPT = 0x409 // 1033 + SYS_T_ALLOC = 0x40A // 1034 + SYS_T_BIND = 0x40B // 1035 + SYS_T_CLOSE = 0x40C // 1036 + SYS_T_CONNECT = 0x40D // 1037 + SYS_T_ERROR = 0x40E // 1038 + SYS_T_FREE = 0x40F // 1039 + SYS_T_GETINFO = 0x410 // 1040 + SYS_T_GETPROTADDR = 0x411 // 1041 + SYS_T_GETSTATE = 0x412 // 1042 + SYS_T_LISTEN = 0x413 // 1043 + SYS_T_LOOK = 0x414 // 1044 + SYS_T_OPEN = 0x415 // 1045 + SYS_T_OPTMGMT = 0x416 // 1046 + SYS_T_RCV = 0x417 // 1047 + SYS_T_RCVCONNECT = 0x418 // 1048 + SYS_T_RCVDIS = 0x419 // 1049 + SYS_T_RCVREL = 0x41A // 1050 + SYS_T_RCVUDATA = 0x41B // 1051 + SYS_T_RCVUDERR = 0x41C // 1052 + SYS_T_SND = 0x41D // 1053 + SYS_T_SNDDIS = 0x41E // 1054 + SYS_T_SNDREL = 0x41F // 1055 + SYS_T_SNDUDATA = 0x420 // 1056 + SYS_T_STRERROR = 0x421 // 1057 + SYS_T_SYNC = 0x422 // 1058 + SYS_T_UNBIND = 0x423 
// 1059 + SYS___T_ERRNO = 0x424 // 1060 + SYS___RECVMSG2 = 0x425 // 1061 + SYS___SENDMSG2 = 0x426 // 1062 + SYS_FATTACH = 0x427 // 1063 + SYS_FDETACH = 0x428 // 1064 + SYS_GETMSG = 0x429 // 1065 + SYS_GETPMSG = 0x42A // 1066 + SYS_ISASTREAM = 0x42B // 1067 + SYS_PUTMSG = 0x42C // 1068 + SYS_PUTPMSG = 0x42D // 1069 + SYS___ISPOSIXON = 0x42E // 1070 + SYS___OPENMVSREL = 0x42F // 1071 + SYS_GETCONTEXT = 0x430 // 1072 + SYS_SETCONTEXT = 0x431 // 1073 + SYS_MAKECONTEXT = 0x432 // 1074 + SYS_SWAPCONTEXT = 0x433 // 1075 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 // 1076 + SYS_GETCLIENTID = 0x470 // 1136 + SYS___GETCLIENTID = 0x471 // 1137 + SYS_GETSTABLESIZE = 0x472 // 1138 + SYS_GETIBMOPT = 0x473 // 1139 + SYS_GETIBMSOCKOPT = 0x474 // 1140 + SYS_GIVESOCKET = 0x475 // 1141 + SYS_IBMSFLUSH = 0x476 // 1142 + SYS_MAXDESC = 0x477 // 1143 + SYS_SETIBMOPT = 0x478 // 1144 + SYS_SETIBMSOCKOPT = 0x479 // 1145 + SYS_SOCK_DEBUG = 0x47A // 1146 + SYS_SOCK_DO_TESTSTOR = 0x47D // 1149 + SYS_TAKESOCKET = 0x47E // 1150 + SYS___SERVER_INIT = 0x47F // 1151 + SYS___SERVER_PWU = 0x480 // 1152 + SYS_PTHREAD_TAG_NP = 0x481 // 1153 + SYS___CONSOLE = 0x482 // 1154 + SYS___WSINIT = 0x483 // 1155 + SYS___IPTCPN = 0x489 // 1161 + SYS___SMF_RECORD = 0x48A // 1162 + SYS___IPHOST = 0x48B // 1163 + SYS___IPNODE = 0x48C // 1164 + SYS___SERVER_CLASSIFY_CREATE = 0x48D // 1165 + SYS___SERVER_CLASSIFY_DESTROY = 0x48E // 1166 + SYS___SERVER_CLASSIFY_RESET = 0x48F // 1167 + SYS___SERVER_CLASSIFY = 0x490 // 1168 + SYS___HEAPRPT = 0x496 // 1174 + SYS___FNWSA = 0x49B // 1179 + SYS___SPAWN2 = 0x49D // 1181 + SYS___SPAWNP2 = 0x49E // 1182 + SYS___GDRR = 0x4A1 // 1185 + SYS___HRRNO = 0x4A2 // 1186 + SYS___OPRG = 0x4A3 // 1187 + SYS___OPRR = 0x4A4 // 1188 + SYS___OPND = 0x4A5 // 1189 + SYS___OPPT = 0x4A6 // 1190 + SYS___SIGGM = 0x4A7 // 1191 + SYS___DGHT = 0x4A8 // 1192 + SYS___TZNE = 0x4A9 // 1193 + SYS___TZZN = 0x4AA // 1194 + SYS___TRRNO = 0x4AF // 1199 + SYS___ENVN = 0x4B0 // 1200 + SYS___MLOCKALL = 0x4B1 // 1201 + SYS_CREATEWO = 0x4B2 // 1202 + SYS_CREATEWORKUNIT = 0x4B2 // 1202 + SYS_CONTINUE = 0x4B3 // 1203 + SYS_CONTINUEWORKUNIT = 0x4B3 // 1203 + SYS_CONNECTW = 0x4B4 // 1204 + SYS_CONNECTWORKMGR = 0x4B4 // 1204 + SYS_CONNECTS = 0x4B5 // 1205 + SYS_CONNECTSERVER = 0x4B5 // 1205 + SYS_DISCONNE = 0x4B6 // 1206 + SYS_DISCONNECTSERVER = 0x4B6 // 1206 + SYS_JOINWORK = 0x4B7 // 1207 + SYS_JOINWORKUNIT = 0x4B7 // 1207 + SYS_LEAVEWOR = 0x4B8 // 1208 + SYS_LEAVEWORKUNIT = 0x4B8 // 1208 + SYS_DELETEWO = 0x4B9 // 1209 + SYS_DELETEWORKUNIT = 0x4B9 // 1209 + SYS_QUERYMET = 0x4BA // 1210 + SYS_QUERYMETRICS = 0x4BA // 1210 + SYS_QUERYSCH = 0x4BB // 1211 + SYS_QUERYSCHENV = 0x4BB // 1211 + SYS_CHECKSCH = 0x4BC // 1212 + SYS_CHECKSCHENV = 0x4BC // 1212 + SYS___PID_AFFINITY = 0x4BD // 1213 + SYS___ASINH_B = 0x4BE // 1214 + SYS___ATAN_B = 0x4BF // 1215 + SYS___CBRT_B = 0x4C0 // 1216 + SYS___CEIL_B = 0x4C1 // 1217 + SYS_COPYSIGN = 0x4C2 // 1218 + SYS___COS_B = 0x4C3 // 1219 + SYS___ERF_B = 0x4C4 // 1220 + SYS___ERFC_B = 0x4C5 // 1221 + SYS___EXPM1_B = 0x4C6 // 1222 + SYS___FABS_B = 0x4C7 // 1223 + SYS_FINITE = 0x4C8 // 1224 + SYS___FLOOR_B = 0x4C9 // 1225 + SYS___FREXP_B = 0x4CA // 1226 + SYS___ILOGB_B = 0x4CB // 1227 + SYS___ISNAN_B = 0x4CC // 1228 + SYS___LDEXP_B = 0x4CD // 1229 + SYS___LOG1P_B = 0x4CE // 1230 + SYS___LOGB_B = 0x4CF // 1231 + SYS_MATHERR = 0x4D0 // 1232 + SYS___MODF_B = 0x4D1 // 1233 + SYS___NEXTAFTER_B = 0x4D2 // 1234 + SYS___RINT_B = 0x4D3 // 1235 + SYS_SCALBN = 0x4D4 // 1236 + SYS_SIGNIFIC = 0x4D5 // 1237 + SYS_SIGNIFICAND = 0x4D5 // 
1237 + SYS___SIN_B = 0x4D6 // 1238 + SYS___TAN_B = 0x4D7 // 1239 + SYS___TANH_B = 0x4D8 // 1240 + SYS___ACOS_B = 0x4D9 // 1241 + SYS___ACOSH_B = 0x4DA // 1242 + SYS___ASIN_B = 0x4DB // 1243 + SYS___ATAN2_B = 0x4DC // 1244 + SYS___ATANH_B = 0x4DD // 1245 + SYS___COSH_B = 0x4DE // 1246 + SYS___EXP_B = 0x4DF // 1247 + SYS___FMOD_B = 0x4E0 // 1248 + SYS___GAMMA_B = 0x4E1 // 1249 + SYS_GAMMA_R = 0x4E2 // 1250 + SYS___HYPOT_B = 0x4E3 // 1251 + SYS___J0_B = 0x4E4 // 1252 + SYS___Y0_B = 0x4E5 // 1253 + SYS___J1_B = 0x4E6 // 1254 + SYS___Y1_B = 0x4E7 // 1255 + SYS___JN_B = 0x4E8 // 1256 + SYS___YN_B = 0x4E9 // 1257 + SYS___LGAMMA_B = 0x4EA // 1258 + SYS_LGAMMA_R = 0x4EB // 1259 + SYS___LOG_B = 0x4EC // 1260 + SYS___LOG10_B = 0x4ED // 1261 + SYS___POW_B = 0x4EE // 1262 + SYS___REMAINDER_B = 0x4EF // 1263 + SYS___SCALB_B = 0x4F0 // 1264 + SYS___SINH_B = 0x4F1 // 1265 + SYS___SQRT_B = 0x4F2 // 1266 + SYS___OPENDIR2 = 0x4F3 // 1267 + SYS___READDIR2 = 0x4F4 // 1268 + SYS___LOGIN = 0x4F5 // 1269 + SYS___OPEN_STAT = 0x4F6 // 1270 + SYS_ACCEPT_AND_RECV = 0x4F7 // 1271 + SYS___FP_SETMODE = 0x4F8 // 1272 + SYS___SIGACTIONSET = 0x4FB // 1275 + SYS___UCREATE = 0x4FC // 1276 + SYS___UMALLOC = 0x4FD // 1277 + SYS___UFREE = 0x4FE // 1278 + SYS___UHEAPREPORT = 0x4FF // 1279 + SYS___ISBFP = 0x500 // 1280 + SYS___FP_CAST = 0x501 // 1281 + SYS___CERTIFICATE = 0x502 // 1282 + SYS_SEND_FILE = 0x503 // 1283 + SYS_AIO_CANCEL = 0x504 // 1284 + SYS_AIO_ERROR = 0x505 // 1285 + SYS_AIO_READ = 0x506 // 1286 + SYS_AIO_RETURN = 0x507 // 1287 + SYS_AIO_SUSPEND = 0x508 // 1288 + SYS_AIO_WRITE = 0x509 // 1289 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A // 1290 + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B // 1291 + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C // 1292 + SYS_PTHREAD_RWLOCK_INIT = 0x50D // 1293 + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E // 1294 + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F // 1295 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 // 1296 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 // 1297 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 // 1298 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 // 1299 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 // 1300 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 // 1301 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 // 1302 + SYS___CTTBL = 0x517 // 1303 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 // 1304 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 // 1305 + SYS___FP_CLR_FLAG = 0x51A // 1306 + SYS___FP_READ_FLAG = 0x51B // 1307 + SYS___FP_RAISE_XCP = 0x51C // 1308 + SYS___FP_CLASS = 0x51D // 1309 + SYS___FP_FINITE = 0x51E // 1310 + SYS___FP_ISNAN = 0x51F // 1311 + SYS___FP_UNORDERED = 0x520 // 1312 + SYS___FP_READ_RND = 0x521 // 1313 + SYS___FP_READ_RND_B = 0x522 // 1314 + SYS___FP_SWAP_RND = 0x523 // 1315 + SYS___FP_SWAP_RND_B = 0x524 // 1316 + SYS___FP_LEVEL = 0x525 // 1317 + SYS___FP_BTOH = 0x526 // 1318 + SYS___FP_HTOB = 0x527 // 1319 + SYS___FPC_RD = 0x528 // 1320 + SYS___FPC_WR = 0x529 // 1321 + SYS___FPC_RW = 0x52A // 1322 + SYS___FPC_SM = 0x52B // 1323 + SYS___FPC_RS = 0x52C // 1324 + SYS_SIGTIMEDWAIT = 0x52D // 1325 + SYS_SIGWAITINFO = 0x52E // 1326 + SYS___CHKBFP = 0x52F // 1327 + SYS___W_PIOCTL = 0x59E // 1438 + SYS___OSENV = 0x59F // 1439 + SYS_EXPORTWO = 0x5A1 // 1441 + SYS_EXPORTWORKUNIT = 0x5A1 // 1441 + SYS_UNDOEXPO = 0x5A2 // 1442 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 // 1442 + SYS_IMPORTWO = 0x5A3 // 1443 + SYS_IMPORTWORKUNIT = 0x5A3 // 1443 + SYS_UNDOIMPO = 0x5A4 // 1444 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 // 1444 + SYS_EXTRACTW = 0x5A5 // 1445 + SYS_EXTRACTWORKUNIT = 0x5A5 // 1445 + SYS___CPL = 0x5A6 // 1446 + SYS___MAP_INIT = 0x5A7 // 1447 
+ SYS___MAP_SERVICE = 0x5A8 // 1448 + SYS_SIGQUEUE = 0x5A9 // 1449 + SYS___MOUNT = 0x5AA // 1450 + SYS___GETUSERID = 0x5AB // 1451 + SYS___IPDOMAINNAME = 0x5AC // 1452 + SYS_QUERYENC = 0x5AD // 1453 + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD // 1453 + SYS_CONNECTE = 0x5AE // 1454 + SYS_CONNECTEXPORTIMPORT = 0x5AE // 1454 + SYS___FP_SWAPMODE = 0x5AF // 1455 + SYS_STRTOLL = 0x5B0 // 1456 + SYS_STRTOULL = 0x5B1 // 1457 + SYS___DSA_PREV = 0x5B2 // 1458 + SYS___EP_FIND = 0x5B3 // 1459 + SYS___SERVER_THREADS_QUERY = 0x5B4 // 1460 + SYS___MSGRCV_TIMED = 0x5B7 // 1463 + SYS___SEMOP_TIMED = 0x5B8 // 1464 + SYS___GET_CPUID = 0x5B9 // 1465 + SYS___GET_SYSTEM_SETTINGS = 0x5BA // 1466 + SYS_FTELLO = 0x5C8 // 1480 + SYS_FSEEKO = 0x5C9 // 1481 + SYS_LLDIV = 0x5CB // 1483 + SYS_WCSTOLL = 0x5CC // 1484 + SYS_WCSTOULL = 0x5CD // 1485 + SYS_LLABS = 0x5CE // 1486 + SYS___CONSOLE2 = 0x5D2 // 1490 + SYS_INET_NTOP = 0x5D3 // 1491 + SYS_INET_PTON = 0x5D4 // 1492 + SYS___RES = 0x5D6 // 1494 + SYS_RES_MKQUERY = 0x5D7 // 1495 + SYS_RES_INIT = 0x5D8 // 1496 + SYS_RES_QUERY = 0x5D9 // 1497 + SYS_RES_SEARCH = 0x5DA // 1498 + SYS_RES_SEND = 0x5DB // 1499 + SYS_RES_QUERYDOMAIN = 0x5DC // 1500 + SYS_DN_EXPAND = 0x5DD // 1501 + SYS_DN_SKIPNAME = 0x5DE // 1502 + SYS_DN_COMP = 0x5DF // 1503 + SYS_ASCTIME_R = 0x5E0 // 1504 + SYS_CTIME_R = 0x5E1 // 1505 + SYS_GMTIME_R = 0x5E2 // 1506 + SYS_LOCALTIME_R = 0x5E3 // 1507 + SYS_RAND_R = 0x5E4 // 1508 + SYS_STRTOK_R = 0x5E5 // 1509 + SYS_READDIR_R = 0x5E6 // 1510 + SYS_GETGRGID_R = 0x5E7 // 1511 + SYS_GETGRNAM_R = 0x5E8 // 1512 + SYS_GETLOGIN_R = 0x5E9 // 1513 + SYS_GETPWNAM_R = 0x5EA // 1514 + SYS_GETPWUID_R = 0x5EB // 1515 + SYS_TTYNAME_R = 0x5EC // 1516 + SYS_PTHREAD_ATFORK = 0x5ED // 1517 + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE // 1518 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF // 1519 + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 // 1520 + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 // 1521 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 // 1522 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 // 1523 + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 // 1524 + SYS_PTHREAD_KEY_DELETE = 0x5F5 // 1525 + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 // 1526 + SYS_PTHREAD_SIGMASK = 0x5F7 // 1527 + SYS___DISCARDDATA = 0x5F8 // 1528 + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 // 1529 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA // 1530 + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB // 1531 + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC // 1532 + SYS_PTHREAD_DETACH_U98 = 0x5FD // 1533 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE // 1534 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF // 1535 + SYS_PTHREAD_SETCANCELTYPE = 0x600 // 1536 + SYS_PTHREAD_TESTCANCEL = 0x601 // 1537 + SYS___ATANF_B = 0x602 // 1538 + SYS___ATANL_B = 0x603 // 1539 + SYS___CEILF_B = 0x604 // 1540 + SYS___CEILL_B = 0x605 // 1541 + SYS___COSF_B = 0x606 // 1542 + SYS___COSL_B = 0x607 // 1543 + SYS___FABSF_B = 0x608 // 1544 + SYS___FABSL_B = 0x609 // 1545 + SYS___FLOORF_B = 0x60A // 1546 + SYS___FLOORL_B = 0x60B // 1547 + SYS___FREXPF_B = 0x60C // 1548 + SYS___FREXPL_B = 0x60D // 1549 + SYS___LDEXPF_B = 0x60E // 1550 + SYS___LDEXPL_B = 0x60F // 1551 + SYS___SINF_B = 0x610 // 1552 + SYS___SINL_B = 0x611 // 1553 + SYS___TANF_B = 0x612 // 1554 + SYS___TANL_B = 0x613 // 1555 + SYS___TANHF_B = 0x614 // 1556 + SYS___TANHL_B = 0x615 // 1557 + SYS___ACOSF_B = 0x616 // 1558 + SYS___ACOSL_B = 0x617 // 1559 + SYS___ASINF_B = 0x618 // 1560 + SYS___ASINL_B = 0x619 // 1561 + SYS___ATAN2F_B = 0x61A // 1562 + SYS___ATAN2L_B = 0x61B // 1563 + SYS___COSHF_B = 0x61C // 1564 + SYS___COSHL_B = 0x61D // 1565 + 
SYS___EXPF_B = 0x61E // 1566 + SYS___EXPL_B = 0x61F // 1567 + SYS___LOGF_B = 0x620 // 1568 + SYS___LOGL_B = 0x621 // 1569 + SYS___LOG10F_B = 0x622 // 1570 + SYS___LOG10L_B = 0x623 // 1571 + SYS___POWF_B = 0x624 // 1572 + SYS___POWL_B = 0x625 // 1573 + SYS___SINHF_B = 0x626 // 1574 + SYS___SINHL_B = 0x627 // 1575 + SYS___SQRTF_B = 0x628 // 1576 + SYS___SQRTL_B = 0x629 // 1577 + SYS___ABSF_B = 0x62A // 1578 + SYS___ABS_B = 0x62B // 1579 + SYS___ABSL_B = 0x62C // 1580 + SYS___FMODF_B = 0x62D // 1581 + SYS___FMODL_B = 0x62E // 1582 + SYS___MODFF_B = 0x62F // 1583 + SYS___MODFL_B = 0x630 // 1584 + SYS_ABSF = 0x631 // 1585 + SYS_ABSL = 0x632 // 1586 + SYS_ACOSF = 0x633 // 1587 + SYS_ACOSL = 0x634 // 1588 + SYS_ASINF = 0x635 // 1589 + SYS_ASINL = 0x636 // 1590 + SYS_ATAN2F = 0x637 // 1591 + SYS_ATAN2L = 0x638 // 1592 + SYS_ATANF = 0x639 // 1593 + SYS_ATANL = 0x63A // 1594 + SYS_CEILF = 0x63B // 1595 + SYS_CEILL = 0x63C // 1596 + SYS_COSF = 0x63D // 1597 + SYS_COSL = 0x63E // 1598 + SYS_COSHF = 0x63F // 1599 + SYS_COSHL = 0x640 // 1600 + SYS_EXPF = 0x641 // 1601 + SYS_EXPL = 0x642 // 1602 + SYS_TANHF = 0x643 // 1603 + SYS_TANHL = 0x644 // 1604 + SYS_LOG10F = 0x645 // 1605 + SYS_LOG10L = 0x646 // 1606 + SYS_LOGF = 0x647 // 1607 + SYS_LOGL = 0x648 // 1608 + SYS_POWF = 0x649 // 1609 + SYS_POWL = 0x64A // 1610 + SYS_SINF = 0x64B // 1611 + SYS_SINL = 0x64C // 1612 + SYS_SQRTF = 0x64D // 1613 + SYS_SQRTL = 0x64E // 1614 + SYS_SINHF = 0x64F // 1615 + SYS_SINHL = 0x650 // 1616 + SYS_TANF = 0x651 // 1617 + SYS_TANL = 0x652 // 1618 + SYS_FABSF = 0x653 // 1619 + SYS_FABSL = 0x654 // 1620 + SYS_FLOORF = 0x655 // 1621 + SYS_FLOORL = 0x656 // 1622 + SYS_FMODF = 0x657 // 1623 + SYS_FMODL = 0x658 // 1624 + SYS_FREXPF = 0x659 // 1625 + SYS_FREXPL = 0x65A // 1626 + SYS_LDEXPF = 0x65B // 1627 + SYS_LDEXPL = 0x65C // 1628 + SYS_MODFF = 0x65D // 1629 + SYS_MODFL = 0x65E // 1630 + SYS_BTOWC = 0x65F // 1631 + SYS___CHATTR = 0x660 // 1632 + SYS___FCHATTR = 0x661 // 1633 + SYS___TOCCSID = 0x662 // 1634 + SYS___CSNAMETYPE = 0x663 // 1635 + SYS___TOCSNAME = 0x664 // 1636 + SYS___CCSIDTYPE = 0x665 // 1637 + SYS___AE_CORRESTBL_QUERY = 0x666 // 1638 + SYS___AE_AUTOCONVERT_STATE = 0x667 // 1639 + SYS_DN_FIND = 0x668 // 1640 + SYS___GETHOSTBYADDR_A = 0x669 // 1641 + SYS___GETHOSTBYNAME_A = 0x66A // 1642 + SYS___RES_INIT_A = 0x66B // 1643 + SYS___GETHOSTBYADDR_R_A = 0x66C // 1644 + SYS___GETHOSTBYNAME_R_A = 0x66D // 1645 + SYS___CHARMAP_INIT_A = 0x66E // 1646 + SYS___MBLEN_A = 0x66F // 1647 + SYS___MBLEN_SB_A = 0x670 // 1648 + SYS___MBLEN_STD_A = 0x671 // 1649 + SYS___MBLEN_UTF = 0x672 // 1650 + SYS___MBSTOWCS_A = 0x673 // 1651 + SYS___MBSTOWCS_STD_A = 0x674 // 1652 + SYS___MBTOWC_A = 0x675 // 1653 + SYS___MBTOWC_ISO1 = 0x676 // 1654 + SYS___MBTOWC_SBCS = 0x677 // 1655 + SYS___MBTOWC_MBCS = 0x678 // 1656 + SYS___MBTOWC_UTF = 0x679 // 1657 + SYS___WCSTOMBS_A = 0x67A // 1658 + SYS___WCSTOMBS_STD_A = 0x67B // 1659 + SYS___WCSWIDTH_A = 0x67C // 1660 + SYS___GETGRGID_R_A = 0x67D // 1661 + SYS___WCSWIDTH_STD_A = 0x67E // 1662 + SYS___WCSWIDTH_ASIA = 0x67F // 1663 + SYS___CSID_A = 0x680 // 1664 + SYS___CSID_STD_A = 0x681 // 1665 + SYS___WCSID_A = 0x682 // 1666 + SYS___WCSID_STD_A = 0x683 // 1667 + SYS___WCTOMB_A = 0x684 // 1668 + SYS___WCTOMB_ISO1 = 0x685 // 1669 + SYS___WCTOMB_STD_A = 0x686 // 1670 + SYS___WCTOMB_UTF = 0x687 // 1671 + SYS___WCWIDTH_A = 0x688 // 1672 + SYS___GETGRNAM_R_A = 0x689 // 1673 + SYS___WCWIDTH_STD_A = 0x68A // 1674 + SYS___WCWIDTH_ASIA = 0x68B // 1675 + SYS___GETPWNAM_R_A = 0x68C // 1676 + SYS___GETPWUID_R_A = 
0x68D // 1677 + SYS___GETLOGIN_R_A = 0x68E // 1678 + SYS___TTYNAME_R_A = 0x68F // 1679 + SYS___READDIR_R_A = 0x690 // 1680 + SYS___E2A_S = 0x691 // 1681 + SYS___FNMATCH_A = 0x692 // 1682 + SYS___FNMATCH_C_A = 0x693 // 1683 + SYS___EXECL_A = 0x694 // 1684 + SYS___FNMATCH_STD_A = 0x695 // 1685 + SYS___REGCOMP_A = 0x696 // 1686 + SYS___REGCOMP_STD_A = 0x697 // 1687 + SYS___REGERROR_A = 0x698 // 1688 + SYS___REGERROR_STD_A = 0x699 // 1689 + SYS___REGEXEC_A = 0x69A // 1690 + SYS___REGEXEC_STD_A = 0x69B // 1691 + SYS___REGFREE_A = 0x69C // 1692 + SYS___REGFREE_STD_A = 0x69D // 1693 + SYS___STRCOLL_A = 0x69E // 1694 + SYS___STRCOLL_C_A = 0x69F // 1695 + SYS___EXECLE_A = 0x6A0 // 1696 + SYS___STRCOLL_STD_A = 0x6A1 // 1697 + SYS___STRXFRM_A = 0x6A2 // 1698 + SYS___STRXFRM_C_A = 0x6A3 // 1699 + SYS___EXECLP_A = 0x6A4 // 1700 + SYS___STRXFRM_STD_A = 0x6A5 // 1701 + SYS___WCSCOLL_A = 0x6A6 // 1702 + SYS___WCSCOLL_C_A = 0x6A7 // 1703 + SYS___WCSCOLL_STD_A = 0x6A8 // 1704 + SYS___WCSXFRM_A = 0x6A9 // 1705 + SYS___WCSXFRM_C_A = 0x6AA // 1706 + SYS___WCSXFRM_STD_A = 0x6AB // 1707 + SYS___COLLATE_INIT_A = 0x6AC // 1708 + SYS___WCTYPE_A = 0x6AD // 1709 + SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 + SYS___CTYPE_INIT_A = 0x6AF // 1711 + SYS___ISWCTYPE_A = 0x6B0 // 1712 + SYS___EXECV_A = 0x6B1 // 1713 + SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 + SYS___TOWLOWER_A = 0x6B3 // 1715 + SYS___TOWLOWER_STD_A = 0x6B4 // 1716 + SYS___TOWUPPER_A = 0x6B5 // 1717 + SYS___TOWUPPER_STD_A = 0x6B6 // 1718 + SYS___LOCALE_INIT_A = 0x6B7 // 1719 + SYS___LOCALECONV_A = 0x6B8 // 1720 + SYS___LOCALECONV_STD_A = 0x6B9 // 1721 + SYS___NL_LANGINFO_A = 0x6BA // 1722 + SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 + SYS___MONETARY_INIT_A = 0x6BC // 1724 + SYS___STRFMON_A = 0x6BD // 1725 + SYS___STRFMON_STD_A = 0x6BE // 1726 + SYS___GETADDRINFO_A = 0x6BF // 1727 + SYS___CATGETS_A = 0x6C0 // 1728 + SYS___EXECVE_A = 0x6C1 // 1729 + SYS___EXECVP_A = 0x6C2 // 1730 + SYS___SPAWN_A = 0x6C3 // 1731 + SYS___GETNAMEINFO_A = 0x6C4 // 1732 + SYS___SPAWNP_A = 0x6C5 // 1733 + SYS___NUMERIC_INIT_A = 0x6C6 // 1734 + SYS___RESP_INIT_A = 0x6C7 // 1735 + SYS___RPMATCH_A = 0x6C8 // 1736 + SYS___RPMATCH_C_A = 0x6C9 // 1737 + SYS___RPMATCH_STD_A = 0x6CA // 1738 + SYS___TIME_INIT_A = 0x6CB // 1739 + SYS___STRFTIME_A = 0x6CC // 1740 + SYS___STRFTIME_STD_A = 0x6CD // 1741 + SYS___STRPTIME_A = 0x6CE // 1742 + SYS___STRPTIME_STD_A = 0x6CF // 1743 + SYS___WCSFTIME_A = 0x6D0 // 1744 + SYS___WCSFTIME_STD_A = 0x6D1 // 1745 + SYS_____SPAWN2_A = 0x6D2 // 1746 + SYS_____SPAWNP2_A = 0x6D3 // 1747 + SYS___SYNTAX_INIT_A = 0x6D4 // 1748 + SYS___TOD_INIT_A = 0x6D5 // 1749 + SYS___NL_CSINFO_A = 0x6D6 // 1750 + SYS___NL_MONINFO_A = 0x6D7 // 1751 + SYS___NL_NUMINFO_A = 0x6D8 // 1752 + SYS___NL_RESPINFO_A = 0x6D9 // 1753 + SYS___NL_TIMINFO_A = 0x6DA // 1754 + SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 + SYS___IF_INDEXTONAME_A = 0x6DC // 1756 + SYS___PRINTF_A = 0x6DD // 1757 + SYS___ICONV_OPEN_A = 0x6DE // 1758 + SYS___DLLLOAD_A = 0x6DF // 1759 + SYS___DLLQUERYFN_A = 0x6E0 // 1760 + SYS___DLLQUERYVAR_A = 0x6E1 // 1761 + SYS_____CHATTR_A = 0x6E2 // 1762 + SYS___E2A_L = 0x6E3 // 1763 + SYS_____TOCCSID_A = 0x6E4 // 1764 + SYS_____TOCSNAME_A = 0x6E5 // 1765 + SYS_____CCSIDTYPE_A = 0x6E6 // 1766 + SYS_____CSNAMETYPE_A = 0x6E7 // 1767 + SYS___CHMOD_A = 0x6E8 // 1768 + SYS___MKDIR_A = 0x6E9 // 1769 + SYS___STAT_A = 0x6EA // 1770 + SYS___STAT_O_A = 0x6EB // 1771 + SYS___MKFIFO_A = 0x6EC // 1772 + SYS_____OPEN_STAT_A = 0x6ED // 1773 + SYS___LSTAT_A = 0x6EE // 1774 + SYS___LSTAT_O_A = 0x6EF // 1775 + 
SYS___MKNOD_A = 0x6F0 // 1776 + SYS___MOUNT_A = 0x6F1 // 1777 + SYS___UMOUNT_A = 0x6F2 // 1778 + SYS___CHAUDIT_A = 0x6F4 // 1780 + SYS___W_GETMNTENT_A = 0x6F5 // 1781 + SYS___CREAT_A = 0x6F6 // 1782 + SYS___OPEN_A = 0x6F7 // 1783 + SYS___SETLOCALE_A = 0x6F9 // 1785 + SYS___FPRINTF_A = 0x6FA // 1786 + SYS___SPRINTF_A = 0x6FB // 1787 + SYS___VFPRINTF_A = 0x6FC // 1788 + SYS___VPRINTF_A = 0x6FD // 1789 + SYS___VSPRINTF_A = 0x6FE // 1790 + SYS___VSWPRINTF_A = 0x6FF // 1791 + SYS___SWPRINTF_A = 0x700 // 1792 + SYS___FSCANF_A = 0x701 // 1793 + SYS___SCANF_A = 0x702 // 1794 + SYS___SSCANF_A = 0x703 // 1795 + SYS___SWSCANF_A = 0x704 // 1796 + SYS___ATOF_A = 0x705 // 1797 + SYS___ATOI_A = 0x706 // 1798 + SYS___ATOL_A = 0x707 // 1799 + SYS___STRTOD_A = 0x708 // 1800 + SYS___STRTOL_A = 0x709 // 1801 + SYS___STRTOUL_A = 0x70A // 1802 + SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 + SYS___A64L_A = 0x70C // 1804 + SYS___ECVT_A = 0x70D // 1805 + SYS___FCVT_A = 0x70E // 1806 + SYS___GCVT_A = 0x70F // 1807 + SYS___L64A_A = 0x710 // 1808 + SYS___STRERROR_A = 0x711 // 1809 + SYS___PERROR_A = 0x712 // 1810 + SYS___FETCH_A = 0x713 // 1811 + SYS___GETENV_A = 0x714 // 1812 + SYS___MKSTEMP_A = 0x717 // 1815 + SYS___PTSNAME_A = 0x718 // 1816 + SYS___PUTENV_A = 0x719 // 1817 + SYS___REALPATH_A = 0x71A // 1818 + SYS___SETENV_A = 0x71B // 1819 + SYS___SYSTEM_A = 0x71C // 1820 + SYS___GETOPT_A = 0x71D // 1821 + SYS___CATOPEN_A = 0x71E // 1822 + SYS___ACCESS_A = 0x71F // 1823 + SYS___CHDIR_A = 0x720 // 1824 + SYS___CHOWN_A = 0x721 // 1825 + SYS___CHROOT_A = 0x722 // 1826 + SYS___GETCWD_A = 0x723 // 1827 + SYS___GETWD_A = 0x724 // 1828 + SYS___LCHOWN_A = 0x725 // 1829 + SYS___LINK_A = 0x726 // 1830 + SYS___PATHCONF_A = 0x727 // 1831 + SYS___IF_NAMEINDEX_A = 0x728 // 1832 + SYS___READLINK_A = 0x729 // 1833 + SYS___RMDIR_A = 0x72A // 1834 + SYS___STATVFS_A = 0x72B // 1835 + SYS___SYMLINK_A = 0x72C // 1836 + SYS___TRUNCATE_A = 0x72D // 1837 + SYS___UNLINK_A = 0x72E // 1838 + SYS___GAI_STRERROR_A = 0x72F // 1839 + SYS___EXTLINK_NP_A = 0x730 // 1840 + SYS___ISALNUM_A = 0x731 // 1841 + SYS___ISALPHA_A = 0x732 // 1842 + SYS___A2E_S = 0x733 // 1843 + SYS___ISCNTRL_A = 0x734 // 1844 + SYS___ISDIGIT_A = 0x735 // 1845 + SYS___ISGRAPH_A = 0x736 // 1846 + SYS___ISLOWER_A = 0x737 // 1847 + SYS___ISPRINT_A = 0x738 // 1848 + SYS___ISPUNCT_A = 0x739 // 1849 + SYS___ISSPACE_A = 0x73A // 1850 + SYS___ISUPPER_A = 0x73B // 1851 + SYS___ISXDIGIT_A = 0x73C // 1852 + SYS___TOLOWER_A = 0x73D // 1853 + SYS___TOUPPER_A = 0x73E // 1854 + SYS___ISWALNUM_A = 0x73F // 1855 + SYS___ISWALPHA_A = 0x740 // 1856 + SYS___A2E_L = 0x741 // 1857 + SYS___ISWCNTRL_A = 0x742 // 1858 + SYS___ISWDIGIT_A = 0x743 // 1859 + SYS___ISWGRAPH_A = 0x744 // 1860 + SYS___ISWLOWER_A = 0x745 // 1861 + SYS___ISWPRINT_A = 0x746 // 1862 + SYS___ISWPUNCT_A = 0x747 // 1863 + SYS___ISWSPACE_A = 0x748 // 1864 + SYS___ISWUPPER_A = 0x749 // 1865 + SYS___ISWXDIGIT_A = 0x74A // 1866 + SYS___CONFSTR_A = 0x74B // 1867 + SYS___FTOK_A = 0x74C // 1868 + SYS___MKTEMP_A = 0x74D // 1869 + SYS___FDOPEN_A = 0x74E // 1870 + SYS___FLDATA_A = 0x74F // 1871 + SYS___REMOVE_A = 0x750 // 1872 + SYS___RENAME_A = 0x751 // 1873 + SYS___TMPNAM_A = 0x752 // 1874 + SYS___FOPEN_A = 0x753 // 1875 + SYS___FREOPEN_A = 0x754 // 1876 + SYS___CUSERID_A = 0x755 // 1877 + SYS___POPEN_A = 0x756 // 1878 + SYS___TEMPNAM_A = 0x757 // 1879 + SYS___FTW_A = 0x758 // 1880 + SYS___GETGRENT_A = 0x759 // 1881 + SYS___GETGRGID_A = 0x75A // 1882 + SYS___GETGRNAM_A = 0x75B // 1883 + SYS___GETGROUPSBYNAME_A = 0x75C // 1884 + 
SYS___GETHOSTENT_A = 0x75D // 1885 + SYS___GETHOSTNAME_A = 0x75E // 1886 + SYS___GETLOGIN_A = 0x75F // 1887 + SYS___INET_NTOP_A = 0x760 // 1888 + SYS___GETPASS_A = 0x761 // 1889 + SYS___GETPWENT_A = 0x762 // 1890 + SYS___GETPWNAM_A = 0x763 // 1891 + SYS___GETPWUID_A = 0x764 // 1892 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 + SYS___CHECKSCHENV_A = 0x766 // 1894 + SYS___CONNECTSERVER_A = 0x767 // 1895 + SYS___CONNECTWORKMGR_A = 0x768 // 1896 + SYS_____CONSOLE_A = 0x769 // 1897 + SYS___CREATEWORKUNIT_A = 0x76A // 1898 + SYS___CTERMID_A = 0x76B // 1899 + SYS___FMTMSG_A = 0x76C // 1900 + SYS___INITGROUPS_A = 0x76D // 1901 + SYS_____LOGIN_A = 0x76E // 1902 + SYS___MSGRCV_A = 0x76F // 1903 + SYS___MSGSND_A = 0x770 // 1904 + SYS___MSGXRCV_A = 0x771 // 1905 + SYS___NFTW_A = 0x772 // 1906 + SYS_____PASSWD_A = 0x773 // 1907 + SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 + SYS___QUERYMETRICS_A = 0x775 // 1909 + SYS___QUERYSCHENV = 0x776 // 1910 + SYS___READV_A = 0x777 // 1911 + SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 + SYS_____SERVER_INIT_A = 0x779 // 1913 + SYS_____SERVER_PWU_A = 0x77A // 1914 + SYS___STRCASECMP_A = 0x77B // 1915 + SYS___STRNCASECMP_A = 0x77C // 1916 + SYS___TTYNAME_A = 0x77D // 1917 + SYS___UNAME_A = 0x77E // 1918 + SYS___UTIMES_A = 0x77F // 1919 + SYS___W_GETPSENT_A = 0x780 // 1920 + SYS___WRITEV_A = 0x781 // 1921 + SYS___W_STATFS_A = 0x782 // 1922 + SYS___W_STATVFS_A = 0x783 // 1923 + SYS___FPUTC_A = 0x784 // 1924 + SYS___PUTCHAR_A = 0x785 // 1925 + SYS___PUTS_A = 0x786 // 1926 + SYS___FGETS_A = 0x787 // 1927 + SYS___GETS_A = 0x788 // 1928 + SYS___FPUTS_A = 0x789 // 1929 + SYS___FREAD_A = 0x78A // 1930 + SYS___FWRITE_A = 0x78B // 1931 + SYS___OPEN_O_A = 0x78C // 1932 + SYS___ISASCII = 0x78D // 1933 + SYS___CREAT_O_A = 0x78E // 1934 + SYS___ENVNA = 0x78F // 1935 + SYS___PUTC_A = 0x790 // 1936 + SYS___AE_THREAD_SETMODE = 0x791 // 1937 + SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 + SYS___GETNETBYADDR_A = 0x793 // 1939 + SYS___GETNETBYNAME_A = 0x794 // 1940 + SYS___GETNETENT_A = 0x795 // 1941 + SYS___GETPROTOBYNAME_A = 0x796 // 1942 + SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 + SYS___GETPROTOENT_A = 0x798 // 1944 + SYS___GETSERVBYNAME_A = 0x799 // 1945 + SYS___GETSERVBYPORT_A = 0x79A // 1946 + SYS___GETSERVENT_A = 0x79B // 1947 + SYS___ASCTIME_A = 0x79C // 1948 + SYS___CTIME_A = 0x79D // 1949 + SYS___GETDATE_A = 0x79E // 1950 + SYS___TZSET_A = 0x79F // 1951 + SYS___UTIME_A = 0x7A0 // 1952 + SYS___ASCTIME_R_A = 0x7A1 // 1953 + SYS___CTIME_R_A = 0x7A2 // 1954 + SYS___STRTOLL_A = 0x7A3 // 1955 + SYS___STRTOULL_A = 0x7A4 // 1956 + SYS___FPUTWC_A = 0x7A5 // 1957 + SYS___PUTWC_A = 0x7A6 // 1958 + SYS___PUTWCHAR_A = 0x7A7 // 1959 + SYS___FPUTWS_A = 0x7A8 // 1960 + SYS___UNGETWC_A = 0x7A9 // 1961 + SYS___FGETWC_A = 0x7AA // 1962 + SYS___GETWC_A = 0x7AB // 1963 + SYS___GETWCHAR_A = 0x7AC // 1964 + SYS___FGETWS_A = 0x7AD // 1965 + SYS___GETTIMEOFDAY_A = 0x7AE // 1966 + SYS___GMTIME_A = 0x7AF // 1967 + SYS___GMTIME_R_A = 0x7B0 // 1968 + SYS___LOCALTIME_A = 0x7B1 // 1969 + SYS___LOCALTIME_R_A = 0x7B2 // 1970 + SYS___MKTIME_A = 0x7B3 // 1971 + SYS___TZZNA = 0x7B4 // 1972 + SYS_UNATEXIT = 0x7B5 // 1973 + SYS___CEE3DMP_A = 0x7B6 // 1974 + SYS___CDUMP_A = 0x7B7 // 1975 + SYS___CSNAP_A = 0x7B8 // 1976 + SYS___CTEST_A = 0x7B9 // 1977 + SYS___CTRACE_A = 0x7BA // 1978 + SYS___VSWPRNTF2_A = 0x7BB // 1979 + SYS___INET_PTON_A = 0x7BC // 1980 + SYS___SYSLOG_A = 0x7BD // 1981 + SYS___CRYPT_A = 0x7BE // 1982 + SYS_____OPENDIR2_A = 0x7BF // 1983 + SYS_____READDIR2_A = 0x7C0 // 1984 + SYS___OPENDIR_A = 
0x7C2 // 1986 + SYS___READDIR_A = 0x7C3 // 1987 + SYS_PREAD = 0x7C7 // 1991 + SYS_PWRITE = 0x7C8 // 1992 + SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 + SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 + SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 + SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 + SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 + SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 + SYS_FWPRINTF = 0x7D1 // 2001 + SYS_WPRINTF = 0x7D2 // 2002 + SYS_VFWPRINT = 0x7D3 // 2003 + SYS_VFWPRINTF = 0x7D3 // 2003 + SYS_VWPRINTF = 0x7D4 // 2004 + SYS_FWSCANF = 0x7D5 // 2005 + SYS_WSCANF = 0x7D6 // 2006 + SYS_WCTRANS = 0x7D7 // 2007 + SYS_TOWCTRAN = 0x7D8 // 2008 + SYS_TOWCTRANS = 0x7D8 // 2008 + SYS___WCSTOD_A = 0x7D9 // 2009 + SYS___WCSTOL_A = 0x7DA // 2010 + SYS___WCSTOUL_A = 0x7DB // 2011 + SYS___BASENAME_A = 0x7DC // 2012 + SYS___DIRNAME_A = 0x7DD // 2013 + SYS___GLOB_A = 0x7DE // 2014 + SYS_FWIDE = 0x7DF // 2015 + SYS___OSNAME = 0x7E0 // 2016 + SYS_____OSNAME_A = 0x7E1 // 2017 + SYS___BTOWC_A = 0x7E4 // 2020 + SYS___WCTOB_A = 0x7E5 // 2021 + SYS___DBM_OPEN_A = 0x7E6 // 2022 + SYS___VFPRINTF2_A = 0x7E7 // 2023 + SYS___VPRINTF2_A = 0x7E8 // 2024 + SYS___VSPRINTF2_A = 0x7E9 // 2025 + SYS___CEIL_H = 0x7EA // 2026 + SYS___FLOOR_H = 0x7EB // 2027 + SYS___MODF_H = 0x7EC // 2028 + SYS___FABS_H = 0x7ED // 2029 + SYS___J0_H = 0x7EE // 2030 + SYS___J1_H = 0x7EF // 2031 + SYS___JN_H = 0x7F0 // 2032 + SYS___Y0_H = 0x7F1 // 2033 + SYS___Y1_H = 0x7F2 // 2034 + SYS___YN_H = 0x7F3 // 2035 + SYS___CEILF_H = 0x7F4 // 2036 + SYS___CEILL_H = 0x7F5 // 2037 + SYS___FLOORF_H = 0x7F6 // 2038 + SYS___FLOORL_H = 0x7F7 // 2039 + SYS___MODFF_H = 0x7F8 // 2040 + SYS___MODFL_H = 0x7F9 // 2041 + SYS___FABSF_H = 0x7FA // 2042 + SYS___FABSL_H = 0x7FB // 2043 + SYS___MALLOC24 = 0x7FC // 2044 + SYS___MALLOC31 = 0x7FD // 2045 + SYS_ACL_INIT = 0x7FE // 2046 + SYS_ACL_FREE = 0x7FF // 2047 + SYS_ACL_FIRST_ENTRY = 0x800 // 2048 + SYS_ACL_GET_ENTRY = 0x801 // 2049 + SYS_ACL_VALID = 0x802 // 2050 + SYS_ACL_CREATE_ENTRY = 0x803 // 2051 + SYS_ACL_DELETE_ENTRY = 0x804 // 2052 + SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 + SYS_ACL_DELETE_FD = 0x806 // 2054 + SYS_ACL_DELETE_FILE = 0x807 // 2055 + SYS_ACL_GET_FD = 0x808 // 2056 + SYS_ACL_GET_FILE = 0x809 // 2057 + SYS_ACL_SET_FD = 0x80A // 2058 + SYS_ACL_SET_FILE = 0x80B // 2059 + SYS_ACL_FROM_TEXT = 0x80C // 2060 + SYS_ACL_TO_TEXT = 0x80D // 2061 + SYS_ACL_SORT = 0x80E // 2062 + SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 + SYS___ERFL_B = 0x810 // 2064 + SYS___ERFCL_B = 0x811 // 2065 + SYS___LGAMMAL_B = 0x812 // 2066 + SYS___SETHOOKEVENTS = 0x813 // 2067 + SYS_IF_NAMETOINDEX = 0x814 // 2068 + SYS_IF_INDEXTONAME = 0x815 // 2069 + SYS_IF_NAMEINDEX = 0x816 // 2070 + SYS_IF_FREENAMEINDEX = 0x817 // 2071 + SYS_GETADDRINFO = 0x818 // 2072 + SYS_GETNAMEINFO = 0x819 // 2073 + SYS_FREEADDRINFO = 0x81A // 2074 + SYS_GAI_STRERROR = 0x81B // 2075 + SYS_REXEC_AF = 0x81C // 2076 + SYS___POE = 0x81D // 2077 + SYS___DYNALLOC_A = 0x81F // 2079 + SYS___DYNFREE_A = 0x820 // 2080 + SYS___RES_QUERY_A = 0x821 // 2081 + SYS___RES_SEARCH_A = 0x822 // 2082 + SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 + SYS___RES_MKQUERY_A = 0x824 // 2084 + SYS___RES_SEND_A = 0x825 // 2085 + SYS___DN_EXPAND_A = 0x826 // 2086 + SYS___DN_SKIPNAME_A = 0x827 // 2087 + SYS___DN_COMP_A = 0x828 // 2088 + SYS___DN_FIND_A = 0x829 // 2089 + SYS___NLIST_A = 0x82A // 2090 + SYS_____TCGETCP_A = 0x82B // 2091 + SYS_____TCSETCP_A = 0x82C // 2092 + SYS_____W_PIOCTL_A = 0x82E // 2094 + SYS___INET_ADDR_A = 0x82F // 2095 + SYS___INET_NTOA_A = 0x830 // 2096 + SYS___INET_NETWORK_A = 0x831 // 2097 + 
SYS___ACCEPT_A = 0x832 // 2098 + SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 + SYS___BIND_A = 0x834 // 2100 + SYS___CONNECT_A = 0x835 // 2101 + SYS___GETPEERNAME_A = 0x836 // 2102 + SYS___GETSOCKNAME_A = 0x837 // 2103 + SYS___RECVFROM_A = 0x838 // 2104 + SYS___SENDTO_A = 0x839 // 2105 + SYS___SENDMSG_A = 0x83A // 2106 + SYS___RECVMSG_A = 0x83B // 2107 + SYS_____LCHATTR_A = 0x83C // 2108 + SYS___CABEND = 0x83D // 2109 + SYS___LE_CIB_GET = 0x83E // 2110 + SYS___SET_LAA_FOR_JIT = 0x83F // 2111 + SYS___LCHATTR = 0x840 // 2112 + SYS___WRITEDOWN = 0x841 // 2113 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 + SYS___ACOSHF_B = 0x843 // 2115 + SYS___ACOSHL_B = 0x844 // 2116 + SYS___ASINHF_B = 0x845 // 2117 + SYS___ASINHL_B = 0x846 // 2118 + SYS___ATANHF_B = 0x847 // 2119 + SYS___ATANHL_B = 0x848 // 2120 + SYS___CBRTF_B = 0x849 // 2121 + SYS___CBRTL_B = 0x84A // 2122 + SYS___COPYSIGNF_B = 0x84B // 2123 + SYS___COPYSIGNL_B = 0x84C // 2124 + SYS___COTANF_B = 0x84D // 2125 + SYS___COTAN_B = 0x84E // 2126 + SYS___COTANL_B = 0x84F // 2127 + SYS___EXP2F_B = 0x850 // 2128 + SYS___EXP2L_B = 0x851 // 2129 + SYS___EXPM1F_B = 0x852 // 2130 + SYS___EXPM1L_B = 0x853 // 2131 + SYS___FDIMF_B = 0x854 // 2132 + SYS___FDIM_B = 0x855 // 2133 + SYS___FDIML_B = 0x856 // 2134 + SYS___HYPOTF_B = 0x857 // 2135 + SYS___HYPOTL_B = 0x858 // 2136 + SYS___LOG1PF_B = 0x859 // 2137 + SYS___LOG1PL_B = 0x85A // 2138 + SYS___LOG2F_B = 0x85B // 2139 + SYS___LOG2_B = 0x85C // 2140 + SYS___LOG2L_B = 0x85D // 2141 + SYS___REMAINDERF_B = 0x85E // 2142 + SYS___REMAINDERL_B = 0x85F // 2143 + SYS___REMQUOF_B = 0x860 // 2144 + SYS___REMQUO_B = 0x861 // 2145 + SYS___REMQUOL_B = 0x862 // 2146 + SYS___TGAMMAF_B = 0x863 // 2147 + SYS___TGAMMA_B = 0x864 // 2148 + SYS___TGAMMAL_B = 0x865 // 2149 + SYS___TRUNCF_B = 0x866 // 2150 + SYS___TRUNC_B = 0x867 // 2151 + SYS___TRUNCL_B = 0x868 // 2152 + SYS___LGAMMAF_B = 0x869 // 2153 + SYS___LROUNDF_B = 0x86A // 2154 + SYS___LROUND_B = 0x86B // 2155 + SYS___ERFF_B = 0x86C // 2156 + SYS___ERFCF_B = 0x86D // 2157 + SYS_ACOSHF = 0x86E // 2158 + SYS_ACOSHL = 0x86F // 2159 + SYS_ASINHF = 0x870 // 2160 + SYS_ASINHL = 0x871 // 2161 + SYS_ATANHF = 0x872 // 2162 + SYS_ATANHL = 0x873 // 2163 + SYS_CBRTF = 0x874 // 2164 + SYS_CBRTL = 0x875 // 2165 + SYS_COPYSIGNF = 0x876 // 2166 + SYS_CPYSIGNF = 0x876 // 2166 + SYS_COPYSIGNL = 0x877 // 2167 + SYS_CPYSIGNL = 0x877 // 2167 + SYS_COTANF = 0x878 // 2168 + SYS___COTANF = 0x878 // 2168 + SYS_COTAN = 0x879 // 2169 + SYS___COTAN = 0x879 // 2169 + SYS_COTANL = 0x87A // 2170 + SYS___COTANL = 0x87A // 2170 + SYS_EXP2F = 0x87B // 2171 + SYS_EXP2L = 0x87C // 2172 + SYS_EXPM1F = 0x87D // 2173 + SYS_EXPM1L = 0x87E // 2174 + SYS_FDIMF = 0x87F // 2175 + SYS_FDIM = 0x881 // 2177 + SYS_FDIML = 0x882 // 2178 + SYS_HYPOTF = 0x883 // 2179 + SYS_HYPOTL = 0x884 // 2180 + SYS_LOG1PF = 0x885 // 2181 + SYS_LOG1PL = 0x886 // 2182 + SYS_LOG2F = 0x887 // 2183 + SYS_LOG2 = 0x888 // 2184 + SYS_LOG2L = 0x889 // 2185 + SYS_REMAINDERF = 0x88A // 2186 + SYS_REMAINDF = 0x88A // 2186 + SYS_REMAINDERL = 0x88B // 2187 + SYS_REMAINDL = 0x88B // 2187 + SYS_REMQUOF = 0x88C // 2188 + SYS_REMQUO = 0x88D // 2189 + SYS_REMQUOL = 0x88E // 2190 + SYS_TGAMMAF = 0x88F // 2191 + SYS_TGAMMA = 0x890 // 2192 + SYS_TGAMMAL = 0x891 // 2193 + SYS_TRUNCF = 0x892 // 2194 + SYS_TRUNC = 0x893 // 2195 + SYS_TRUNCL = 0x894 // 2196 + SYS_LGAMMAF = 0x895 // 2197 + SYS_LGAMMAL = 0x896 // 2198 + SYS_LROUNDF = 0x897 // 2199 + SYS_LROUND = 0x898 // 2200 + SYS_ERFF = 0x899 // 2201 + SYS_ERFL = 0x89A // 2202 + SYS_ERFCF = 0x89B // 2203 + 
SYS_ERFCL = 0x89C // 2204 + SYS___EXP2_B = 0x89D // 2205 + SYS_EXP2 = 0x89E // 2206 + SYS___FAR_JUMP = 0x89F // 2207 + SYS___TCGETATTR_A = 0x8A1 // 2209 + SYS___TCSETATTR_A = 0x8A2 // 2210 + SYS___SUPERKILL = 0x8A4 // 2212 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 + SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 + SYS___LE_MSG_GET = 0x8A7 // 2215 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 + SYS___LE_MSG_WRITE = 0x8A9 // 2217 + SYS___ITOA = 0x8AA // 2218 + SYS___UTOA = 0x8AB // 2219 + SYS___LTOA = 0x8AC // 2220 + SYS___ULTOA = 0x8AD // 2221 + SYS___LLTOA = 0x8AE // 2222 + SYS___ULLTOA = 0x8AF // 2223 + SYS___ITOA_A = 0x8B0 // 2224 + SYS___UTOA_A = 0x8B1 // 2225 + SYS___LTOA_A = 0x8B2 // 2226 + SYS___ULTOA_A = 0x8B3 // 2227 + SYS___LLTOA_A = 0x8B4 // 2228 + SYS___ULLTOA_A = 0x8B5 // 2229 + SYS_____GETENV_A = 0x8C3 // 2243 + SYS___REXEC_A = 0x8C4 // 2244 + SYS___REXEC_AF_A = 0x8C5 // 2245 + SYS___GETUTXENT_A = 0x8C6 // 2246 + SYS___GETUTXID_A = 0x8C7 // 2247 + SYS___GETUTXLINE_A = 0x8C8 // 2248 + SYS___PUTUTXLINE_A = 0x8C9 // 2249 + SYS_____UTMPXNAME_A = 0x8CA // 2250 + SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 + SYS___SNPRINTF_A = 0x8CD // 2253 + SYS___VSNPRINTF_A = 0x8CE // 2254 + SYS___DLOPEN_A = 0x8D0 // 2256 + SYS___DLSYM_A = 0x8D1 // 2257 + SYS___DLERROR_A = 0x8D2 // 2258 + SYS_FLOCKFILE = 0x8D3 // 2259 + SYS_FTRYLOCKFILE = 0x8D4 // 2260 + SYS_FUNLOCKFILE = 0x8D5 // 2261 + SYS_GETC_UNLOCKED = 0x8D6 // 2262 + SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 + SYS_PUTC_UNLOCKED = 0x8D8 // 2264 + SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 + SYS_SNPRINTF = 0x8DA // 2266 + SYS_VSNPRINTF = 0x8DB // 2267 + SYS_DLOPEN = 0x8DD // 2269 + SYS_DLSYM = 0x8DE // 2270 + SYS_DLCLOSE = 0x8DF // 2271 + SYS_DLERROR = 0x8E0 // 2272 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 + SYS___VHM_EVENT = 0x8E4 // 2276 + SYS___ABS_H = 0x8E6 // 2278 + SYS___ABSF_H = 0x8E7 // 2279 + SYS___ABSL_H = 0x8E8 // 2280 + SYS___ACOS_H = 0x8E9 // 2281 + SYS___ACOSF_H = 0x8EA // 2282 + SYS___ACOSL_H = 0x8EB // 2283 + SYS___ACOSH_H = 0x8EC // 2284 + SYS___ASIN_H = 0x8ED // 2285 + SYS___ASINF_H = 0x8EE // 2286 + SYS___ASINL_H = 0x8EF // 2287 + SYS___ASINH_H = 0x8F0 // 2288 + SYS___ATAN_H = 0x8F1 // 2289 + SYS___ATANF_H = 0x8F2 // 2290 + SYS___ATANL_H = 0x8F3 // 2291 + SYS___ATANH_H = 0x8F4 // 2292 + SYS___ATANHF_H = 0x8F5 // 2293 + SYS___ATANHL_H = 0x8F6 // 2294 + SYS___ATAN2_H = 0x8F7 // 2295 + SYS___ATAN2F_H = 0x8F8 // 2296 + SYS___ATAN2L_H = 0x8F9 // 2297 + SYS___CBRT_H = 0x8FA // 2298 + SYS___COPYSIGNF_H = 0x8FB // 2299 + SYS___COPYSIGNL_H = 0x8FC // 2300 + SYS___COS_H = 0x8FD // 2301 + SYS___COSF_H = 0x8FE // 2302 + SYS___COSL_H = 0x8FF // 2303 + SYS___COSHF_H = 0x900 // 2304 + SYS___COSHL_H = 0x901 // 2305 + SYS___COTAN_H = 0x902 // 2306 + SYS___COTANF_H = 0x903 // 2307 + SYS___COTANL_H = 0x904 // 2308 + SYS___ERF_H = 0x905 // 2309 + SYS___ERFF_H = 0x906 // 2310 + SYS___ERFL_H = 0x907 // 2311 + SYS___ERFC_H = 0x908 // 2312 + SYS___ERFCF_H = 0x909 // 2313 + SYS___ERFCL_H = 0x90A // 2314 + SYS___EXP_H = 0x90B // 2315 + SYS___EXPF_H = 0x90C // 2316 + SYS___EXPL_H = 0x90D // 2317 + SYS___EXPM1_H = 0x90E // 2318 + SYS___FDIM_H = 0x90F // 2319 + SYS___FDIMF_H = 0x910 // 2320 + SYS___FDIML_H = 0x911 // 2321 + SYS___FMOD_H = 0x912 // 2322 + SYS___FMODF_H = 0x913 // 2323 + SYS___FMODL_H = 0x914 // 2324 + SYS___GAMMA_H = 0x915 // 2325 + SYS___HYPOT_H = 0x916 // 2326 + SYS___ILOGB_H = 0x917 // 2327 + SYS___LGAMMA_H = 0x918 // 2328 + SYS___LGAMMAF_H = 0x919 // 2329 
+ SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 
2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + 
SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 
2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + 
SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 + SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 
0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F // 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 
+ SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c..3e6d57c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733..3a219bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f0..091d107 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix @@ -151,6 +150,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +619,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa775..28ff4ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && darwin -// +build arm64,darwin package unix @@ -151,6 +150,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +619,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9..30e405b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index d9c78cd..6cbd094 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix @@ -362,7 +361,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 26991b1..7c03b6e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix @@ -367,7 +366,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index f8324e7..422107e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix @@ -350,7 +349,7 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 4220411..505a12a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix @@ -347,7 +346,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 0660fd4..cc986c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix @@ -348,7 +347,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index ff68811..4740b83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -29,6 +28,41 @@ type Itimerval struct { Value Timeval } +const ( + ADJ_OFFSET = 0x1 + ADJ_FREQUENCY = 0x2 + ADJ_MAXERROR = 0x4 + ADJ_ESTERROR = 0x8 + ADJ_STATUS = 0x10 + ADJ_TIMECONST = 0x20 + ADJ_TAI = 0x80 + ADJ_SETOFFSET = 0x100 + ADJ_MICRO = 0x1000 + ADJ_NANO = 0x2000 + ADJ_TICK = 0x4000 + ADJ_OFFSET_SINGLESHOT = 0x8001 + ADJ_OFFSET_SS_READ = 0xa001 +) + +const ( + STA_PLL = 0x1 + STA_PPSFREQ = 0x2 + STA_PPSTIME = 0x4 + STA_FLL = 0x8 + STA_INS = 0x10 + STA_DEL = 0x20 + STA_UNSYNC = 0x40 + STA_FREQHOLD = 0x80 + STA_PPSSIGNAL = 0x100 + STA_PPSJITTER = 0x200 + STA_PPSWANDER = 0x400 + STA_PPSERROR = 0x800 + STA_CLOCKERR = 0x1000 + STA_NANO = 0x2000 + STA_MODE = 0x4000 + STA_CLK = 0x8000 +) + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -53,29 +87,30 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - _ uint64 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + _ [12]uint64 } type Fsid struct { @@ -139,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -420,36 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + 
Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -492,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -773,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -806,6 +878,11 @@ const ( POLLNVAL = 0x20 ) +type sigset_argpack struct { + ss *Sigset_t + ssLen uintptr +} + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1007,6 +1084,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1099,7 +1177,9 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1118,7 +1198,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 - PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1132,7 +1213,10 @@ const ( PERF_BR_COND_RET = 0xa PERF_BR_ERET = 0xb PERF_BR_IRQ = 0xc - PERF_BR_MAX = 0xd + PERF_BR_SERROR = 0xd + PERF_BR_NO_TX = 0xe + PERF_BR_EXTEND_ABI = 0xf + PERF_BR_MAX = 0x10 PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1151,7 +1235,8 @@ const ( PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_FORMAT_MAX = 0x10 + PERF_FORMAT_LOST = 0x10 + PERF_FORMAT_MAX = 0x20 PERF_IOC_FLAG_GROUP = 0x1 PERF_RECORD_MMAP = 0x1 PERF_RECORD_LOST = 0x2 @@ -1197,7 +1282,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1471,6 +1556,11 @@ const ( IFLA_GRO_MAX_SIZE = 0x3a IFLA_TSO_MAX_SIZE = 0x3b IFLA_TSO_MAX_SEGS = 0x3c + IFLA_ALLMULTI = 0x3d + IFLA_DEVLINK_PORT = 0x3e + IFLA_GSO_IPV4_MAX_SIZE = 0x3f + IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1486,6 +1576,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1533,6 +1624,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c 
IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1570,6 +1664,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1591,6 +1693,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1614,9 +1719,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1647,6 +1765,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1661,6 +1781,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1673,6 +1794,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1702,6 +1825,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1717,6 +1843,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1775,8 +1902,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1806,6 +1941,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 
0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -1897,7 +2037,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x22 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2338,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2401,9 +2554,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -2482,6 +2637,11 @@ const ( BPF_REG_8 = 0x8 BPF_REG_9 = 0x9 BPF_REG_10 = 0xa + BPF_CGROUP_ITER_ORDER_UNSPEC = 0x0 + BPF_CGROUP_ITER_SELF_ONLY = 0x1 + BPF_CGROUP_ITER_DESCENDANTS_PRE = 0x2 + BPF_CGROUP_ITER_DESCENDANTS_POST = 0x3 + BPF_CGROUP_ITER_ANCESTORS_UP = 0x4 BPF_MAP_CREATE = 0x0 BPF_MAP_LOOKUP_ELEM = 0x1 BPF_MAP_UPDATE_ELEM = 0x2 @@ -2493,6 +2653,7 @@ const ( BPF_PROG_ATTACH = 0x8 BPF_PROG_DETACH = 0x9 BPF_PROG_TEST_RUN = 0xa + BPF_PROG_RUN = 0xa BPF_PROG_GET_NEXT_ID = 0xb BPF_MAP_GET_NEXT_ID = 0xc BPF_PROG_GET_FD_BY_ID = 0xd @@ -2537,6 +2698,7 @@ const ( BPF_MAP_TYPE_CPUMAP = 0x10 BPF_MAP_TYPE_XSKMAP = 0x11 BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 0x13 BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 @@ -2547,6 +2709,10 @@ const ( BPF_MAP_TYPE_STRUCT_OPS = 0x1a BPF_MAP_TYPE_RINGBUF = 0x1b BPF_MAP_TYPE_INODE_STORAGE = 0x1c + BPF_MAP_TYPE_TASK_STORAGE = 0x1d + BPF_MAP_TYPE_BLOOM_FILTER = 0x1e + BPF_MAP_TYPE_USER_RINGBUF = 0x1f + BPF_MAP_TYPE_CGRP_STORAGE = 0x20 BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -2578,6 +2744,8 @@ const ( BPF_PROG_TYPE_EXT = 0x1c BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2616,6 +2784,17 @@ const ( BPF_XDP_CPUMAP = 0x23 BPF_SK_LOOKUP = 0x24 BPF_XDP = 0x25 + BPF_SK_SKB_VERDICT = 0x26 + BPF_SK_REUSEPORT_SELECT = 0x27 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 0x28 + BPF_PERF_EVENT = 0x29 + BPF_TRACE_KPROBE_MULTI = 0x2a + BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2623,6 +2802,21 @@ const ( BPF_LINK_TYPE_ITER = 0x4 BPF_LINK_TYPE_NETNS = 0x5 BPF_LINK_TYPE_XDP = 0x6 + BPF_LINK_TYPE_PERF_EVENT = 0x7 + BPF_LINK_TYPE_KPROBE_MULTI = 0x8 + BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + 
BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2640,6 +2834,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2660,6 +2856,8 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 + BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 @@ -2674,6 +2872,9 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2698,10 +2899,16 @@ const ( BPF_LWT_ENCAP_SEG6 = 0x0 BPF_LWT_ENCAP_SEG6_INLINE = 0x1 BPF_LWT_ENCAP_IP = 0x2 + BPF_F_BPRM_SECUREEXEC = 0x1 + BPF_F_BROADCAST = 0x8 + BPF_F_EXCLUDE_INGRESS = 0x10 + BPF_SKB_TSTAMP_UNSPEC = 0x0 + BPF_SKB_TSTAMP_DELIVERY_MONO = 0x1 BPF_OK = 0x0 BPF_DROP = 0x2 BPF_REDIRECT = 0x7 BPF_LWT_REROUTE = 0x80 + BPF_FLOW_DISSECTOR_CONTINUE = 0x81 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 @@ -2738,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -2756,6 +2963,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2765,6 +2974,10 @@ const ( BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_MTU_CHK_SEGS = 0x1 + BPF_MTU_CHK_RET_SUCCESS = 0x0 + BPF_MTU_CHK_RET_FRAG_NEEDED = 0x1 + BPF_MTU_CHK_RET_SEGS_TOOBIG = 0x2 BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 BPF_FD_TYPE_TRACEPOINT = 0x1 BPF_FD_TYPE_KPROBE = 0x2 @@ -2774,6 +2987,20 @@ const ( BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_CORE_FIELD_BYTE_OFFSET = 0x0 + BPF_CORE_FIELD_BYTE_SIZE = 0x1 + BPF_CORE_FIELD_EXISTS = 0x2 + BPF_CORE_FIELD_SIGNED = 0x3 + BPF_CORE_FIELD_LSHIFT_U64 = 0x4 + BPF_CORE_FIELD_RSHIFT_U64 = 0x5 + BPF_CORE_TYPE_ID_LOCAL = 0x6 + BPF_CORE_TYPE_ID_TARGET = 0x7 + BPF_CORE_TYPE_EXISTS = 0x8 + BPF_CORE_TYPE_SIZE = 0x9 + BPF_CORE_ENUMVAL_EXISTS = 0xa + BPF_CORE_ENUMVAL_VALUE = 0xb + BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2852,6 +3079,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 @@ -2979,7 +3212,16 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x51 + DEVLINK_CMD_RATE_GET = 0x4a + DEVLINK_CMD_RATE_SET = 0x4b + DEVLINK_CMD_RATE_NEW = 0x4c + DEVLINK_CMD_RATE_DEL = 0x4d + DEVLINK_CMD_LINECARD_GET = 0x4e + DEVLINK_CMD_LINECARD_SET = 0x4f + DEVLINK_CMD_LINECARD_NEW = 0x50 + DEVLINK_CMD_LINECARD_DEL = 0x51 + 
DEVLINK_CMD_SELFTESTS_GET = 0x52 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3208,7 +3450,13 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xae + DEVLINK_ATTR_LINECARD_INDEX = 0xab + DEVLINK_ATTR_LINECARD_STATE = 0xac + DEVLINK_ATTR_LINECARD_TYPE = 0xad + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae + DEVLINK_ATTR_NESTED_DEVLINK = 0xaf + DEVLINK_ATTR_SELFTESTS = 0xb0 + DEVLINK_ATTR_MAX = 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3224,7 +3472,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -3317,7 +3566,8 @@ const ( LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 LWTUNNEL_ENCAP_IOAM6 = 0x9 - LWTUNNEL_ENCAP_MAX = 0x9 + LWTUNNEL_ENCAP_XFRM = 0xa + LWTUNNEL_ENCAP_MAX = 0xa MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3512,7 +3762,10 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21 ETHTOOL_MSG_MODULE_GET = 0x22 ETHTOOL_MSG_MODULE_SET = 0x23 - ETHTOOL_MSG_USER_MAX = 0x23 + ETHTOOL_MSG_PSE_GET = 0x24 + ETHTOOL_MSG_PSE_SET = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x2b ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3550,7 +3803,9 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22 ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 - ETHTOOL_MSG_KERNEL_MAX = 0x24 + ETHTOOL_MSG_PSE_GET_REPLY = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x2b ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3609,7 +3864,8 @@ const ( ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 ETHTOOL_A_LINKMODES_LANES = 0x9 - ETHTOOL_A_LINKMODES_MAX = 0x9 + ETHTOOL_A_LINKMODES_RATE_MATCHING = 0xa + ETHTOOL_A_LINKMODES_MAX = 0xa ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -3617,7 +3873,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -3652,7 +3909,7 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0xd + ETHTOOL_A_RINGS_MAX = 0x10 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3690,14 +3947,14 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x1c ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 ETHTOOL_A_PAUSE_RX = 0x3 ETHTOOL_A_PAUSE_TX = 0x4 ETHTOOL_A_PAUSE_STATS = 0x5 - ETHTOOL_A_PAUSE_MAX = 0x5 + ETHTOOL_A_PAUSE_MAX = 0x6 ETHTOOL_A_PAUSE_STAT_UNSPEC = 0x0 ETHTOOL_A_PAUSE_STAT_PAD = 0x1 ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 0x2 @@ -4000,7 +4257,8 @@ const ( ) type LandlockRulesetAttr 
struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -4201,6 +4459,9 @@ const ( NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 NL80211_AC_VI = 0x1 NL80211_AC_VO = 0x0 + NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 0x1 + NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 0x2 + NL80211_AP_SME_SA_QUERY_OFFLOAD = 0x1 NL80211_ATTR_4ADDR = 0x53 NL80211_ATTR_ACK = 0x5c NL80211_ATTR_ACK_SIGNAL = 0x107 @@ -4209,6 +4470,7 @@ const ( NL80211_ATTR_AIRTIME_WEIGHT = 0x112 NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4240,6 +4502,9 @@ const ( NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 NL80211_ATTR_COALESCE_RULE_MAX = 0x3 NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_COLOR_CHANGE_COLOR = 0x130 + NL80211_ATTR_COLOR_CHANGE_COUNT = 0x12f + NL80211_ATTR_COLOR_CHANGE_ELEMS = 0x131 NL80211_ATTR_CONN_FAILED_REASON = 0x9b NL80211_ATTR_CONTROL_PORT = 0x44 NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 @@ -4266,6 +4531,7 @@ const ( NL80211_ATTR_DEVICE_AP_SME = 0x8d NL80211_ATTR_DFS_CAC_TIME = 0x7 NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_EHT = 0x137 NL80211_ATTR_DISABLE_HE = 0x12d NL80211_ATTR_DISABLE_HT = 0x93 NL80211_ATTR_DISABLE_VHT = 0xaf @@ -4273,6 +4539,8 @@ const ( NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 @@ -4337,10 +4605,11 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde @@ -4350,6 +4619,8 @@ const ( NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MBSSID_CONFIG = 0x132 + NL80211_ATTR_MBSSID_ELEMS = 0x133 NL80211_ATTR_MCAST_RATE = 0x6b NL80211_ATTR_MDID = 0xb1 NL80211_ATTR_MEASUREMENT_DURATION = 0xeb @@ -4359,6 +4630,11 @@ const ( NL80211_ATTR_MESH_PEER_AID = 0xed NL80211_ATTR_MESH_SETUP = 0x70 NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MLD_ADDR = 0x13a + NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_ID = 0x139 + NL80211_ATTR_MLO_LINKS = 0x138 + NL80211_ATTR_MLO_SUPPORT = 0x13b NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4371,6 +4647,7 @@ const ( NL80211_ATTR_NETNS_FD = 0xdb NL80211_ATTR_NOACK_MAP = 0x95 NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OBSS_COLOR_BITMAP = 0x12e NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c NL80211_ATTR_OPER_CLASS = 0xd6 NL80211_ATTR_OPMODE_NOTIF = 0xc2 @@ -4397,6 +4674,7 @@ const ( NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 NL80211_ATTR_REASON_CODE = 0x36 NL80211_ATTR_RECEIVE_MULTICAST = 0x121 @@ -4412,6 +4690,7 @@ const ( NL80211_ATTR_RESP_IE = 0x4e NL80211_ATTR_ROAM_SUPPORT = 0x83 NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RX_HW_TIMESTAMP = 0x140 
NL80211_ATTR_RXMGMT_FLAGS = 0xbc NL80211_ATTR_RX_SIGNAL_DBM = 0x97 NL80211_ATTR_S1G_CAPABILITY = 0x128 @@ -4469,6 +4748,7 @@ const ( NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -4484,6 +4764,7 @@ const ( NL80211_ATTR_TSID = 0xd2 NL80211_ATTR_TWT_RESPONDER = 0x116 NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_HW_TIMESTAMP = 0x13f NL80211_ATTR_TX_NO_CCK_RATE = 0x87 NL80211_ATTR_TXQ_LIMIT = 0x10a NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b @@ -4553,10 +4834,14 @@ const ( NL80211_BAND_ATTR_HT_CAPA = 0x4 NL80211_BAND_ATTR_HT_MCS_SET = 0x3 NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 - NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 0xa + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 0x9 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 0xb NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 @@ -4564,6 +4849,8 @@ const ( NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 0x7 + NL80211_BAND_LC = 0x5 NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4584,7 +4871,9 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x14 + NL80211_BSS_MAX = 0x18 + NL80211_BSS_MLD_ADDR = 0x16 + NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 NL80211_BSS_PARENT_BSSID = 0x12 NL80211_BSS_PARENT_TSF = 0x11 @@ -4612,6 +4901,7 @@ const ( NL80211_CHAN_WIDTH_20 = 0x1 NL80211_CHAN_WIDTH_20_NOHT = 0x0 NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_320 = 0xd NL80211_CHAN_WIDTH_40 = 0x2 NL80211_CHAN_WIDTH_4 = 0xa NL80211_CHAN_WIDTH_5 = 0x6 @@ -4621,8 +4911,11 @@ const ( NL80211_CMD_ABORT_SCAN = 0x72 NL80211_CMD_ACTION = 0x3b NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_LINK = 0x94 + NL80211_CMD_ADD_LINK_STA = 0x96 NL80211_CMD_ADD_NAN_FUNCTION = 0x75 NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOC_COMEBACK = 0x93 NL80211_CMD_ASSOCIATE = 0x26 NL80211_CMD_AUTHENTICATE = 0x25 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 @@ -4630,6 +4923,10 @@ const ( NL80211_CMD_CHANNEL_SWITCH = 0x66 NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_COLOR_CHANGE_ABORTED = 0x90 + NL80211_CMD_COLOR_CHANGE_COMPLETED = 0x91 + NL80211_CMD_COLOR_CHANGE_REQUEST = 0x8e + NL80211_CMD_COLOR_CHANGE_STARTED = 0x8f NL80211_CMD_CONNECT = 0x2e NL80211_CMD_CONN_FAILED = 0x5b NL80211_CMD_CONTROL_PORT_FRAME = 0x81 @@ -4678,8 +4975,9 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x93 + NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 NL80211_CMD_NEW_BEACON = 0xf NL80211_CMD_NEW_INTERFACE = 0x7 @@ -4692,6 +4990,7 @@ const ( NL80211_CMD_NEW_WIPHY = 0x3 NL80211_CMD_NOTIFY_CQM = 0x40 NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_OBSS_COLOR_COLLISION = 0x8d NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 
NL80211_CMD_PEER_MEASUREMENT_START = 0x83 @@ -4707,6 +5006,8 @@ const ( NL80211_CMD_REGISTER_FRAME = 0x3a NL80211_CMD_RELOAD_REGDB = 0x7e NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REMOVE_LINK = 0x95 + NL80211_CMD_REMOVE_LINK_STA = 0x98 NL80211_CMD_REQ_SET_REG = 0x1b NL80211_CMD_ROAM = 0x2f NL80211_CMD_SCAN_ABORTED = 0x23 @@ -4717,6 +5018,7 @@ const ( NL80211_CMD_SET_CHANNEL = 0x41 NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_FILS_AAD = 0x92 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -4791,6 +5093,8 @@ const ( NL80211_EDMG_BW_CONFIG_MIN = 0x4 NL80211_EDMG_CHANNELS_MAX = 0x3c NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EHT_MAX_CAPABILITY_LEN = 0x33 + NL80211_EHT_MIN_CAPABILITY_LEN = 0xd NL80211_EXTERNAL_AUTH_ABORT = 0x1 NL80211_EXTERNAL_AUTH_START = 0x0 NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 @@ -4807,6 +5111,7 @@ const ( NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_COLOR = 0x3a NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a @@ -4818,6 +5123,7 @@ const ( NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 0x3b NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe @@ -4833,8 +5139,10 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 @@ -4901,12 +5209,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa @@ -5006,6 +5316,12 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 + NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 NL80211_MESHCONF_ATTR_MAX = 0x1f NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 NL80211_MESHCONF_AWAKE_WINDOW = 0x1b @@ -5168,6 +5484,7 @@ const ( NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 0xd NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 
0x6 @@ -5244,12 +5561,36 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 + NL80211_RATE_INFO_EHT_GI_1_6 = 0x1 + NL80211_RATE_INFO_EHT_GI_3_2 = 0x2 + NL80211_RATE_INFO_EHT_GI = 0x15 + NL80211_RATE_INFO_EHT_MCS = 0x13 + NL80211_RATE_INFO_EHT_NSS = 0x14 + NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 0x3 + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 0x4 + NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 0x5 + NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 0xb + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 0xc + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 0xd + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 = 0xe + NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 0x6 + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 0x7 + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 0xf + NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 0x2 + NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 0x8 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 0x9 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 0xa + NL80211_RATE_INFO_EHT_RU_ALLOC = 0x16 NL80211_RATE_INFO_HE_1XLTF = 0x0 NL80211_RATE_INFO_HE_2XLTF = 0x1 NL80211_RATE_INFO_HE_4XLTF = 0x2 @@ -5268,7 +5609,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 @@ -5281,7 +5622,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 @@ -5292,6 +5633,7 @@ const ( NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_320MHZ = 0x40000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 NL80211_RRF_NO_HE = 0x20000 @@ -5361,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -5607,3 +5949,86 @@ const ( AUDIT_NLGRP_NONE = 0x0 AUDIT_NLGRP_READLOG = 0x1 ) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 + TUN_F_USO4 = 0x20 + TUN_F_USO6 = 0x40 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange 
struct { + Off uint64 + Len uint64 +} + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 89c516a..fd402da 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -337,6 +336,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 @@ -414,7 +415,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } @@ -476,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 62b4fb2..eb7a5e1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -350,6 +349,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -427,7 +428,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -491,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e86b358..d78ac10 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux -// +build arm,linux package unix @@ -328,6 +327,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 @@ -405,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } @@ -469,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6c6be4c..cd06d47 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -329,6 +328,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -406,7 +407,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -470,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 4982ea3..2f28fe2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -330,6 +329,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -407,7 +408,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -471,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 173141a..71d6cac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux -// +build mips,linux package unix @@ -333,6 +332,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 @@ -410,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } @@ -475,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 93ae4c5..8596d45 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -332,6 +331,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -409,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -473,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 4e4e510..cd60ea1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -332,6 +331,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -409,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -473,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 3f5ba01..b0ae420 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux -// +build mipsle,linux package unix @@ -333,6 +332,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 @@ -410,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } @@ -475,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 71dfe7c..8359728 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -340,6 +339,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 @@ -417,7 +418,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } @@ -481,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3a2b7f0..69eb6a5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -339,6 +338,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -416,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } @@ -480,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index a52d627..5f583cb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -339,6 +338,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -416,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } @@ -480,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dfc007d..15adc04 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -357,6 +356,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -434,7 +435,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } @@ -498,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -716,3 +708,30 @@ type SysvShmDesc struct { _ uint64 _ uint64 } + +type RISCVHWProbePairs struct { + Key int64 + Value uint64 +} + +const ( + RISCV_HWPROBE_KEY_MVENDORID = 0x0 + RISCV_HWPROBE_KEY_MARCHID = 0x1 + RISCV_HWPROBE_KEY_MIMPID = 0x2 + RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3 + RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1 + RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 + RISCV_HWPROBE_IMA_FD = 0x1 + RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_IMA_V = 0x4 + RISCV_HWPROBE_EXT_ZBA = 0x8 + RISCV_HWPROBE_EXT_ZBB = 0x10 + RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 + RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 + RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 + RISCV_HWPROBE_MISALIGNED_SLOW = 0x2 + RISCV_HWPROBE_MISALIGNED_FAST = 0x3 + RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 + RISCV_HWPROBE_MISALIGNED_MASK = 0x7 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b53cb91..cf3ce90 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux -// +build s390x,linux package unix @@ -352,6 +351,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -429,7 +430,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -494,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index fe0aa35..590b567 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -334,6 +333,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 @@ -411,7 +412,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } @@ -475,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f..f22e794 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f65..066a7d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a..439548e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 1112115..16085d3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23..afd13a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a54798..5d97f1f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e..34871cd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 5233826..5911bce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb..e4f24f3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c0..ca50a79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a..d7d7f79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747..1416057 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efc..d9a13af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. 
@@ -26,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -70,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -326,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -337,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -413,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} + +type SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea..16f9056 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 -// +build windows,go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index fdbbbcd..0000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.12 -// +build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05f..d4577a4 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) - for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd6064..6c36695 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd..9cabbb6 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -22,7 +22,7 @@ import ( // but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ func EscapeArg(s string) string { } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. } if n == len(s) { return s @@ -82,36 +82,106 @@ func EscapeArg(s string) string { // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " + if len(args) == 0 { + return "" + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. 
+ mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. + continue + } + commandLine = append(commandLine, c) + } + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog } - commandLine += EscapeArg(args[i]) + commandLine = []byte(prog) } - return commandLine + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns an error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79..dbcdb09 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
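The rewritten ComposeCommandLine above now quotes the program name (args[0]) only when it is empty or contains whitespace, control characters, or a leading quote, and it drops double quotes inside the program name because CommandLineToArgvW offers no way to escape them; DecomposeCommandLine, in turn, now rejects input containing NUL and parses via the unexported commandLineToArgv binding. A minimal round-trip sketch, not part of the patch, with made-up argument values:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Each argument is escaped individually; the program name is quoted only if needed.
	argv := []string{`C:\Program Files\demo.exe`, "--msg", `say "hi"`, "plain"}
	cmdline := windows.ComposeCommandLine(argv)
	fmt.Println(cmdline)

	// The new NUL check in DecomposeCommandLine surfaces here as an error;
	// otherwise CommandLineToArgvW splits the string back into arguments.
	back, err := windows.DecomposeCommandLine(cmdline)
	if err != nil {
		fmt.Println("decompose:", err)
		return
	}
	fmt.Printf("%q\n", back)
}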
//go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b08..0f1bdc3 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae481..0c78da7 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 6c8d97b..fd86324 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package registry provides access to the Windows registry. // diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go index ee74927..bbf86cc 100644 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package registry diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index 4173351..f533091 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 2789f6f..74db26b 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index d414ef1..97651b5 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,8 +7,6 @@ package windows import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -70,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -895,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1088,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1159,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { @@ -1341,21 +1362,14 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to + // be aligned properly. When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size. 
const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index f8deca8..a9dc630 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows @@ -141,6 +140,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -212,6 +217,10 @@ type SERVICE_FAILURE_ACTIONS struct { Actions *SC_ACTION } +type SERVICE_FAILURE_ACTIONS_FLAG struct { + FailureActionsOnNonCrashFailures int32 +} + type SC_ACTION struct { Type uint32 Delay uint32 @@ -245,3 +254,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? +//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc0143..6a4f9ce 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb..e85ed6b 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index a49853e..6525c62 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,14 +10,11 @@ import ( errorspkg "errors" "fmt" "runtime" - "strings" "sync" "syscall" "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -87,22 +84,13 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). 
func UTF16FromString(s string) ([]uint16, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, syscall.EINVAL - } - return utf16.Encode([]rune(s + "\x00")), nil + return syscall.UTF16FromString(s) } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, // with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) + return syscall.UTF16ToString(s) } // StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. @@ -137,22 +125,21 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. 
func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } @@ -167,6 +154,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -176,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -204,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -226,7 +217,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -245,12 +236,13 @@ func 
NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -309,12 +301,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -354,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, 
dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -415,7 +421,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW // Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses //sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules //sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation @@ -447,6 +453,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
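The hunk above wires in //sys stubs for the classic Win32 serial-port (comm) calls alongside the existing CommTimeouts pair; the CBR_*, parity/stop-bit, PURGE_* and EV_* constants are appended to syscall_windows.go below, and the matching DCB and ComStat structs land in types_windows.go. A rough sketch, not part of the patch, of how the new wrappers fit together, assuming the handle was opened elsewhere (for example with windows.CreateFile on a COM device path); the settings and buffer sizes are illustrative only:

package example

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// configureSerial reprograms an already-open COM handle to 9600 8N1 with the
// newly vendored GetCommState/SetCommState wrappers, then sizes and flushes
// the driver buffers with SetupComm and PurgeComm.
func configureSerial(h windows.Handle) error {
	var dcb windows.DCB
	dcb.DCBlength = uint32(unsafe.Sizeof(dcb)) // Win32 expects the struct size to be filled in
	if err := windows.GetCommState(h, &dcb); err != nil {
		return err
	}
	dcb.BaudRate = windows.CBR_9600
	dcb.ByteSize = 8
	dcb.Parity = windows.NOPARITY
	dcb.StopBits = windows.ONESTOPBIT
	if err := windows.SetCommState(h, &dcb); err != nil {
		return err
	}
	if err := windows.SetupComm(h, 4096, 4096); err != nil {
		return err
	}
	return windows.PurgeComm(h, windows.PURGE_RXCLEAR|windows.PURGE_TXCLEAR)
}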
@@ -834,6 +844,9 @@ const socket_error = uintptr(^uint32(0)) //sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket //sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto //sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom @@ -971,7 +984,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1029,8 +1043,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1362,6 +1375,17 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } +func EnumProcesses(processIds []uint32, bytesReturned *uint32) error { + // EnumProcesses syscall expects the size parameter to be in bytes, but the code generated with mksyscall uses + // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy. + var p *uint32 + if len(processIds) > 0 { + p = &processIds[0] + } + size := uint32(len(processIds) * 4) + return enumProcesses(p, size, bytesReturned) +} + func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { @@ -1621,6 +1645,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } @@ -1655,12 +1684,8 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1683,12 +1708,8 @@ func NewNTString(s string) (*NTString, error) { // Slice returns a byte slice that aliases the data in the NTString. func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTString) String() string { @@ -1740,10 +1761,7 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { if err != nil { return } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) + data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size) return } @@ -1814,3 +1832,87 @@ type PSAPI_WORKING_SET_EX_INFORMATION struct { // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK } + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(pconsole Handle, size Coord) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) +} + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. 
See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. +const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 0c4add9..d8cb71d 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -247,6 +247,7 @@ const ( PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016 ) const ( @@ -1093,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 @@ -1243,6 +1270,51 @@ const ( DnsSectionAdditional = 0x0003 ) +const ( + // flags of WSALookupService + LUP_DEEP = 0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + type DNSSRVData struct { Target *uint16 Priority uint16 @@ -2094,6 +2166,12 @@ const ( ENABLE_LVB_GRID_WORLDWIDE = 0x10 ) +// Pseudo console related constants used for the flags parameter to +// CreatePseudoConsole. 
See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole +const ( + PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 +) + type Coord struct { X int16 Y int16 @@ -2175,19 +2253,23 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) @@ -3258,3 +3340,67 @@ const ( DWMWA_TEXT_COLOR = 36 DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 ) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index ac60052..eba7610 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -86,9 +87,11 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = 
modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -182,10 +185,14 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") @@ -200,6 +207,7 @@ var ( procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") @@ -207,7 +215,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -231,6 +241,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -249,6 +261,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -316,6 +329,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = 
modkernel32.NewProc("QueryInformationJobObject") @@ -325,8 +339,13 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,6 +361,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -352,6 +372,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -370,6 +391,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -380,6 +402,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -467,6 +490,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -474,6 +499,9 @@ var ( procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = 
modws2_32.NewProc("WSALookupServiceBeginW") + procWSALookupServiceEnd = modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") procWSARecv = modws2_32.NewProc("WSARecv") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") procWSASend = modws2_32.NewProc("WSASend") @@ -731,6 +759,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { @@ -1189,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -1586,6 +1630,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -1610,6 +1663,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1618,6 +1687,11 @@ func CloseHandle(handle Handle) (err error) { return } +func ClosePseudoConsole(console Handle) { + 
syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + return +} + func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1747,6 +1821,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } +func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { @@ -1801,6 +1883,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1813,6 +1903,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2014,6 +2112,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2154,6 +2268,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2355,11 +2477,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { 
- r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -2761,6 +2880,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2842,6 +2969,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2850,6 +2985,14 @@ func ResetEvent(event Handle) (err error) { return } +func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func ResumeThread(thread Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) ret = uint32(r0) @@ -2859,6 +3002,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2987,6 +3154,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3072,6 +3247,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + 
func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3218,6 +3401,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { @@ -3305,6 +3496,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { @@ -3504,12 +3703,8 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) +func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3812,9 +4007,9 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { +func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) } @@ -4009,6 +4204,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, 
uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { @@ -4067,6 +4278,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo return } +func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceEnd(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { + r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) if r1 == socket_error { diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index d69ccb4..487335d 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -13,7 +13,7 @@ import "encoding/binary" // a rune to a uint16. The values take two forms. For v >= 0x8000: // bits // 15: 1 (inverse of NFD_QC bit of qcInfo) -// 13..7: qcInfo (see below). isYesD is always true (no decompostion). +// 13..7: qcInfo (see below). isYesD is always true (no decomposition). // 6..0: ccc (compressed CCC value). // For v < 0x8000, the respective rune has a decomposition and v is an index // into a byte array of UTF-8 decomposition sequences and additional info and diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a0788..1af161c 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c..eb73ecc 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b2733..276cb8d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9115ef2..0cceffd 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go new file mode 100644 index 0000000..b0819e4 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -0,0 +1,7907 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "15.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x199A + firstCCC = 0x2DD5 + endMulti = 0x30A1 + firstLeadingCCC = 0x4AEF + firstCCCZeroExcept = 0x4BB9 + firstStarterWithNLead = 0x4BE0 + lastDecomp = 0x4BE2 + maxDecomp = 0x8000 +) + +// decomps: 19426 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xA6, 0x42, + 0xC3, 0xB0, 0x42, 0xC3, 0xB8, 0x42, 0xC4, 0xA6, + 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, 0x42, 0xC5, + 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, 0x8E, 0x42, + 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, 0xC7, 0x80, + 0x42, 0xC7, 0x81, 
0x42, 0xC7, 0x82, 0x42, 0xC8, + // Bytes 100 - 13f + 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, 0x42, + 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, 0x93, + 0x42, 0xC9, 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, + 0x96, 0x42, 0xC9, 0x97, 0x42, 0xC9, 0x98, 0x42, + 0xC9, 0x99, 0x42, 0xC9, 0x9B, 0x42, 0xC9, 0x9C, + 0x42, 0xC9, 0x9E, 0x42, 0xC9, 0x9F, 0x42, 0xC9, + 0xA0, 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA2, 0x42, + 0xC9, 0xA3, 0x42, 0xC9, 0xA4, 0x42, 0xC9, 0xA5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA7, 0x42, 0xC9, + 0xA8, 0x42, 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, + 0xC9, 0xAB, 0x42, 0xC9, 0xAC, 0x42, 0xC9, 0xAD, + 0x42, 0xC9, 0xAE, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + 0x42, 0xC9, 0xB6, 0x42, 0xC9, 0xB7, 0x42, 0xC9, + 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, 0xBA, 0x42, + // Bytes 180 - 1bf + 0xC9, 0xBB, 0x42, 0xC9, 0xBD, 0x42, 0xC9, 0xBE, + 0x42, 0xCA, 0x80, 0x42, 0xCA, 0x81, 0x42, 0xCA, + 0x82, 0x42, 0xCA, 0x83, 0x42, 0xCA, 0x84, 0x42, + 0xCA, 0x88, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x8E, 0x42, 0xCA, 0x8F, 0x42, + 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, + 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x98, 0x42, 0xCA, + // Bytes 1c0 - 1ff + 0x99, 0x42, 0xCA, 0x9B, 0x42, 0xCA, 0x9C, 0x42, + 0xCA, 0x9D, 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xA1, + 0x42, 0xCA, 0xA2, 0x42, 0xCA, 0xA3, 0x42, 0xCA, + 0xA4, 0x42, 0xCA, 0xA5, 0x42, 0xCA, 0xA6, 0x42, + 0xCA, 0xA7, 0x42, 0xCA, 0xA8, 0x42, 0xCA, 0xA9, + 0x42, 0xCA, 0xAA, 0x42, 0xCA, 0xAB, 0x42, 0xCA, + 0xB9, 0x42, 0xCB, 0x90, 0x42, 0xCB, 0x91, 0x42, + 0xCE, 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, + // Bytes 200 - 23f + 0x42, 0xCE, 0x94, 0x42, 0xCE, 0x95, 0x42, 0xCE, + 0x96, 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, + 0xCE, 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, + 0x42, 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, + 0x9E, 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, + 0xCE, 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, + 0x42, 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, + 0xA7, 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, + // Bytes 240 - 27f + 0xCE, 0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, + 0x42, 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, + 0xB6, 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, + 0xCE, 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, + 0x42, 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, + 0xBE, 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, + 0xCF, 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, + 0x42, 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, + // Bytes 280 - 2bf + 0x86, 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, + 0xCF, 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, + 0x42, 0xD0, 0xB0, 0x42, 0xD0, 0xB1, 0x42, 0xD0, + 0xB2, 0x42, 0xD0, 0xB3, 0x42, 0xD0, 0xB4, 0x42, + 0xD0, 0xB5, 0x42, 0xD0, 0xB6, 0x42, 0xD0, 0xB7, + 0x42, 0xD0, 0xB8, 0x42, 0xD0, 0xBA, 0x42, 0xD0, + 0xBB, 0x42, 0xD0, 0xBC, 0x42, 0xD0, 0xBD, 0x42, + 0xD0, 0xBE, 0x42, 0xD0, 0xBF, 0x42, 0xD1, 0x80, + // Bytes 2c0 - 2ff + 0x42, 0xD1, 0x81, 0x42, 0xD1, 0x82, 0x42, 0xD1, + 0x83, 0x42, 0xD1, 0x84, 0x42, 0xD1, 0x85, 0x42, + 0xD1, 0x86, 0x42, 0xD1, 0x87, 0x42, 0xD1, 0x88, + 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8B, 0x42, 0xD1, + 0x8C, 0x42, 0xD1, 0x8D, 0x42, 0xD1, 0x8E, 0x42, + 0xD1, 0x95, 0x42, 0xD1, 0x96, 0x42, 0xD1, 0x98, + 0x42, 0xD1, 0x9F, 0x42, 0xD2, 0x91, 0x42, 0xD2, + 0xAB, 0x42, 0xD2, 0xAF, 0x42, 0xD2, 0xB1, 0x42, + // Bytes 300 - 33f + 0xD3, 0x8F, 0x42, 0xD3, 0x99, 0x42, 0xD3, 0xA9, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 
0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + // Bytes 340 - 37f + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + // Bytes 380 - 3bf + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + // Bytes 3c0 - 3ff + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + // Bytes 400 - 43f + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + 0x84, 0x8A, 0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + // Bytes 440 - 47f + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + // Bytes 480 - 4bf + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + // Bytes 4c0 - 4ff + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + // Bytes 500 - 53f + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE1, + 0xB6, 0x91, 0x43, 0xE2, 0x80, 0x82, 0x43, 0xE2, + 0x80, 0x83, 
0x43, 0xE2, 0x80, 0x90, 0x43, 0xE2, + 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, 0xE2, + // Bytes 540 - 57f + 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, 0xE2, + 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, 0xE2, + 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, 0xE2, + 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, 0xE2, + 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, 0xE2, + 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, 0xE2, + 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, 0xE2, + 0xB1, 0xB1, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + 0x82, 0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + // Bytes 600 - 63f + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + // Bytes 640 - 67f + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, + 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + // Bytes 680 - 6bf + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + // Bytes 6c0 - 6ff + 0x94, 0x95, 0x43, 0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 0xE3, 0xAB, 0xA4, 0x43, 0xE3, + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + // Bytes 700 - 73f + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + // Bytes 740 - 77f + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 
0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + // Bytes 780 - 7bf + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + // Bytes 7c0 - 7ff + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 0xB9, 0x43, 0xE4, + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + // Bytes 800 - 83f + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + // Bytes 840 - 87f + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 0x9A, 0x43, 0xE5, + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + // Bytes 900 - 93f + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + // Bytes 940 - 97f + 0x8A, 0xB4, 0x43, 0xE5, 0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 
0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + // Bytes a00 - a3f + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 0x95, 0x95, 0x43, 0xE5, + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + // Bytes a40 - a7f + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + // Bytes a80 - abf + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + // Bytes ac0 - aff + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + // Bytes b00 - b3f + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + // Bytes b80 - bbf + 0xB0, 0xB8, 0x43, 
0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + // Bytes bc0 - bff + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + // Bytes c00 - c3f + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + // Bytes c40 - c7f + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + // Bytes c80 - cbf + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 0x85, 0x84, 0x43, 0xE6, + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + // Bytes cc0 - cff + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + // Bytes d00 - d3f + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + // Bytes d40 - d7f + 0x8D, 0x90, 0x43, 0xE6, 0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 0xE6, + // Bytes d80 - dbf + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 
0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + // Bytes dc0 - dff + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + // Bytes e00 - e3f + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + // Bytes e40 - e7f + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + // Bytes e80 - ebf + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + // Bytes ec0 - eff + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + // Bytes f00 - f3f + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + // Bytes f40 - f7f + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + // Bytes f80 - fbf + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 
0x43, 0xE7, + // Bytes fc0 - fff + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + // Bytes 1040 - 107f + 0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0x94, 0x9F, 0x43, 0xE7, 0x94, 0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + // Bytes 1100 - 113f + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + 0xA6, 0x8F, 0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + // Bytes 1180 - 11bf + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 0xAB, 0x8B, 0x43, 0xE7, + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + // Bytes 11c0 - 11ff + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 
0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + // Bytes 1200 - 123f + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + // Bytes 1240 - 127f + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 0x43, 0xE8, + 0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + 0x89, 0xB8, 0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + // Bytes 1300 - 133f + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + // Bytes 1340 - 137f + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + // Bytes 13c0 - 13ff + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 
0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + // Bytes 1440 - 147f + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + // Bytes 1480 - 14bf + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + // Bytes 14c0 - 14ff + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + 0xBE, 0xB5, 0x43, 0xE8, 0xBE, 0xB6, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + // Bytes 1540 - 157f + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + // Bytes 1600 - 163f + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 
0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + // Bytes 1640 - 167f + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + // Bytes 1680 - 16bf + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + // Bytes 16c0 - 16ff + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + // Bytes 1700 - 173f + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 0xBE, 0xA0, 0x43, 0xEA, 0x99, 0x91, 0x43, 0xEA, + 0x9A, 0x89, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1740 - 177f + 0x9D, 0xAF, 0x43, 0xEA, 0x9E, 0x8E, 0x43, 0xEA, + 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x43, 0xEA, + 0xAD, 0xA6, 0x43, 0xEA, 0xAD, 0xA7, 0x44, 0xF0, + 0x9D, 0xBC, 0x84, 0x44, 0xF0, 0x9D, 0xBC, 0x85, + 0x44, 0xF0, 0x9D, 0xBC, 0x86, 0x44, 0xF0, 0x9D, + 0xBC, 0x88, 0x44, 0xF0, 0x9D, 0xBC, 0x8A, 0x44, + 0xF0, 0x9D, 0xBC, 0x9E, 0x44, 0xF0, 0xA0, 0x84, + 0xA2, 0x44, 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, + // Bytes 1780 - 17bf + 0xA0, 0x94, 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, + 0x44, 0xF0, 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, + 0xA0, 0x84, 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, + 0xF0, 0xA0, 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, + 0xA3, 0x44, 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, + 0xA1, 0x9A, 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, + 0x44, 0xF0, 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, + 0xAC, 0x98, 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, + // Bytes 17c0 - 17ff + 0xF0, 0xA1, 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, + 0xA6, 0x44, 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, + 0xA2, 0x86, 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, + 0x44, 0xF0, 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, + 0xA1, 0x84, 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, + 0xF0, 0xA2, 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, + 0xB1, 0x44, 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, + 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, + // Bytes 1800 - 183f + 0x44, 0xF0, 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, + 0x8E, 0x9C, 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, + 0xF0, 0xA3, 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, + 0xAD, 0x44, 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, + 0xA3, 0xA2, 0xA7, 
0x44, 0xF0, 0xA3, 0xAA, 0x8D, + 0x44, 0xF0, 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, + 0xB2, 0xBC, 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, + 0xF0, 0xA3, 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, + // Bytes 1840 - 187f + 0x9E, 0x44, 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, + 0xA4, 0x89, 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, + 0x44, 0xF0, 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, + 0x98, 0x88, 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, + 0xF0, 0xA4, 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, + 0xB6, 0x44, 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, + 0xA4, 0xBE, 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, + 0x44, 0xF0, 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, + // Bytes 1880 - 18bf + 0x83, 0xB2, 0x44, 0xF0, 0xA5, 0x83, 0xB3, 0x44, + 0xF0, 0xA5, 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, + 0xB3, 0x44, 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, + 0xA5, 0x90, 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, + 0x44, 0xF0, 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, + 0x9B, 0x85, 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, + 0xF0, 0xA5, 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, + 0xAB, 0x44, 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, + // Bytes 18c0 - 18ff + 0xA5, 0xB3, 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, + 0x44, 0xF0, 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, + 0x88, 0xA8, 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, + 0xF0, 0xA6, 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, + 0xBE, 0x44, 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, + 0xA6, 0x94, 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, + 0x44, 0xF0, 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, + 0x9E, 0xB5, 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, + // Bytes 1900 - 193f + 0xF0, 0xA6, 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, + 0x95, 0x44, 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, + 0xA6, 0xBC, 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, + 0x44, 0xF0, 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, + 0x8F, 0x8A, 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, + 0xF0, 0xA7, 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, + 0xA6, 0x44, 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, + 0xA7, 0xBB, 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, + // Bytes 1940 - 197f + 0x44, 0xF0, 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, + 0x97, 0xAD, 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, + 0xF0, 0xA8, 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, + 0xB7, 0x44, 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, + 0xA9, 0x87, 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, + 0x44, 0xF0, 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, + 0x92, 0x96, 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, + 0xF0, 0xA9, 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, + // Bytes 1980 - 19bf + 0x8E, 0x44, 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, + 0xAA, 0x88, 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, + 0x44, 0xF0, 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, + 0x98, 0x80, 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, + 0x42, 0x2E, 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, + 0x2E, 0x42, 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, + 0x31, 0x30, 0x42, 0x31, 0x31, 0x42, 0x31, 0x32, + 0x42, 0x31, 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, + // Bytes 19c0 - 19ff + 0x35, 0x42, 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, + 0x31, 0x38, 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, + 0x42, 0x32, 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, + 0x31, 0x42, 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, + 0x32, 0x34, 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, + 0x42, 0x32, 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, + 0x39, 0x42, 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, + 0x33, 0x30, 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, + // Bytes 1a00 - 1a3f + 0x42, 0x33, 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, + 0x35, 0x42, 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, + 0x33, 0x38, 0x42, 0x33, 0x39, 0x42, 0x34, 0x2C, + 0x42, 0x34, 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, + 0x31, 0x42, 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, + 0x34, 0x34, 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, + 0x42, 0x34, 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, + 0x39, 0x42, 0x35, 0x2C, 0x42, 0x35, 
0x2E, 0x42, + // Bytes 1a40 - 1a7f + 0x35, 0x30, 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, + 0x42, 0x37, 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, + 0x2C, 0x42, 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, + 0x39, 0x2E, 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, + 0x42, 0x3F, 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, + 0x71, 0x42, 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, + 0x44, 0x5A, 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, + 0x42, 0x47, 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, + // Bytes 1a80 - 1abf + 0x56, 0x42, 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, + 0x49, 0x49, 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, + 0x42, 0x49, 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, + 0x42, 0x42, 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, + 0x4C, 0x4A, 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, + 0x42, 0x4D, 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, + 0x52, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + // Bytes 1ac0 - 1aff + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + // Bytes 1b00 - 1b3f + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + // Bytes 1b40 - 1b7f + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + // Bytes 1b80 - 1bbf + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + // Bytes 1bc0 - 1bff + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + // Bytes 1c00 - 1c3f + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + // Bytes 1c40 - 1c7f + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 
0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + // Bytes 1c80 - 1cbf + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + // Bytes 1cc0 - 1cff + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + // Bytes 1d00 - 1d3f + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + // Bytes 1d40 - 1d7f + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + // Bytes 1d80 - 1dbf + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + // Bytes 1dc0 - 1dff + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + // Bytes 1e00 - 1e3f + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + // Bytes 1e40 - 1e7f + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + 
0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + // Bytes 1e80 - 1ebf + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + // Bytes 1ec0 - 1eff + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + // Bytes 1f00 - 1f3f + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + // Bytes 1f40 - 1f7f + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + // Bytes 1f80 - 1fbf + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + // Bytes 1fc0 - 1fff + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + // Bytes 2000 - 203f + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + // Bytes 2040 - 207f + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + // Bytes 2080 - 20bf + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, 
+ 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + // Bytes 20c0 - 20ff + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + // Bytes 2100 - 213f + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + // Bytes 2140 - 217f + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + // Bytes 2180 - 21bf + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + // Bytes 21c0 - 21ff + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + // Bytes 2200 - 223f + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + // Bytes 2240 - 227f + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + // Bytes 2280 - 22bf + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 
0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + // Bytes 22c0 - 22ff + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2300 - 233f + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + // Bytes 2340 - 237f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + // Bytes 2380 - 23bf + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + // Bytes 23c0 - 23ff + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + // Bytes 2400 - 243f + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + // Bytes 2440 - 247f + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2480 - 24bf + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 
0x8A, 0x46, + // Bytes 24c0 - 24ff + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + // Bytes 2500 - 253f + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + // Bytes 2540 - 257f + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + // Bytes 2580 - 25bf + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + // Bytes 25c0 - 25ff + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + // Bytes 2600 - 263f + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + // Bytes 2640 - 267f + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + // Bytes 2680 - 26bf + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + // Bytes 26c0 - 26ff + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 
0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + // Bytes 2700 - 273f + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + // Bytes 2740 - 277f + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + // Bytes 2780 - 27bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + // Bytes 27c0 - 27ff + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2800 - 283f + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE4, 0xBB, 0xA4, 0xE5, 0x92, + 0x8C, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, + 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, + // Bytes 2840 - 287f + 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, + 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, + 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, + 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + // Bytes 2880 - 28bf + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, + // Bytes 28c0 - 28ff + 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, + 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, + 
0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, + 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, + // Bytes 2900 - 293f + 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, + 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, + 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, + 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, + 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, + 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, + // Bytes 2940 - 297f + 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, + 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, + 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, + // Bytes 2980 - 29bf + 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 29c0 - 29ff + 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, + 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, + 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, + 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + // Bytes 2a00 - 2a3f + 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0xA4, + // Bytes 2a40 - 2a7f + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, + 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + // Bytes 2a80 - 2abf + 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, + 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, + 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, + // Bytes 2ac0 - 2aff + 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, + 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + // Bytes 2b00 - 2b3f + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 
+ 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, + // Bytes 2b40 - 2b7f + 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, + // Bytes 2b80 - 2bbf + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, + 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, + // Bytes 2bc0 - 2bff + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, + 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, + 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, + 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, + 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, + 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, + // Bytes 2c40 - 2c7f + 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, + // Bytes 2c80 - 2cbf + 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, + // Bytes 2cc0 - 2cff + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, + 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, + // Bytes 2d00 - 2d3f + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 
0x83, 0xA1, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, + // Bytes 2d40 - 2d7f + 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, + // Bytes 2d80 - 2dbf + 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, + 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + // Bytes 2dc0 - 2dff + 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, + // Bytes 2e00 - 2e3f + 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, + 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, + 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, + // Bytes 2e40 - 2e7f + 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, + 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, + 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, + 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, + 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + // Bytes 2e80 - 2ebf + 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, + 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + // Bytes 2ec0 - 2eff + 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, + 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, + 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, + 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, + // Bytes 2f00 - 2f3f + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, + 0xF0, 0x91, 0xA4, 0xB5, 0xF0, 0x91, 0xA4, 0xB0, + 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, + 0xE0, 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, + 0x44, 0x5A, 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, + 0xCC, 0x8C, 0xCD, 0x44, 0x64, 0x7A, 
0xCC, 0x8C, + // Bytes 2f40 - 2f7f + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, + 0xB9, 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, + // Bytes 2f80 - 2fbf + 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, + 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, + // Bytes 2fc0 - 2fff + 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, + 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, + 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, + 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, + 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, + // Bytes 3000 - 303f + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, + 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, + 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, + 0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, + // Bytes 3040 - 307f + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, + 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, + // Bytes 3080 - 30bf + 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, + 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, + 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, + 0x03, 0x41, 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, + 0x81, 0xCD, 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, + // Bytes 30c0 - 30ff + 0x41, 0xCC, 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, + 0xCD, 0x03, 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, + 0xCC, 0x8F, 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, + 0x03, 0x41, 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, + 0xA8, 0xA9, 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, + 0x42, 0xCC, 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, + 0xB9, 0x03, 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, + 0xCC, 0x82, 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, + // Bytes 3100 - 313f + 0x03, 0x43, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0x87, 0xCD, 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, + 0x44, 0xCC, 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, + 0xA9, 0x03, 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, + 0xCC, 0xB1, 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, + 0x03, 0x45, 0xCC, 0x81, 0xCD, 0x03, 0x45, 0xCC, + 0x83, 0xCD, 0x03, 0x45, 0xCC, 0x86, 0xCD, 0x03, + 0x45, 0xCC, 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, + // Bytes 3140 - 317f + 0xCD, 0x03, 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, + 0xCC, 0x8C, 0xCD, 0x03, 0x45, 0xCC, 0x8F, 0xCD, + 0x03, 0x45, 0xCC, 0x91, 0xCD, 
0x03, 0x45, 0xCC, + 0xA8, 0xA9, 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, + 0x45, 0xCC, 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, + 0xCD, 0x03, 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, + 0xCC, 0x82, 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, + 0x03, 0x47, 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, + // Bytes 3180 - 31bf + 0x87, 0xCD, 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, + 0x47, 0xCC, 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, + 0xCD, 0x03, 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, + 0xCC, 0x88, 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, + 0x03, 0x48, 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, + 0x49, 0xCC, 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, + 0xCD, 0x03, 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, + // Bytes 31c0 - 31ff + 0xCC, 0x83, 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, + 0x03, 0x49, 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, + 0x87, 0xCD, 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, + 0x49, 0xCC, 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, + 0xCD, 0x03, 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, + 0xCC, 0xA3, 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, + 0x03, 0x49, 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, + 0x82, 0xCD, 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3200 - 323f + 0x4B, 0xCC, 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, + 0xB9, 0x03, 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, + 0xCC, 0xB1, 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, + 0x03, 0x4C, 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, + 0xA7, 0xA9, 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, + 0x4C, 0xCC, 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, + 0xCD, 0x03, 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, + 0xCC, 0xA3, 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, + // Bytes 3240 - 327f + 0x03, 0x4E, 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, + 0x83, 0xCD, 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, + 0x4E, 0xCC, 0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, + 0xCC, 0xAD, 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, + 0x03, 0x4F, 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, + 0x81, 0xCD, 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, + 0x4F, 0xCC, 0x89, 0xCD, 0x03, 0x4F, 0xCC, 0x8B, + // Bytes 3280 - 32bf + 0xCD, 0x03, 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, + 0xCC, 0x8F, 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, + 0x03, 0x50, 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, + 0x52, 0xCC, 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, + 0xCD, 0x03, 0x52, 0xCC, 0x8F, 0xCD, 0x03, 0x52, + 0xCC, 0x91, 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, + 0x03, 0x52, 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, + // Bytes 32c0 - 32ff + 0x82, 0xCD, 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, + 0x53, 0xCC, 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, + 0xA9, 0x03, 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, + 0xCC, 0x8C, 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, + 0x03, 0x54, 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, + 0xA7, 0xA9, 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, + 0x54, 0xCC, 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, + 0xCD, 0x03, 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, + // Bytes 3300 - 333f + 0xCC, 0x82, 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, + 0x03, 0x55, 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, + 0x8A, 0xCD, 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, + 0x55, 0xCC, 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, + 0xCD, 0x03, 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, + 0xCC, 0xA3, 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, + 0x03, 0x55, 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, + 0xAD, 0xB9, 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, + // Bytes 3340 - 337f + 0x56, 0xCC, 0x83, 0xCD, 0x03, 0x56, 0xCC, 0xA3, + 0xB9, 0x03, 0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, + 0xCC, 0x81, 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, + 0x03, 0x57, 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, + 0x88, 0xCD, 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, + 0x58, 0xCC, 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, + 
0xCD, 0x03, 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, + 0xCC, 0x81, 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, + // Bytes 3380 - 33bf + 0x03, 0x59, 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, + 0x84, 0xCD, 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, + 0x59, 0xCC, 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, + 0xCD, 0x03, 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, + 0xCC, 0x81, 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, + 0x03, 0x5A, 0xCC, 0x87, 0xCD, 0x03, 0x5A, 0xCC, + 0x8C, 0xCD, 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, + 0x5A, 0xCC, 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, + // Bytes 33c0 - 33ff + 0xCD, 0x03, 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, + 0xCC, 0x83, 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, + 0x03, 0x61, 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, + 0x8C, 0xCD, 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, + 0x61, 0xCC, 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, + 0xB9, 0x03, 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, + 0xCC, 0x87, 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, + 0x03, 0x62, 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, + // Bytes 3400 - 343f + 0x81, 0xCD, 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, + 0x63, 0xCC, 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, + 0xCC, 0x8C, 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, + 0x03, 0x64, 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, + 0xAD, 0xB9, 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, + 0x65, 0xCC, 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, + 0xCD, 0x03, 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, + // Bytes 3440 - 347f + 0xCC, 0x86, 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, + 0x03, 0x65, 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, + 0x89, 0xCD, 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, + 0x65, 0xCC, 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, + 0xCD, 0x03, 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, + 0xCC, 0xAD, 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, + 0x03, 0x66, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, + 0x81, 0xCD, 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, + // Bytes 3480 - 34bf + 0x67, 0xCC, 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, + 0xCD, 0x03, 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, + 0xCC, 0x8C, 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, + 0x03, 0x68, 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, + 0x87, 0xCD, 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, + 0x68, 0xCC, 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, + 0xB9, 0x03, 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0xAE, 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, + // Bytes 34c0 - 34ff + 0x03, 0x69, 0xCC, 0x80, 0xCD, 0x03, 0x69, 0xCC, + 0x81, 0xCD, 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, + 0x69, 0xCC, 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, + 0xCD, 0x03, 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, + 0xCC, 0x89, 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, + 0x03, 0x69, 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, + 0x91, 0xCD, 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, + 0x69, 0xCC, 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, + // Bytes 3500 - 353f + 0xB9, 0x03, 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, + 0x03, 0x6B, 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, + 0xA3, 0xB9, 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, + 0x6B, 0xCC, 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, + 0xCD, 0x03, 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, + 0xCC, 0xA7, 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, + 0x03, 0x6C, 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, + // Bytes 3540 - 357f + 0x81, 0xCD, 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, + 0x6D, 0xCC, 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, + 0xCD, 0x03, 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, + 0xCC, 0x83, 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, + 0x03, 0x6E, 0xCC, 0x8C, 0xCD, 0x03, 0x6E, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, + 0x6E, 0xCC, 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, + 0xB9, 0x03, 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, + // Bytes 3580 - 35bf + 0xCC, 0x81, 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, 
+ 0x03, 0x6F, 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, + 0x8B, 0xCD, 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, + 0x6F, 0xCC, 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, + 0xCD, 0x03, 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, + 0x03, 0x72, 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, + 0x8C, 0xCD, 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, + // Bytes 35c0 - 35ff + 0x72, 0xCC, 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, + 0xA9, 0x03, 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, + 0xCC, 0x82, 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, + 0x03, 0x73, 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, + 0xA7, 0xA9, 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, + 0x74, 0xCC, 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, + 0xCD, 0x03, 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, + 0xCC, 0xA6, 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, + // Bytes 3600 - 363f + 0x03, 0x74, 0xCC, 0xAD, 0xB9, 0x03, 0x74, 0xCC, + 0xB1, 0xB9, 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, + 0x75, 0xCC, 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, + 0xCD, 0x03, 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, + 0xCC, 0x89, 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, + 0x03, 0x75, 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, + 0x8C, 0xCD, 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, + 0x75, 0xCC, 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, + // Bytes 3640 - 367f + 0xB9, 0x03, 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, + 0xCC, 0xA8, 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, + 0x03, 0x75, 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, + 0x83, 0xCD, 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, + 0x77, 0xCC, 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, + 0xCD, 0x03, 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, + 0xCC, 0x87, 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, + 0x03, 0x77, 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, + // Bytes 3680 - 36bf + 0xA3, 0xB9, 0x03, 0x78, 0xCC, 0x87, 0xCD, 0x03, + 0x78, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, + 0xCD, 0x03, 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, + 0xCC, 0x82, 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, + 0x03, 0x79, 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, + 0x87, 0xCD, 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, + 0x79, 0xCC, 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, + 0xCD, 0x03, 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, + // Bytes 36c0 - 36ff + 0xCC, 0x81, 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, + 0x03, 0x7A, 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, + 0x8C, 0xCD, 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, + 0x7A, 0xCC, 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, + 0x80, 0xCE, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, + 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, + 0x86, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, + // Bytes 3700 - 373f + 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, + 0x81, 0xCD, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, + 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, + 0x92, 0xCC, 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0x91, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, + // Bytes 3740 - 377f + 0x85, 0xDD, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x97, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x99, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + 0x84, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + // Bytes 3780 - 37bf + 0x9F, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x84, 0xCD, 0x04, 
0xCE, 0xA5, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + 0xA9, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, + // Bytes 37c0 - 37ff + 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB1, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB7, 0xCD, 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, + // Bytes 3800 - 383f + 0x82, 0xCD, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x81, 0xCC, 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x94, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + 0x86, 0xCD, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, + 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, + // Bytes 3840 - 387f + 0x92, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, + 0x81, 0xCD, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x95, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, + // Bytes 3880 - 38bf + 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x84, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x9A, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + // Bytes 38c0 - 38ff + 0x8B, 0xCD, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAD, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + // Bytes 3900 - 393f + 0xB6, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x86, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, + // Bytes 3940 - 397f + 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, + 0x87, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0xB4, 0xCC, 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, + 0x8F, 0xCD, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, + 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0xA8, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, + // Bytes 3980 - 39bf + 0x88, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, + 0xA7, 0xD9, 0x95, 0xB9, 0x04, 0xD9, 0x88, 0xD9, + 0x94, 0xCD, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, + 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x92, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, + 0x94, 0xCD, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x41, 0xCC, 0x82, 0xCC, 
0x81, 0xCE, + // Bytes 39c0 - 39ff + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, + 0xCC, 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, + 0xCE, 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, + 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, + // Bytes 3a00 - 3a3f + 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, + 0xCC, 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, + 0xA7, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, + 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, + 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, + // Bytes 3a40 - 3a7f + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, + 0xA7, 0xCC, 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, + 0xCC, 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3a80 - 3abf + 0x83, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x88, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, + 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, + 0xCE, 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + // Bytes 3ac0 - 3aff + 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + 0xA3, 0xBA, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, + 0xCE, 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, + 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, + 0xCC, 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, + 0xA3, 0xCC, 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, + // Bytes 3b00 - 3b3f + 0x88, 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, + // Bytes 3b40 - 3b7f + 0xBA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, + 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, + 0xCE, 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + // Bytes 3b80 - 3bbf + 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, + 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCE, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, + // Bytes 3bc0 - 3bff + 0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, + 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 
0x65, 0xCC, 0xA7, + 0xCC, 0x86, 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, + 0x81, 0xCE, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, + // Bytes 3c00 - 3c3f + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, + 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x88, 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, + 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, + 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, + // Bytes 3c40 - 3c7f + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xBA, 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, + 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, + 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, + 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, + 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, + // Bytes 3c80 - 3cbf + 0xCC, 0x87, 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, + 0xCE, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + // Bytes 3cc0 - 3cff + 0x83, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, + 0xBE, 0xBF, 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, + 0xBE, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, + 0x82, 0xCE, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, + // Bytes 3d00 - 3d3f + 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, + // Bytes 3d40 - 3d7f + 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3d80 - 3dbf + 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3dc0 - 3dff + 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x86, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, + 
0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3e00 - 3e3f + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, + 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 3e40 - 3e7f + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3e80 - 3ebf + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 3ec0 - 3eff + 0xCE, 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, + // Bytes 3f00 - 3f3f + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 3f40 - 3f7f + 0xDE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3f80 - 3fbf + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, + // Bytes 3fc0 - 3fff + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, + // Bytes 4000 - 403f + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 
+ 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, + 0x89, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, + // Bytes 4040 - 407f + 0x15, 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, + // Bytes 4080 - 40bf + 0x11, 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, + // Bytes 40c0 - 40ff + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, + // Bytes 4100 - 413f + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + // Bytes 4140 - 417f + 0x11, 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, + // Bytes 4180 - 41bf + 0x11, 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + // Bytes 41c0 - 41ff + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, + // Bytes 4200 - 423f + 0x11, 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 
0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 4240 - 427f + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4280 - 42bf + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, + // Bytes 42c0 - 42ff + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4300 - 433f + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + // Bytes 4340 - 437f + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, + // Bytes 4380 - 43bf + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, + 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, + 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, + 0x43, 0x20, 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, + 0x84, 0xCD, 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, + 0x20, 0xCC, 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, + 0xCD, 0x43, 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, + // Bytes 43c0 - 43ff + 0xCC, 0x8A, 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, + 0x43, 0x20, 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, + 0x94, 0xCD, 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, + 0x20, 0xCC, 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, + 0xB9, 0x43, 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, + 0xCD, 0x85, 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, + 0x43, 0x20, 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, + 0x8D, 0x65, 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, + // Bytes 4400 - 443f + 0x20, 0xD9, 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, + 0x71, 0x43, 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, + 0xD9, 0x92, 0x79, 0x43, 0x41, 0xCC, 0x8A, 0xCD, + 0x43, 0x73, 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, + 0x82, 0x99, 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, + 0x11, 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, + 0xCE, 0x91, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x97, 
0xCC, 0x81, + // Bytes 4440 - 447f + 0xCD, 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x9F, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, + 0xCD, 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, + // Bytes 4480 - 44bf + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, + 0xCD, 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, + 0xD7, 0x90, 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x92, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, + // Bytes 44c0 - 44ff + 0xD7, 0x95, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, + 0xD7, 0x99, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x9C, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, + // Bytes 4500 - 453f + 0x45, 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA3, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, + 0x4D, 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA7, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x51, 0x44, + 0xD7, 0xA9, 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, + // Bytes 4540 - 457f + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, + 0x35, 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, + 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, + 0xD9, 0x94, 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x95, + 0xB9, 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, + 0xD8, 0xB1, 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, + 0xD9, 0x8B, 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, + 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, + // Bytes 4580 - 45bf + 0xD9, 0x80, 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, + 0xD9, 0x91, 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, + 0x79, 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x7D, 0x44, + 0xD9, 0x88, 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, + 0xCD, 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, + 0xDB, 0x95, 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, + 0x88, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, + // Bytes 45c0 - 45ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, + 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, + 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, + // Bytes 4600 - 463f + 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, + 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x46, 0xCF, 0x85, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x46, 0xD7, 0xA9, + 0xD6, 0xBC, 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, + // Bytes 4640 - 467f + 0xD6, 0xBC, 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, + 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, + 0xD9, 0x8F, 0xD9, 0x91, 0x76, 
0x46, 0xD9, 0x80, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, + 0x95, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x96, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x97, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x9C, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + // Bytes 4680 - 46bf + 0xA1, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xA2, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAB, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAF, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA1, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA2, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xAF, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x96, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + // Bytes 46c0 - 46ff + 0x97, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x9C, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xAB, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB2, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB8, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA1, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA2, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, + // Bytes 4700 - 473f + 0xB3, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, + 0x86, 0xE3, 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, + 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, + // Bytes 4740 - 477f + 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBE, 0x80, 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4780 - 47bf + 0x85, 0xB1, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAE, 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, + // Bytes 47c0 - 47ff + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, + 0xB2, 0x83, 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, + 0xCC, 0x86, 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, + 0x83, 0x41, 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, + 0x8A, 0xCD, 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, + 0x43, 0xCC, 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, + 0xCD, 0x83, 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, + 0xCC, 0xA3, 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, + // Bytes 4800 - 483f + 0x83, 0x49, 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, + 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, + 0x4F, 0xCC, 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, + 0xCD, 0x83, 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, + 0xCC, 0x88, 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, + 0x83, 0x4F, 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, + 0xA8, 0xA9, 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, + 0x53, 0xCC, 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, + // Bytes 4840 - 487f + 0xCD, 0x83, 0x53, 0xCC, 0xA3, 0xB9, 0x83, 0x55, + 0xCC, 0x83, 0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, + 0x83, 0x55, 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, + 0x9B, 0xB1, 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, + 0x61, 0xCC, 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, + 0xCD, 0x83, 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, + 
0xCC, 0x8A, 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, + 0x83, 0x63, 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, + // Bytes 4880 - 48bf + 0x82, 0xCD, 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, + 0x65, 0xCC, 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, + 0xA9, 0x83, 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, + 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, + 0x83, 0x6F, 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, + 0x84, 0xCD, 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, + 0x6F, 0xCC, 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, + 0xB1, 0x83, 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, + // Bytes 48c0 - 48ff + 0xCC, 0xA8, 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, + 0x83, 0x73, 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, + 0x8C, 0xCD, 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, + 0x75, 0xCC, 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, + 0xCD, 0x83, 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, + 0xCC, 0x9B, 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x95, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4940 - 497f + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB5, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, + // Bytes 4980 - 49bf + 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4a00 - 4a3f + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + // Bytes 4a40 - 4a7f + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + // Bytes 4a80 - 4abf + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 
+ 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4ac0 - 4aff + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, + 0xCC, 0x80, 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, + 0x33, 0x42, 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, + // Bytes 4b00 - 4b3f + 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, + // Bytes 4b40 - 4b7f + 0x43, 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, + // Bytes 4b80 - 4bbf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, + 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x33, 0x43, 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, + // Bytes 4bc0 - 4bff + 0xE3, 0x82, 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, + 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10798 bytes (10.54 KiB). Checksum: b5981cc85e3bd14. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, + // Block 0x5, offset 0x140 + 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 
0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x36e2, + 0x286: 0x372a, 0x287: 0x00ce, 
0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3862, 0x2c1: 0x386e, 0x2c3: 0x385c, + 0x2c6: 0xa000, 0x2c7: 0x384a, + 0x2cc: 0x389e, 0x2cd: 0x3886, 0x2ce: 0x38b0, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3892, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x3916, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3874, 0x302: 0x38f8, + 0x310: 0x3850, 0x311: 0x38d4, + 0x312: 0x3856, 0x313: 0x38da, 0x316: 0x3868, 0x317: 0x38ec, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x396a, 0x31b: 0x3970, 0x31c: 0x387a, 0x31d: 0x38fe, + 0x31e: 0x3880, 0x31f: 0x3904, 0x322: 0x388c, 0x323: 0x3910, + 0x324: 0x3898, 0x325: 0x391c, 0x326: 0x38a4, 0x327: 0x3928, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3976, 0x32b: 0x397c, 0x32c: 0x38ce, 0x32d: 0x3952, 0x32e: 0x38aa, 0x32f: 0x392e, + 0x330: 0x38b6, 0x331: 0x393a, 0x332: 0x38bc, 0x333: 0x3940, 0x334: 0x38c2, 0x335: 0x3946, + 0x338: 0x38c8, 0x339: 0x394c, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3ca: 0x8133, 0x3cb: 0x8133, + 0x3cc: 0x8133, 0x3cd: 0x8133, 0x3ce: 0x8133, 0x3cf: 0x812e, 0x3d0: 0x812e, 0x3d1: 0x812e, + 0x3d2: 0x812e, 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 
0x405: 0xa000, + 0x406: 0x2e5d, 0x407: 0xa000, 0x408: 0x2e65, 0x409: 0xa000, 0x40a: 0x2e6d, 0x40b: 0xa000, + 0x40c: 0x2e75, 0x40d: 0xa000, 0x40e: 0x2e7d, 0x411: 0xa000, + 0x412: 0x2e85, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2e8d, + 0x43c: 0xa000, 0x43d: 0x2e95, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47a: 0x812d, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x30d8, 0x481: 0x33e4, 0x482: 0x30e2, 0x483: 0x33ee, 0x484: 0x30e7, 0x485: 0x33f3, + 0x486: 0x30ec, 0x487: 0x33f8, 0x488: 0x3a0d, 0x489: 0x3b9c, 0x48a: 0x3105, 0x48b: 0x3411, + 0x48c: 0x310f, 0x48d: 0x341b, 0x48e: 0x311e, 0x48f: 0x342a, 0x490: 0x3114, 0x491: 0x3420, + 0x492: 0x3119, 0x493: 0x3425, 0x494: 0x3a30, 0x495: 0x3bbf, 0x496: 0x3a37, 0x497: 0x3bc6, + 0x498: 0x315a, 0x499: 0x3466, 0x49a: 0x315f, 0x49b: 0x346b, 0x49c: 0x3a45, 0x49d: 0x3bd4, + 0x49e: 0x3164, 0x49f: 0x3470, 0x4a0: 0x3173, 0x4a1: 0x347f, 0x4a2: 0x3191, 0x4a3: 0x349d, + 0x4a4: 0x31a0, 0x4a5: 0x34ac, 0x4a6: 0x3196, 0x4a7: 0x34a2, 0x4a8: 0x31a5, 0x4a9: 0x34b1, + 0x4aa: 0x31aa, 0x4ab: 0x34b6, 0x4ac: 0x31f0, 0x4ad: 0x34fc, 0x4ae: 0x3a4c, 0x4af: 0x3bdb, + 0x4b0: 0x31fa, 0x4b1: 0x350b, 0x4b2: 0x3204, 0x4b3: 0x3515, 0x4b4: 0x320e, 0x4b5: 0x351f, + 0x4b6: 0x4805, 0x4b7: 0x4896, 0x4b8: 0x3a53, 0x4b9: 0x3be2, 0x4ba: 0x3227, 0x4bb: 0x3538, + 0x4bc: 0x3222, 0x4bd: 0x3533, 0x4be: 0x322c, 0x4bf: 0x353d, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3231, 0x4c1: 0x3542, 0x4c2: 0x3236, 0x4c3: 0x3547, 0x4c4: 0x324a, 0x4c5: 0x355b, + 0x4c6: 0x3254, 0x4c7: 0x3565, 0x4c8: 0x3263, 0x4c9: 0x3574, 0x4ca: 0x325e, 0x4cb: 0x356f, + 0x4cc: 0x3a76, 0x4cd: 0x3c05, 0x4ce: 0x3a84, 0x4cf: 0x3c13, 0x4d0: 0x3a8b, 0x4d1: 0x3c1a, + 0x4d2: 0x3a92, 0x4d3: 0x3c21, 0x4d4: 0x3290, 0x4d5: 0x35a1, 0x4d6: 0x3295, 0x4d7: 0x35a6, + 0x4d8: 0x329f, 0x4d9: 0x35b0, 0x4da: 0x4832, 0x4db: 0x48c3, 0x4dc: 0x3ad8, 0x4dd: 0x3c67, + 0x4de: 0x32b8, 0x4df: 0x35c9, 0x4e0: 0x32c2, 0x4e1: 0x35d3, 0x4e2: 0x4841, 0x4e3: 0x48d2, + 0x4e4: 0x3adf, 0x4e5: 0x3c6e, 0x4e6: 0x3ae6, 0x4e7: 0x3c75, 0x4e8: 0x3aed, 0x4e9: 0x3c7c, + 0x4ea: 0x32d1, 0x4eb: 0x35e2, 0x4ec: 0x32db, 0x4ed: 0x35f1, 0x4ee: 0x32ef, 0x4ef: 0x3605, + 0x4f0: 0x32ea, 0x4f1: 0x3600, 0x4f2: 0x332b, 0x4f3: 0x3641, 0x4f4: 0x333a, 0x4f5: 0x3650, + 0x4f6: 0x3335, 0x4f7: 0x364b, 0x4f8: 0x3af4, 0x4f9: 0x3c83, 0x4fa: 0x3afb, 0x4fb: 0x3c8a, + 0x4fc: 0x333f, 0x4fd: 0x3655, 0x4fe: 0x3344, 0x4ff: 0x365a, + // Block 0x14, offset 0x500 + 0x500: 0x3349, 0x501: 0x365f, 0x502: 0x334e, 0x503: 0x3664, 0x504: 0x335d, 0x505: 0x3673, + 0x506: 0x3358, 0x507: 0x366e, 0x508: 0x3362, 0x509: 0x367d, 0x50a: 0x3367, 0x50b: 
0x3682, + 0x50c: 0x336c, 0x50d: 0x3687, 0x50e: 0x338a, 0x50f: 0x36a5, 0x510: 0x33a3, 0x511: 0x36c3, + 0x512: 0x33b2, 0x513: 0x36d2, 0x514: 0x33b7, 0x515: 0x36d7, 0x516: 0x34bb, 0x517: 0x35e7, + 0x518: 0x3678, 0x519: 0x36b4, 0x51b: 0x3712, + 0x520: 0x47e2, 0x521: 0x4873, 0x522: 0x30c4, 0x523: 0x33d0, + 0x524: 0x39b9, 0x525: 0x3b48, 0x526: 0x39b2, 0x527: 0x3b41, 0x528: 0x39c7, 0x529: 0x3b56, + 0x52a: 0x39c0, 0x52b: 0x3b4f, 0x52c: 0x39ff, 0x52d: 0x3b8e, 0x52e: 0x39d5, 0x52f: 0x3b64, + 0x530: 0x39ce, 0x531: 0x3b5d, 0x532: 0x39e3, 0x533: 0x3b72, 0x534: 0x39dc, 0x535: 0x3b6b, + 0x536: 0x3a06, 0x537: 0x3b95, 0x538: 0x47f6, 0x539: 0x4887, 0x53a: 0x3141, 0x53b: 0x344d, + 0x53c: 0x312d, 0x53d: 0x3439, 0x53e: 0x3a1b, 0x53f: 0x3baa, + // Block 0x15, offset 0x540 + 0x540: 0x3a14, 0x541: 0x3ba3, 0x542: 0x3a29, 0x543: 0x3bb8, 0x544: 0x3a22, 0x545: 0x3bb1, + 0x546: 0x3a3e, 0x547: 0x3bcd, 0x548: 0x31d2, 0x549: 0x34de, 0x54a: 0x31e6, 0x54b: 0x34f2, + 0x54c: 0x4828, 0x54d: 0x48b9, 0x54e: 0x3277, 0x54f: 0x3588, 0x550: 0x3a61, 0x551: 0x3bf0, + 0x552: 0x3a5a, 0x553: 0x3be9, 0x554: 0x3a6f, 0x555: 0x3bfe, 0x556: 0x3a68, 0x557: 0x3bf7, + 0x558: 0x3aca, 0x559: 0x3c59, 0x55a: 0x3aae, 0x55b: 0x3c3d, 0x55c: 0x3aa7, 0x55d: 0x3c36, + 0x55e: 0x3abc, 0x55f: 0x3c4b, 0x560: 0x3ab5, 0x561: 0x3c44, 0x562: 0x3ac3, 0x563: 0x3c52, + 0x564: 0x3326, 0x565: 0x363c, 0x566: 0x3308, 0x567: 0x361e, 0x568: 0x3b25, 0x569: 0x3cb4, + 0x56a: 0x3b1e, 0x56b: 0x3cad, 0x56c: 0x3b33, 0x56d: 0x3cc2, 0x56e: 0x3b2c, 0x56f: 0x3cbb, + 0x570: 0x3b3a, 0x571: 0x3cc9, 0x572: 0x3371, 0x573: 0x368c, 0x574: 0x3399, 0x575: 0x36b9, + 0x576: 0x3394, 0x577: 0x36af, 0x578: 0x3380, 0x579: 0x369b, + // Block 0x16, offset 0x580 + 0x580: 0x4945, 0x581: 0x494b, 0x582: 0x4a5f, 0x583: 0x4a77, 0x584: 0x4a67, 0x585: 0x4a7f, + 0x586: 0x4a6f, 0x587: 0x4a87, 0x588: 0x48eb, 0x589: 0x48f1, 0x58a: 0x49cf, 0x58b: 0x49e7, + 0x58c: 0x49d7, 0x58d: 0x49ef, 0x58e: 0x49df, 0x58f: 0x49f7, 0x590: 0x4957, 0x591: 0x495d, + 0x592: 0x3ef9, 0x593: 0x3f09, 0x594: 0x3f01, 0x595: 0x3f11, + 0x598: 0x48f7, 0x599: 0x48fd, 0x59a: 0x3e29, 0x59b: 0x3e39, 0x59c: 0x3e31, 0x59d: 0x3e41, + 0x5a0: 0x496f, 0x5a1: 0x4975, 0x5a2: 0x4a8f, 0x5a3: 0x4aa7, + 0x5a4: 0x4a97, 0x5a5: 0x4aaf, 0x5a6: 0x4a9f, 0x5a7: 0x4ab7, 0x5a8: 0x4903, 0x5a9: 0x4909, + 0x5aa: 0x49ff, 0x5ab: 0x4a17, 0x5ac: 0x4a07, 0x5ad: 0x4a1f, 0x5ae: 0x4a0f, 0x5af: 0x4a27, + 0x5b0: 0x4987, 0x5b1: 0x498d, 0x5b2: 0x3f59, 0x5b3: 0x3f71, 0x5b4: 0x3f61, 0x5b5: 0x3f79, + 0x5b6: 0x3f69, 0x5b7: 0x3f81, 0x5b8: 0x490f, 0x5b9: 0x4915, 0x5ba: 0x3e59, 0x5bb: 0x3e71, + 0x5bc: 0x3e61, 0x5bd: 0x3e79, 0x5be: 0x3e69, 0x5bf: 0x3e81, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4993, 0x5c1: 0x4999, 0x5c2: 0x3f89, 0x5c3: 0x3f99, 0x5c4: 0x3f91, 0x5c5: 0x3fa1, + 0x5c8: 0x491b, 0x5c9: 0x4921, 0x5ca: 0x3e89, 0x5cb: 0x3e99, + 0x5cc: 0x3e91, 0x5cd: 0x3ea1, 0x5d0: 0x49a5, 0x5d1: 0x49ab, + 0x5d2: 0x3fc1, 0x5d3: 0x3fd9, 0x5d4: 0x3fc9, 0x5d5: 0x3fe1, 0x5d6: 0x3fd1, 0x5d7: 0x3fe9, + 0x5d9: 0x4927, 0x5db: 0x3ea9, 0x5dd: 0x3eb1, + 0x5df: 0x3eb9, 0x5e0: 0x49bd, 0x5e1: 0x49c3, 0x5e2: 0x4abf, 0x5e3: 0x4ad7, + 0x5e4: 0x4ac7, 0x5e5: 0x4adf, 0x5e6: 0x4acf, 0x5e7: 0x4ae7, 0x5e8: 0x492d, 0x5e9: 0x4933, + 0x5ea: 0x4a2f, 0x5eb: 0x4a47, 0x5ec: 0x4a37, 0x5ed: 0x4a4f, 0x5ee: 0x4a3f, 0x5ef: 0x4a57, + 0x5f0: 0x4939, 0x5f1: 0x445f, 0x5f2: 0x37d2, 0x5f3: 0x4465, 0x5f4: 0x4963, 0x5f5: 0x446b, + 0x5f6: 0x37e4, 0x5f7: 0x4471, 0x5f8: 0x3802, 0x5f9: 0x4477, 0x5fa: 0x381a, 0x5fb: 0x447d, + 0x5fc: 0x49b1, 0x5fd: 0x4483, + // Block 0x18, offset 0x600 + 0x600: 0x3ee1, 0x601: 0x3ee9, 0x602: 0x42c5, 0x603: 
0x42e3, 0x604: 0x42cf, 0x605: 0x42ed, + 0x606: 0x42d9, 0x607: 0x42f7, 0x608: 0x3e19, 0x609: 0x3e21, 0x60a: 0x4211, 0x60b: 0x422f, + 0x60c: 0x421b, 0x60d: 0x4239, 0x60e: 0x4225, 0x60f: 0x4243, 0x610: 0x3f29, 0x611: 0x3f31, + 0x612: 0x4301, 0x613: 0x431f, 0x614: 0x430b, 0x615: 0x4329, 0x616: 0x4315, 0x617: 0x4333, + 0x618: 0x3e49, 0x619: 0x3e51, 0x61a: 0x424d, 0x61b: 0x426b, 0x61c: 0x4257, 0x61d: 0x4275, + 0x61e: 0x4261, 0x61f: 0x427f, 0x620: 0x4001, 0x621: 0x4009, 0x622: 0x433d, 0x623: 0x435b, + 0x624: 0x4347, 0x625: 0x4365, 0x626: 0x4351, 0x627: 0x436f, 0x628: 0x3ec1, 0x629: 0x3ec9, + 0x62a: 0x4289, 0x62b: 0x42a7, 0x62c: 0x4293, 0x62d: 0x42b1, 0x62e: 0x429d, 0x62f: 0x42bb, + 0x630: 0x37c6, 0x631: 0x37c0, 0x632: 0x3ed1, 0x633: 0x37cc, 0x634: 0x3ed9, + 0x636: 0x4951, 0x637: 0x3ef1, 0x638: 0x3736, 0x639: 0x3730, 0x63a: 0x3724, 0x63b: 0x442f, + 0x63c: 0x373c, 0x63d: 0x8100, 0x63e: 0x0257, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x36e8, 0x642: 0x3f19, 0x643: 0x37de, 0x644: 0x3f21, + 0x646: 0x497b, 0x647: 0x3f39, 0x648: 0x3742, 0x649: 0x4435, 0x64a: 0x374e, 0x64b: 0x443b, + 0x64c: 0x375a, 0x64d: 0x3cd0, 0x64e: 0x3cd7, 0x64f: 0x3cde, 0x650: 0x37f6, 0x651: 0x37f0, + 0x652: 0x3f41, 0x653: 0x4625, 0x656: 0x37fc, 0x657: 0x3f51, + 0x658: 0x3772, 0x659: 0x376c, 0x65a: 0x3760, 0x65b: 0x4441, 0x65d: 0x3ce5, + 0x65e: 0x3cec, 0x65f: 0x3cf3, 0x660: 0x382c, 0x661: 0x3826, 0x662: 0x3fa9, 0x663: 0x462d, + 0x664: 0x380e, 0x665: 0x3814, 0x666: 0x3832, 0x667: 0x3fb9, 0x668: 0x37a2, 0x669: 0x379c, + 0x66a: 0x3790, 0x66b: 0x444d, 0x66c: 0x378a, 0x66d: 0x36dc, 0x66e: 0x4429, 0x66f: 0x0081, + 0x672: 0x3ff1, 0x673: 0x3838, 0x674: 0x3ff9, + 0x676: 0x49c9, 0x677: 0x4011, 0x678: 0x377e, 0x679: 0x4447, 0x67a: 0x37ae, 0x67b: 0x4459, + 0x67c: 0x37ba, 0x67d: 0x4397, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3d47, 0x683: 0xa000, 0x684: 0x3d4e, 0x685: 0xa000, + 0x687: 0x3d55, 0x688: 0xa000, 0x689: 0x3d5c, + 0x68d: 0xa000, + 0x6a0: 0x30a6, 0x6a1: 0xa000, 0x6a2: 0x3d6a, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3d63, 0x6ae: 0x30a1, 0x6af: 0x30ab, + 0x6b0: 0x3d71, 0x6b1: 0x3d78, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3d7f, 0x6b5: 0x3d86, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3d8d, 0x6b9: 0x3d94, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3d9b, 0x6c1: 0x3da2, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3db7, 0x6c5: 0x3dbe, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3dc5, 0x6c9: 0x3dcc, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3de1, 0x6ed: 0x3de8, 0x6ee: 0x3def, 0x6ef: 0x3df6, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x4049, 0x70d: 0xa000, 0x70e: 0x4051, 0x70f: 0xa000, 0x710: 0x4059, 0x711: 0xa000, + 0x712: 0x4061, 0x713: 0xa000, 0x714: 0x4069, 0x715: 0xa000, 0x716: 0x4071, 0x717: 0xa000, + 0x718: 0x4079, 0x719: 0xa000, 0x71a: 0x4081, 0x71b: 0xa000, 0x71c: 0x4089, 0x71d: 0xa000, + 0x71e: 0x4091, 0x71f: 0xa000, 0x720: 0x4099, 0x721: 0xa000, 0x722: 0x40a1, + 0x724: 0xa000, 0x725: 0x40a9, 0x726: 0xa000, 0x727: 0x40b1, 0x728: 0xa000, 0x729: 0x40b9, + 0x72f: 0xa000, + 0x730: 0x40c1, 0x731: 0x40c9, 0x732: 0xa000, 0x733: 0x40d1, 0x734: 0x40d9, 0x735: 0xa000, + 0x736: 0x40e1, 0x737: 0x40e9, 0x738: 0xa000, 0x739: 0x40f1, 0x73a: 0x40f9, 0x73b: 0xa000, + 0x73c: 0x4101, 0x73d: 0x4109, + // Block 0x1d, offset 0x740 + 0x754: 0x4041, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 
0x8100, 0x75d: 0xa000, + 0x75e: 0x4111, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x4121, 0x76d: 0xa000, 0x76e: 0x4129, 0x76f: 0xa000, + 0x770: 0x4131, 0x771: 0xa000, 0x772: 0x4139, 0x773: 0xa000, 0x774: 0x4141, 0x775: 0xa000, + 0x776: 0x4149, 0x777: 0xa000, 0x778: 0x4151, 0x779: 0xa000, 0x77a: 0x4159, 0x77b: 0xa000, + 0x77c: 0x4161, 0x77d: 0xa000, 0x77e: 0x4169, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4171, 0x781: 0xa000, 0x782: 0x4179, 0x784: 0xa000, 0x785: 0x4181, + 0x786: 0xa000, 0x787: 0x4189, 0x788: 0xa000, 0x789: 0x4191, + 0x78f: 0xa000, 0x790: 0x4199, 0x791: 0x41a1, + 0x792: 0xa000, 0x793: 0x41a9, 0x794: 0x41b1, 0x795: 0xa000, 0x796: 0x41b9, 0x797: 0x41c1, + 0x798: 0xa000, 0x799: 0x41c9, 0x79a: 0x41d1, 0x79b: 0xa000, 0x79c: 0x41d9, 0x79d: 0x41e1, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x4119, + 0x7b7: 0x41e9, 0x7b8: 0x41f1, 0x7b9: 0x41f9, 0x7ba: 0x4201, + 0x7bd: 0xa000, 0x7be: 0x4209, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1472, 0x7c1: 0x0df6, 0x7c2: 0x14ce, 0x7c3: 0x149a, 0x7c4: 0x0f52, 0x7c5: 0x07e6, + 0x7c6: 0x09da, 0x7c7: 0x1726, 0x7c8: 0x1726, 0x7c9: 0x0b06, 0x7ca: 0x155a, 0x7cb: 0x0a3e, + 0x7cc: 0x0b02, 0x7cd: 0x0cea, 0x7ce: 0x10ca, 0x7cf: 0x125a, 0x7d0: 0x1392, 0x7d1: 0x13ce, + 0x7d2: 0x1402, 0x7d3: 0x1516, 0x7d4: 0x0e6e, 0x7d5: 0x0efa, 0x7d6: 0x0fa6, 0x7d7: 0x103e, + 0x7d8: 0x135a, 0x7d9: 0x1542, 0x7da: 0x166e, 0x7db: 0x080a, 0x7dc: 0x09ae, 0x7dd: 0x0e82, + 0x7de: 0x0fca, 0x7df: 0x138e, 0x7e0: 0x16be, 0x7e1: 0x0bae, 0x7e2: 0x0f72, 0x7e3: 0x137e, + 0x7e4: 0x1412, 0x7e5: 0x0d1e, 0x7e6: 0x12b6, 0x7e7: 0x13da, 0x7e8: 0x0c1a, 0x7e9: 0x0e0a, + 0x7ea: 0x0f12, 0x7eb: 0x1016, 0x7ec: 0x1522, 0x7ed: 0x084a, 0x7ee: 0x08e2, 0x7ef: 0x094e, + 0x7f0: 0x0d86, 0x7f1: 0x0e7a, 0x7f2: 0x0fc6, 0x7f3: 0x10ea, 0x7f4: 0x1272, 0x7f5: 0x1386, + 0x7f6: 0x139e, 0x7f7: 0x14c2, 0x7f8: 0x15ea, 0x7f9: 0x169e, 0x7fa: 0x16ba, 0x7fb: 0x1126, + 0x7fc: 0x1166, 0x7fd: 0x121e, 0x7fe: 0x133e, 0x7ff: 0x1576, + // Block 0x20, offset 0x800 + 0x800: 0x16c6, 0x801: 0x1446, 0x802: 0x0ac2, 0x803: 0x0c36, 0x804: 0x11d6, 0x805: 0x1296, + 0x806: 0x0ffa, 0x807: 0x112e, 0x808: 0x1492, 0x809: 0x15e2, 0x80a: 0x0abe, 0x80b: 0x0b8a, + 0x80c: 0x0e72, 0x80d: 0x0f26, 0x80e: 0x0f5a, 0x80f: 0x120e, 0x810: 0x1236, 0x811: 0x15a2, + 0x812: 0x094a, 0x813: 0x12a2, 0x814: 0x08ee, 0x815: 0x08ea, 0x816: 0x1192, 0x817: 0x1222, + 0x818: 0x1356, 0x819: 0x15aa, 0x81a: 0x1462, 0x81b: 0x0d22, 0x81c: 0x0e6e, 0x81d: 0x1452, + 0x81e: 0x07f2, 0x81f: 0x0b5e, 0x820: 0x0c8e, 0x821: 0x102a, 0x822: 0x10aa, 0x823: 0x096e, + 0x824: 0x1136, 0x825: 0x085a, 0x826: 0x0c72, 0x827: 0x07d2, 0x828: 0x0ee6, 0x829: 0x0d9e, + 0x82a: 0x120a, 0x82b: 0x09c2, 0x82c: 0x0aae, 0x82d: 0x10f6, 0x82e: 0x135e, 0x82f: 0x1436, + 0x830: 0x0eb2, 0x831: 0x14f2, 0x832: 0x0ede, 0x833: 0x0d32, 0x834: 0x1316, 0x835: 0x0d52, + 0x836: 0x10a6, 0x837: 0x0826, 0x838: 0x08a2, 0x839: 0x08e6, 0x83a: 0x0e4e, 0x83b: 0x11f6, + 0x83c: 0x12ee, 0x83d: 0x1442, 0x83e: 0x1556, 0x83f: 0x0956, + // Block 0x21, offset 0x840 + 0x840: 0x0a0a, 0x841: 0x0b12, 0x842: 0x0c2a, 0x843: 0x0dba, 0x844: 0x0f76, 0x845: 0x113a, + 0x846: 0x1592, 0x847: 0x1676, 0x848: 0x16ca, 0x849: 0x16e2, 0x84a: 0x0932, 0x84b: 0x0dee, + 0x84c: 0x0e9e, 0x84d: 0x14e6, 0x84e: 0x0bf6, 0x84f: 0x0cd2, 0x850: 0x0cee, 0x851: 0x0d7e, + 0x852: 0x0f66, 0x853: 0x0fb2, 0x854: 0x1062, 0x855: 0x1186, 0x856: 0x122a, 0x857: 0x128e, + 0x858: 0x14d6, 0x859: 0x1366, 0x85a: 0x14fe, 0x85b: 0x157a, 0x85c: 0x090a, 0x85d: 0x0936, + 0x85e: 0x0a1e, 0x85f: 0x0fa2, 0x860: 0x13ee, 0x861: 0x1436, 0x862: 0x0c16, 0x863: 
0x0c86, + 0x864: 0x0d4a, 0x865: 0x0eaa, 0x866: 0x11d2, 0x867: 0x101e, 0x868: 0x0836, 0x869: 0x0a7a, + 0x86a: 0x0b5e, 0x86b: 0x0bc2, 0x86c: 0x0c92, 0x86d: 0x103a, 0x86e: 0x1056, 0x86f: 0x1266, + 0x870: 0x1286, 0x871: 0x155e, 0x872: 0x15de, 0x873: 0x15ee, 0x874: 0x162a, 0x875: 0x084e, + 0x876: 0x117a, 0x877: 0x154a, 0x878: 0x15c6, 0x879: 0x0caa, 0x87a: 0x0812, 0x87b: 0x0872, + 0x87c: 0x0b62, 0x87d: 0x0b82, 0x87e: 0x0daa, 0x87f: 0x0e6e, + // Block 0x22, offset 0x880 + 0x880: 0x0fbe, 0x881: 0x10c6, 0x882: 0x1372, 0x883: 0x1512, 0x884: 0x171e, 0x885: 0x0dde, + 0x886: 0x159e, 0x887: 0x092e, 0x888: 0x0e2a, 0x889: 0x0e36, 0x88a: 0x0f0a, 0x88b: 0x0f42, + 0x88c: 0x1046, 0x88d: 0x10a2, 0x88e: 0x1122, 0x88f: 0x1206, 0x890: 0x1636, 0x891: 0x08aa, + 0x892: 0x0cfe, 0x893: 0x15ae, 0x894: 0x0862, 0x895: 0x0ba6, 0x896: 0x0f2a, 0x897: 0x14da, + 0x898: 0x0c62, 0x899: 0x0cb2, 0x89a: 0x0e3e, 0x89b: 0x102a, 0x89c: 0x15b6, 0x89d: 0x0912, + 0x89e: 0x09fa, 0x89f: 0x0b92, 0x8a0: 0x0dce, 0x8a1: 0x0e1a, 0x8a2: 0x0e5a, 0x8a3: 0x0eee, + 0x8a4: 0x1042, 0x8a5: 0x10b6, 0x8a6: 0x1252, 0x8a7: 0x13f2, 0x8a8: 0x13fe, 0x8a9: 0x1552, + 0x8aa: 0x15d2, 0x8ab: 0x097e, 0x8ac: 0x0f46, 0x8ad: 0x09fe, 0x8ae: 0x0fc2, 0x8af: 0x1066, + 0x8b0: 0x1382, 0x8b1: 0x15ba, 0x8b2: 0x16a6, 0x8b3: 0x16ce, 0x8b4: 0x0e32, 0x8b5: 0x0f22, + 0x8b6: 0x12be, 0x8b7: 0x11b2, 0x8b8: 0x11be, 0x8b9: 0x11e2, 0x8ba: 0x1012, 0x8bb: 0x0f9a, + 0x8bc: 0x145e, 0x8bd: 0x082e, 0x8be: 0x1326, 0x8bf: 0x0916, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0906, 0x8c1: 0x0c06, 0x8c2: 0x0d26, 0x8c3: 0x11ee, 0x8c4: 0x0b4e, 0x8c5: 0x0efe, + 0x8c6: 0x0dea, 0x8c7: 0x14e2, 0x8c8: 0x13e2, 0x8c9: 0x15a6, 0x8ca: 0x141e, 0x8cb: 0x0c22, + 0x8cc: 0x0882, 0x8cd: 0x0a56, 0x8d0: 0x0aaa, + 0x8d2: 0x0dda, 0x8d5: 0x08f2, 0x8d6: 0x101a, 0x8d7: 0x10de, + 0x8d8: 0x1142, 0x8d9: 0x115e, 0x8da: 0x1162, 0x8db: 0x1176, 0x8dc: 0x15f6, 0x8dd: 0x11e6, + 0x8de: 0x126a, 0x8e0: 0x138a, 0x8e2: 0x144e, + 0x8e5: 0x1502, 0x8e6: 0x152e, + 0x8ea: 0x164a, 0x8eb: 0x164e, 0x8ec: 0x1652, 0x8ed: 0x16b6, 0x8ee: 0x1526, 0x8ef: 0x15c2, + 0x8f0: 0x0852, 0x8f1: 0x0876, 0x8f2: 0x088a, 0x8f3: 0x0946, 0x8f4: 0x0952, 0x8f5: 0x0992, + 0x8f6: 0x0a46, 0x8f7: 0x0a62, 0x8f8: 0x0a6a, 0x8f9: 0x0aa6, 0x8fa: 0x0ab2, 0x8fb: 0x0b8e, + 0x8fc: 0x0b96, 0x8fd: 0x0c9e, 0x8fe: 0x0cc6, 0x8ff: 0x0cce, + // Block 0x24, offset 0x900 + 0x900: 0x0ce6, 0x901: 0x0d92, 0x902: 0x0dc2, 0x903: 0x0de2, 0x904: 0x0e52, 0x905: 0x0f16, + 0x906: 0x0f32, 0x907: 0x0f62, 0x908: 0x0fb6, 0x909: 0x0fd6, 0x90a: 0x104a, 0x90b: 0x112a, + 0x90c: 0x1146, 0x90d: 0x114e, 0x90e: 0x114a, 0x90f: 0x1152, 0x910: 0x1156, 0x911: 0x115a, + 0x912: 0x116e, 0x913: 0x1172, 0x914: 0x1196, 0x915: 0x11aa, 0x916: 0x11c6, 0x917: 0x122a, + 0x918: 0x1232, 0x919: 0x123a, 0x91a: 0x124e, 0x91b: 0x1276, 0x91c: 0x12c6, 0x91d: 0x12fa, + 0x91e: 0x12fa, 0x91f: 0x1362, 0x920: 0x140a, 0x921: 0x1422, 0x922: 0x1456, 0x923: 0x145a, + 0x924: 0x149e, 0x925: 0x14a2, 0x926: 0x14fa, 0x927: 0x1502, 0x928: 0x15d6, 0x929: 0x161a, + 0x92a: 0x1632, 0x92b: 0x0c96, 0x92c: 0x184b, 0x92d: 0x12de, + 0x930: 0x07da, 0x931: 0x08de, 0x932: 0x089e, 0x933: 0x0846, 0x934: 0x0886, 0x935: 0x08b2, + 0x936: 0x0942, 0x937: 0x095e, 0x938: 0x0a46, 0x939: 0x0a32, 0x93a: 0x0a42, 0x93b: 0x0a5e, + 0x93c: 0x0aaa, 0x93d: 0x0aba, 0x93e: 0x0afe, 0x93f: 0x0b0a, + // Block 0x25, offset 0x940 + 0x940: 0x0b26, 0x941: 0x0b36, 0x942: 0x0c1e, 0x943: 0x0c26, 0x944: 0x0c56, 0x945: 0x0c76, + 0x946: 0x0ca6, 0x947: 0x0cbe, 0x948: 0x0cae, 0x949: 0x0cce, 0x94a: 0x0cc2, 0x94b: 0x0ce6, + 0x94c: 0x0d02, 0x94d: 0x0d5a, 0x94e: 0x0d66, 0x94f: 0x0d6e, 0x950: 0x0d96, 
0x951: 0x0dda, + 0x952: 0x0e0a, 0x953: 0x0e0e, 0x954: 0x0e22, 0x955: 0x0ea2, 0x956: 0x0eb2, 0x957: 0x0f0a, + 0x958: 0x0f56, 0x959: 0x0f4e, 0x95a: 0x0f62, 0x95b: 0x0f7e, 0x95c: 0x0fb6, 0x95d: 0x110e, + 0x95e: 0x0fda, 0x95f: 0x100e, 0x960: 0x101a, 0x961: 0x105a, 0x962: 0x1076, 0x963: 0x109a, + 0x964: 0x10be, 0x965: 0x10c2, 0x966: 0x10de, 0x967: 0x10e2, 0x968: 0x10f2, 0x969: 0x1106, + 0x96a: 0x1102, 0x96b: 0x1132, 0x96c: 0x11ae, 0x96d: 0x11c6, 0x96e: 0x11de, 0x96f: 0x1216, + 0x970: 0x122a, 0x971: 0x1246, 0x972: 0x1276, 0x973: 0x132a, 0x974: 0x1352, 0x975: 0x13c6, + 0x976: 0x140e, 0x977: 0x141a, 0x978: 0x1422, 0x979: 0x143a, 0x97a: 0x144e, 0x97b: 0x143e, + 0x97c: 0x1456, 0x97d: 0x1452, 0x97e: 0x144a, 0x97f: 0x145a, + // Block 0x26, offset 0x980 + 0x980: 0x1466, 0x981: 0x14a2, 0x982: 0x14de, 0x983: 0x150e, 0x984: 0x1546, 0x985: 0x1566, + 0x986: 0x15b2, 0x987: 0x15d6, 0x988: 0x15f6, 0x989: 0x160a, 0x98a: 0x161a, 0x98b: 0x1626, + 0x98c: 0x1632, 0x98d: 0x1686, 0x98e: 0x1726, 0x98f: 0x17e2, 0x990: 0x17dd, 0x991: 0x180f, + 0x992: 0x0702, 0x993: 0x072a, 0x994: 0x072e, 0x995: 0x1891, 0x996: 0x18be, 0x997: 0x1936, + 0x998: 0x1712, 0x999: 0x1722, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x07f6, 0x9c1: 0x07ee, 0x9c2: 0x07fe, 0x9c3: 0x1774, 0x9c4: 0x0842, 0x9c5: 0x0852, + 0x9c6: 0x0856, 0x9c7: 0x085e, 0x9c8: 0x0866, 0x9c9: 0x086a, 0x9ca: 0x0876, 0x9cb: 0x086e, + 0x9cc: 0x06ae, 0x9cd: 0x1788, 0x9ce: 0x088a, 0x9cf: 0x088e, 0x9d0: 0x0892, 0x9d1: 0x08ae, + 0x9d2: 0x1779, 0x9d3: 0x06b2, 0x9d4: 0x089a, 0x9d5: 0x08ba, 0x9d6: 0x1783, 0x9d7: 0x08ca, + 0x9d8: 0x08d2, 0x9d9: 0x0832, 0x9da: 0x08da, 0x9db: 0x08de, 0x9dc: 0x195e, 0x9dd: 0x08fa, + 0x9de: 0x0902, 0x9df: 0x06ba, 0x9e0: 0x091a, 0x9e1: 0x091e, 0x9e2: 0x0926, 0x9e3: 0x092a, + 0x9e4: 0x06be, 0x9e5: 0x0942, 0x9e6: 0x0946, 0x9e7: 0x0952, 0x9e8: 0x095e, 0x9e9: 0x0962, + 0x9ea: 0x0966, 0x9eb: 0x096e, 0x9ec: 0x098e, 0x9ed: 0x0992, 0x9ee: 0x099a, 0x9ef: 0x09aa, + 0x9f0: 0x09b2, 0x9f1: 0x09b6, 0x9f2: 0x09b6, 0x9f3: 0x09b6, 0x9f4: 0x1797, 0x9f5: 0x0f8e, + 0x9f6: 0x09ca, 0x9f7: 0x09d2, 0x9f8: 0x179c, 0x9f9: 0x09de, 0x9fa: 0x09e6, 0x9fb: 0x09ee, + 0x9fc: 0x0a16, 0x9fd: 0x0a02, 0x9fe: 0x0a0e, 0x9ff: 0x0a12, + // Block 0x28, offset 0xa00 + 0xa00: 0x0a1a, 0xa01: 0x0a22, 0xa02: 0x0a26, 0xa03: 0x0a2e, 0xa04: 0x0a36, 0xa05: 0x0a3a, + 0xa06: 0x0a3a, 0xa07: 0x0a42, 0xa08: 0x0a4a, 0xa09: 0x0a4e, 0xa0a: 0x0a5a, 0xa0b: 0x0a7e, + 0xa0c: 0x0a62, 0xa0d: 0x0a82, 0xa0e: 0x0a66, 0xa0f: 0x0a6e, 0xa10: 0x0906, 0xa11: 0x0aca, + 0xa12: 0x0a92, 0xa13: 0x0a96, 0xa14: 0x0a9a, 0xa15: 0x0a8e, 0xa16: 0x0aa2, 0xa17: 0x0a9e, + 0xa18: 0x0ab6, 0xa19: 0x17a1, 0xa1a: 0x0ad2, 0xa1b: 0x0ad6, 0xa1c: 0x0ade, 0xa1d: 0x0aea, + 0xa1e: 0x0af2, 0xa1f: 0x0b0e, 0xa20: 0x17a6, 0xa21: 0x17ab, 0xa22: 0x0b1a, 0xa23: 0x0b1e, + 0xa24: 0x0b22, 0xa25: 0x0b16, 0xa26: 0x0b2a, 0xa27: 0x06c2, 0xa28: 0x06c6, 0xa29: 0x0b32, + 0xa2a: 0x0b3a, 0xa2b: 0x0b3a, 0xa2c: 0x17b0, 0xa2d: 0x0b56, 0xa2e: 0x0b5a, 0xa2f: 0x0b5e, + 0xa30: 0x0b66, 0xa31: 0x17b5, 0xa32: 0x0b6e, 0xa33: 0x0b72, 0xa34: 0x0c4a, 0xa35: 0x0b7a, + 0xa36: 0x06ca, 0xa37: 0x0b86, 0xa38: 0x0b96, 0xa39: 0x0ba2, 0xa3a: 0x0b9e, 0xa3b: 0x17bf, + 0xa3c: 0x0baa, 0xa3d: 0x17c4, 0xa3e: 0x0bb6, 0xa3f: 0x0bb2, + // Block 0x29, offset 0xa40 + 0xa40: 0x0bba, 0xa41: 0x0bca, 0xa42: 0x0bce, 0xa43: 0x06ce, 0xa44: 0x0bde, 0xa45: 0x0be6, + 0xa46: 0x0bea, 0xa47: 0x0bee, 0xa48: 0x06d2, 0xa49: 0x17c9, 0xa4a: 0x06d6, 0xa4b: 0x0c0a, + 0xa4c: 0x0c0e, 0xa4d: 0x0c12, 0xa4e: 0x0c1a, 0xa4f: 0x1990, 0xa50: 0x0c32, 0xa51: 0x17d3, + 0xa52: 0x17d3, 0xa53: 0x12d2, 0xa54: 0x0c42, 0xa55: 0x0c42, 0xa56: 
0x06da, 0xa57: 0x17f6, + 0xa58: 0x18c8, 0xa59: 0x0c52, 0xa5a: 0x0c5a, 0xa5b: 0x06de, 0xa5c: 0x0c6e, 0xa5d: 0x0c7e, + 0xa5e: 0x0c82, 0xa5f: 0x0c8a, 0xa60: 0x0c9a, 0xa61: 0x06e6, 0xa62: 0x06e2, 0xa63: 0x0c9e, + 0xa64: 0x17d8, 0xa65: 0x0ca2, 0xa66: 0x0cb6, 0xa67: 0x0cba, 0xa68: 0x0cbe, 0xa69: 0x0cba, + 0xa6a: 0x0cca, 0xa6b: 0x0cce, 0xa6c: 0x0cde, 0xa6d: 0x0cd6, 0xa6e: 0x0cda, 0xa6f: 0x0ce2, + 0xa70: 0x0ce6, 0xa71: 0x0cea, 0xa72: 0x0cf6, 0xa73: 0x0cfa, 0xa74: 0x0d12, 0xa75: 0x0d1a, + 0xa76: 0x0d2a, 0xa77: 0x0d3e, 0xa78: 0x17e7, 0xa79: 0x0d3a, 0xa7a: 0x0d2e, 0xa7b: 0x0d46, + 0xa7c: 0x0d4e, 0xa7d: 0x0d62, 0xa7e: 0x17ec, 0xa7f: 0x0d6a, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0d5e, 0xa81: 0x0d56, 0xa82: 0x06ea, 0xa83: 0x0d72, 0xa84: 0x0d7a, 0xa85: 0x0d82, + 0xa86: 0x0d76, 0xa87: 0x06ee, 0xa88: 0x0d92, 0xa89: 0x0d9a, 0xa8a: 0x17f1, 0xa8b: 0x0dc6, + 0xa8c: 0x0dfa, 0xa8d: 0x0dd6, 0xa8e: 0x06fa, 0xa8f: 0x0de2, 0xa90: 0x06f6, 0xa91: 0x06f2, + 0xa92: 0x08be, 0xa93: 0x08c2, 0xa94: 0x0dfe, 0xa95: 0x0de6, 0xa96: 0x12a6, 0xa97: 0x075e, + 0xa98: 0x0e0a, 0xa99: 0x0e0e, 0xa9a: 0x0e12, 0xa9b: 0x0e26, 0xa9c: 0x0e1e, 0xa9d: 0x180a, + 0xa9e: 0x06fe, 0xa9f: 0x0e3a, 0xaa0: 0x0e2e, 0xaa1: 0x0e4a, 0xaa2: 0x0e52, 0xaa3: 0x1814, + 0xaa4: 0x0e56, 0xaa5: 0x0e42, 0xaa6: 0x0e5e, 0xaa7: 0x0702, 0xaa8: 0x0e62, 0xaa9: 0x0e66, + 0xaaa: 0x0e6a, 0xaab: 0x0e76, 0xaac: 0x1819, 0xaad: 0x0e7e, 0xaae: 0x0706, 0xaaf: 0x0e8a, + 0xab0: 0x181e, 0xab1: 0x0e8e, 0xab2: 0x070a, 0xab3: 0x0e9a, 0xab4: 0x0ea6, 0xab5: 0x0eb2, + 0xab6: 0x0eb6, 0xab7: 0x1823, 0xab8: 0x17ba, 0xab9: 0x1828, 0xaba: 0x0ed6, 0xabb: 0x182d, + 0xabc: 0x0ee2, 0xabd: 0x0eea, 0xabe: 0x0eda, 0xabf: 0x0ef6, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0f06, 0xac1: 0x0f16, 0xac2: 0x0f0a, 0xac3: 0x0f0e, 0xac4: 0x0f1a, 0xac5: 0x0f1e, + 0xac6: 0x1832, 0xac7: 0x0f02, 0xac8: 0x0f36, 0xac9: 0x0f3a, 0xaca: 0x070e, 0xacb: 0x0f4e, + 0xacc: 0x0f4a, 0xacd: 0x1837, 0xace: 0x0f2e, 0xacf: 0x0f6a, 0xad0: 0x183c, 0xad1: 0x1841, + 0xad2: 0x0f6e, 0xad3: 0x0f82, 0xad4: 0x0f7e, 0xad5: 0x0f7a, 0xad6: 0x0712, 0xad7: 0x0f86, + 0xad8: 0x0f96, 0xad9: 0x0f92, 0xada: 0x0f9e, 0xadb: 0x177e, 0xadc: 0x0fae, 0xadd: 0x1846, + 0xade: 0x0fba, 0xadf: 0x1850, 0xae0: 0x0fce, 0xae1: 0x0fda, 0xae2: 0x0fee, 0xae3: 0x1855, + 0xae4: 0x1002, 0xae5: 0x1006, 0xae6: 0x185a, 0xae7: 0x185f, 0xae8: 0x1022, 0xae9: 0x1032, + 0xaea: 0x0716, 0xaeb: 0x1036, 0xaec: 0x071a, 0xaed: 0x071a, 0xaee: 0x104e, 0xaef: 0x1052, + 0xaf0: 0x105a, 0xaf1: 0x105e, 0xaf2: 0x106a, 0xaf3: 0x071e, 0xaf4: 0x1082, 0xaf5: 0x1864, + 0xaf6: 0x109e, 0xaf7: 0x1869, 0xaf8: 0x10aa, 0xaf9: 0x17ce, 0xafa: 0x10ba, 0xafb: 0x186e, + 0xafc: 0x1873, 0xafd: 0x1878, 0xafe: 0x0722, 0xaff: 0x0726, + // Block 0x2c, offset 0xb00 + 0xb00: 0x10f2, 0xb01: 0x1882, 0xb02: 0x187d, 0xb03: 0x1887, 0xb04: 0x188c, 0xb05: 0x10fa, + 0xb06: 0x10fe, 0xb07: 0x10fe, 0xb08: 0x1106, 0xb09: 0x072e, 0xb0a: 0x110a, 0xb0b: 0x0732, + 0xb0c: 0x0736, 0xb0d: 0x1896, 0xb0e: 0x111e, 0xb0f: 0x1126, 0xb10: 0x1132, 0xb11: 0x073a, + 0xb12: 0x189b, 0xb13: 0x1156, 0xb14: 0x18a0, 0xb15: 0x18a5, 0xb16: 0x1176, 0xb17: 0x118e, + 0xb18: 0x073e, 0xb19: 0x1196, 0xb1a: 0x119a, 0xb1b: 0x119e, 0xb1c: 0x18aa, 0xb1d: 0x18af, + 0xb1e: 0x18af, 0xb1f: 0x11b6, 0xb20: 0x0742, 0xb21: 0x18b4, 0xb22: 0x11ca, 0xb23: 0x11ce, + 0xb24: 0x0746, 0xb25: 0x18b9, 0xb26: 0x11ea, 0xb27: 0x074a, 0xb28: 0x11fa, 0xb29: 0x11f2, + 0xb2a: 0x1202, 0xb2b: 0x18c3, 0xb2c: 0x121a, 0xb2d: 0x074e, 0xb2e: 0x1226, 0xb2f: 0x122e, + 0xb30: 0x123e, 0xb31: 0x0752, 0xb32: 0x18cd, 0xb33: 0x18d2, 0xb34: 0x0756, 0xb35: 0x18d7, + 0xb36: 0x1256, 0xb37: 0x18dc, 
0xb38: 0x1262, 0xb39: 0x126e, 0xb3a: 0x1276, 0xb3b: 0x18e1, + 0xb3c: 0x18e6, 0xb3d: 0x128a, 0xb3e: 0x18eb, 0xb3f: 0x1292, + // Block 0x2d, offset 0xb40 + 0xb40: 0x17fb, 0xb41: 0x075a, 0xb42: 0x12aa, 0xb43: 0x12ae, 0xb44: 0x0762, 0xb45: 0x12b2, + 0xb46: 0x0b2e, 0xb47: 0x18f0, 0xb48: 0x18f5, 0xb49: 0x1800, 0xb4a: 0x1805, 0xb4b: 0x12d2, + 0xb4c: 0x12d6, 0xb4d: 0x14ee, 0xb4e: 0x0766, 0xb4f: 0x1302, 0xb50: 0x12fe, 0xb51: 0x1306, + 0xb52: 0x093a, 0xb53: 0x130a, 0xb54: 0x130e, 0xb55: 0x1312, 0xb56: 0x131a, 0xb57: 0x18fa, + 0xb58: 0x1316, 0xb59: 0x131e, 0xb5a: 0x1332, 0xb5b: 0x1336, 0xb5c: 0x1322, 0xb5d: 0x133a, + 0xb5e: 0x134e, 0xb5f: 0x1362, 0xb60: 0x132e, 0xb61: 0x1342, 0xb62: 0x1346, 0xb63: 0x134a, + 0xb64: 0x18ff, 0xb65: 0x1909, 0xb66: 0x1904, 0xb67: 0x076a, 0xb68: 0x136a, 0xb69: 0x136e, + 0xb6a: 0x1376, 0xb6b: 0x191d, 0xb6c: 0x137a, 0xb6d: 0x190e, 0xb6e: 0x076e, 0xb6f: 0x0772, + 0xb70: 0x1913, 0xb71: 0x1918, 0xb72: 0x0776, 0xb73: 0x139a, 0xb74: 0x139e, 0xb75: 0x13a2, + 0xb76: 0x13a6, 0xb77: 0x13b2, 0xb78: 0x13ae, 0xb79: 0x13ba, 0xb7a: 0x13b6, 0xb7b: 0x13c6, + 0xb7c: 0x13be, 0xb7d: 0x13c2, 0xb7e: 0x13ca, 0xb7f: 0x077a, + // Block 0x2e, offset 0xb80 + 0xb80: 0x13d2, 0xb81: 0x13d6, 0xb82: 0x077e, 0xb83: 0x13e6, 0xb84: 0x13ea, 0xb85: 0x1922, + 0xb86: 0x13f6, 0xb87: 0x13fa, 0xb88: 0x0782, 0xb89: 0x1406, 0xb8a: 0x06b6, 0xb8b: 0x1927, + 0xb8c: 0x192c, 0xb8d: 0x0786, 0xb8e: 0x078a, 0xb8f: 0x1432, 0xb90: 0x144a, 0xb91: 0x1466, + 0xb92: 0x1476, 0xb93: 0x1931, 0xb94: 0x148a, 0xb95: 0x148e, 0xb96: 0x14a6, 0xb97: 0x14b2, + 0xb98: 0x193b, 0xb99: 0x178d, 0xb9a: 0x14be, 0xb9b: 0x14ba, 0xb9c: 0x14c6, 0xb9d: 0x1792, + 0xb9e: 0x14d2, 0xb9f: 0x14de, 0xba0: 0x1940, 0xba1: 0x1945, 0xba2: 0x151e, 0xba3: 0x152a, + 0xba4: 0x1532, 0xba5: 0x194a, 0xba6: 0x1536, 0xba7: 0x1562, 0xba8: 0x156e, 0xba9: 0x1572, + 0xbaa: 0x156a, 0xbab: 0x157e, 0xbac: 0x1582, 0xbad: 0x194f, 0xbae: 0x158e, 0xbaf: 0x078e, + 0xbb0: 0x1596, 0xbb1: 0x1954, 0xbb2: 0x0792, 0xbb3: 0x15ce, 0xbb4: 0x0bbe, 0xbb5: 0x15e6, + 0xbb6: 0x1959, 0xbb7: 0x1963, 0xbb8: 0x0796, 0xbb9: 0x079a, 0xbba: 0x160e, 0xbbb: 0x1968, + 0xbbc: 0x079e, 0xbbd: 0x196d, 0xbbe: 0x1626, 0xbbf: 0x1626, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x162e, 0xbc1: 0x1972, 0xbc2: 0x1646, 0xbc3: 0x07a2, 0xbc4: 0x1656, 0xbc5: 0x1662, + 0xbc6: 0x166a, 0xbc7: 0x1672, 0xbc8: 0x07a6, 0xbc9: 0x1977, 0xbca: 0x1686, 0xbcb: 0x16a2, + 0xbcc: 0x16ae, 0xbcd: 0x07aa, 0xbce: 0x07ae, 0xbcf: 0x16b2, 0xbd0: 0x197c, 0xbd1: 0x07b2, + 0xbd2: 0x1981, 0xbd3: 0x1986, 0xbd4: 0x198b, 0xbd5: 0x16d6, 0xbd6: 0x07b6, 0xbd7: 0x16ea, + 0xbd8: 0x16f2, 0xbd9: 0x16f6, 0xbda: 0x16fe, 0xbdb: 0x1706, 0xbdc: 0x170e, 0xbdd: 0x1995, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x122: 0x3d, 0x123: 0x0d, 0x124: 0x3e, 0x125: 0x3f, 0x126: 0x40, 0x127: 0x41, + 0x128: 0x42, 0x129: 0x43, 0x12a: 0x44, 0x12b: 0x45, 0x12c: 0x40, 0x12d: 0x46, 0x12e: 0x47, 0x12f: 0x48, + 0x130: 0x44, 0x131: 0x49, 0x132: 0x4a, 0x133: 0x4b, 0x134: 0x4c, 0x135: 0x4d, 0x137: 0x4e, + 0x138: 0x4f, 0x139: 0x50, 0x13a: 0x51, 0x13b: 0x52, 0x13c: 0x53, 0x13d: 0x54, 0x13e: 0x55, 0x13f: 0x56, + // Block 0x5, offset 0x140 + 0x140: 0x57, 0x142: 0x58, 0x144: 0x59, 0x145: 0x5a, 0x146: 0x5b, 0x147: 0x5c, + 0x14d: 0x5d, + 0x15c: 0x5e, 0x15f: 0x5f, + 0x162: 0x60, 0x164: 0x61, + 0x168: 0x62, 0x169: 0x63, 0x16a: 0x64, 0x16b: 0x65, 0x16c: 0x0e, 0x16d: 0x66, 0x16e: 0x67, 0x16f: 0x68, + 0x170: 0x69, 0x173: 0x6a, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6b, 0x183: 0x6c, 0x184: 0x6d, 0x186: 0x6e, 0x187: 0x6f, + 0x188: 0x70, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x71, 0x18c: 0x72, + 0x1ab: 0x73, + 0x1b3: 0x74, 0x1b5: 0x75, 0x1b7: 0x76, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x77, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x78, 0x1c5: 0x79, + 0x1c9: 0x7a, 0x1cc: 0x7b, 0x1cd: 0x7c, + // Block 0x8, offset 0x200 + 0x219: 0x7d, 0x21a: 0x7e, 0x21b: 0x7f, + 0x220: 0x80, 0x223: 0x81, 0x224: 0x82, 0x225: 0x83, 0x226: 0x84, 0x227: 0x85, + 0x22a: 0x86, 0x22b: 0x87, 0x22f: 0x88, + 0x230: 0x89, 0x231: 0x8a, 0x232: 0x8b, 0x233: 0x8c, 0x234: 0x8d, 0x235: 0x8e, 0x236: 0x8f, 0x237: 0x89, + 0x238: 0x8a, 0x239: 0x8b, 0x23a: 0x8c, 0x23b: 0x8d, 0x23c: 0x8e, 0x23d: 0x8f, 0x23e: 0x89, 0x23f: 0x8a, + // Block 0x9, offset 0x240 + 0x240: 0x8b, 0x241: 0x8c, 0x242: 0x8d, 0x243: 0x8e, 0x244: 0x8f, 0x245: 0x89, 0x246: 0x8a, 0x247: 0x8b, + 0x248: 0x8c, 0x249: 0x8d, 0x24a: 0x8e, 0x24b: 0x8f, 0x24c: 0x89, 0x24d: 0x8a, 0x24e: 0x8b, 0x24f: 0x8c, + 0x250: 0x8d, 0x251: 0x8e, 0x252: 0x8f, 0x253: 0x89, 0x254: 0x8a, 0x255: 0x8b, 0x256: 0x8c, 0x257: 0x8d, + 0x258: 0x8e, 0x259: 0x8f, 0x25a: 0x89, 0x25b: 0x8a, 0x25c: 0x8b, 0x25d: 0x8c, 0x25e: 0x8d, 0x25f: 0x8e, + 0x260: 0x8f, 0x261: 0x89, 0x262: 0x8a, 0x263: 0x8b, 0x264: 0x8c, 0x265: 0x8d, 0x266: 0x8e, 0x267: 0x8f, + 0x268: 0x89, 0x269: 0x8a, 0x26a: 0x8b, 0x26b: 0x8c, 0x26c: 0x8d, 0x26d: 0x8e, 0x26e: 0x8f, 0x26f: 0x89, + 0x270: 0x8a, 0x271: 0x8b, 0x272: 0x8c, 0x273: 0x8d, 0x274: 0x8e, 0x275: 0x8f, 0x276: 0x89, 0x277: 0x8a, + 0x278: 0x8b, 0x279: 0x8c, 0x27a: 0x8d, 0x27b: 0x8e, 0x27c: 0x8f, 0x27d: 0x89, 0x27e: 0x8a, 0x27f: 0x8b, + // Block 0xa, offset 0x280 + 0x280: 0x8c, 0x281: 0x8d, 0x282: 0x8e, 0x283: 0x8f, 0x284: 0x89, 0x285: 0x8a, 0x286: 0x8b, 0x287: 0x8c, + 0x288: 0x8d, 0x289: 0x8e, 0x28a: 0x8f, 0x28b: 0x89, 0x28c: 0x8a, 0x28d: 0x8b, 0x28e: 0x8c, 0x28f: 0x8d, + 0x290: 0x8e, 0x291: 0x8f, 0x292: 0x89, 0x293: 0x8a, 0x294: 0x8b, 0x295: 0x8c, 0x296: 0x8d, 0x297: 0x8e, + 0x298: 0x8f, 0x299: 0x89, 0x29a: 0x8a, 0x29b: 0x8b, 0x29c: 0x8c, 0x29d: 0x8d, 0x29e: 0x8e, 0x29f: 0x8f, + 
0x2a0: 0x89, 0x2a1: 0x8a, 0x2a2: 0x8b, 0x2a3: 0x8c, 0x2a4: 0x8d, 0x2a5: 0x8e, 0x2a6: 0x8f, 0x2a7: 0x89, + 0x2a8: 0x8a, 0x2a9: 0x8b, 0x2aa: 0x8c, 0x2ab: 0x8d, 0x2ac: 0x8e, 0x2ad: 0x8f, 0x2ae: 0x89, 0x2af: 0x8a, + 0x2b0: 0x8b, 0x2b1: 0x8c, 0x2b2: 0x8d, 0x2b3: 0x8e, 0x2b4: 0x8f, 0x2b5: 0x89, 0x2b6: 0x8a, 0x2b7: 0x8b, + 0x2b8: 0x8c, 0x2b9: 0x8d, 0x2ba: 0x8e, 0x2bb: 0x8f, 0x2bc: 0x89, 0x2bd: 0x8a, 0x2be: 0x8b, 0x2bf: 0x8c, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8d, 0x2c1: 0x8e, 0x2c2: 0x8f, 0x2c3: 0x89, 0x2c4: 0x8a, 0x2c5: 0x8b, 0x2c6: 0x8c, 0x2c7: 0x8d, + 0x2c8: 0x8e, 0x2c9: 0x8f, 0x2ca: 0x89, 0x2cb: 0x8a, 0x2cc: 0x8b, 0x2cd: 0x8c, 0x2ce: 0x8d, 0x2cf: 0x8e, + 0x2d0: 0x8f, 0x2d1: 0x89, 0x2d2: 0x8a, 0x2d3: 0x8b, 0x2d4: 0x8c, 0x2d5: 0x8d, 0x2d6: 0x8e, 0x2d7: 0x8f, + 0x2d8: 0x89, 0x2d9: 0x8a, 0x2da: 0x8b, 0x2db: 0x8c, 0x2dc: 0x8d, 0x2dd: 0x8e, 0x2de: 0x90, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x91, 0x32d: 0x92, 0x32e: 0x93, + 0x331: 0x94, 0x332: 0x95, 0x333: 0x96, 0x334: 0x97, + 0x338: 0x98, 0x339: 0x99, 0x33a: 0x9a, 0x33b: 0x9b, 0x33e: 0x9c, 0x33f: 0x9d, + // Block 0xd, offset 0x340 + 0x347: 0x9e, + 0x34b: 0x9f, 0x34d: 0xa0, + 0x368: 0xa1, 0x36b: 0xa2, + 0x374: 0xa3, + 0x37a: 0xa4, 0x37b: 0xa5, 0x37d: 0xa6, 0x37e: 0xa7, + // Block 0xe, offset 0x380 + 0x381: 0xa8, 0x382: 0xa9, 0x384: 0xaa, 0x385: 0x84, 0x387: 0xab, + 0x388: 0xac, 0x38b: 0xad, 0x38c: 0xae, 0x38d: 0xaf, + 0x391: 0xb0, 0x392: 0xb1, 0x393: 0xb2, 0x396: 0xb3, 0x397: 0xb4, + 0x398: 0x75, 0x39a: 0xb5, 0x39c: 0xb6, + 0x3a0: 0xb7, 0x3a4: 0xb8, 0x3a5: 0xb9, 0x3a7: 0xba, + 0x3a8: 0xbb, 0x3a9: 0xbc, 0x3aa: 0xbd, + 0x3b0: 0x75, 0x3b5: 0xbe, 0x3b6: 0xbf, + 0x3bd: 0xc0, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xc1, 0x3ec: 0xc2, + 0x3ff: 0xc3, + // Block 0x10, offset 0x400 + 0x432: 0xc4, + // Block 0x11, offset 0x440 + 0x445: 0xc5, 0x446: 0xc6, 0x447: 0xc7, + 0x449: 0xc8, + // Block 0x12, offset 0x480 + 0x480: 0xc9, 0x482: 0xca, 0x484: 0xc2, + 0x48a: 0xcb, 0x48b: 0xcc, + 0x493: 0xcd, + 0x4a3: 0xce, 0x4a5: 0xcf, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xd0, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 163 entries, 326 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x6e, 0x76, 0x7d, 0x80, 0x88, 0x8c, 0x90, 0x92, 0x94, 0x9d, 0xa1, 0xa8, 0xad, 0xb0, 0xba, 0xbd, 0xc4, 0xcc, 0xcf, 0xd1, 0xd4, 0xd6, 0xdb, 0xec, 0xf8, 0xfa, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10a, 0x10c, 0x10f, 0x112, 0x114, 0x117, 0x11a, 0x11e, 0x124, 0x12b, 0x134, 0x136, 0x139, 0x13b, 0x146, 0x14a, 0x158, 0x15b, 0x161, 0x167, 0x172, 0x176, 0x178, 0x17a, 0x17c, 0x17e, 0x180, 0x186, 0x18a, 0x18c, 0x18e, 0x196, 0x19a, 0x19d, 0x19f, 0x1a1, 0x1a4, 0x1a7, 0x1a9, 0x1ab, 0x1ad, 0x1af, 0x1b5, 0x1b8, 0x1ba, 0x1c1, 0x1c7, 0x1cd, 0x1d5, 0x1db, 0x1e1, 0x1e7, 0x1eb, 0x1f9, 0x202, 0x205, 0x208, 0x20a, 0x20d, 0x20f, 0x213, 0x218, 0x21a, 0x21c, 0x221, 0x227, 0x229, 0x22b, 0x22d, 0x233, 0x236, 0x238, 0x23a, 0x23c, 0x242, 0x246, 0x24a, 0x252, 0x259, 0x25c, 0x25f, 0x261, 0x264, 0x26c, 0x270, 0x277, 0x27a, 0x280, 0x282, 0x285, 0x287, 0x28a, 0x28f, 0x291, 0x293, 0x295, 0x297, 0x299, 0x29c, 0x29e, 0x2a0, 0x2a2, 0x2a4, 0x2a6, 0x2a8, 0x2b5, 0x2bf, 0x2c1, 
0x2c3, 0x2c9, 0x2cb, 0x2cd, 0x2cf, 0x2d3, 0x2d5, 0x2d8} + +// nfcSparseValues: 730 entries, 2920 bytes +var nfcSparseValues = [730]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4981, lo: 0x8a, hi: 0x8a}, + {value: 0x499f, lo: 0x8b, hi: 0x8b}, + {value: 0x3808, lo: 0x8c, hi: 0x8c}, + {value: 0x3820, lo: 0x8d, hi: 0x8d}, + {value: 0x49b7, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x383e, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, 
lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0x10, offset 0x6e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x76 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x7d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x80 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x14, offset 0x88 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x8c + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x90 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x92 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x94 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xa1 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 
0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xa8 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xad + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xb0 + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xba + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xbd + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xc4 + {value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xcc + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xd1 + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x24, offset 0xd4 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xdb + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xec + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + {value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0xf8 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // 
Block 0x29, offset 0xfa + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x108 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x10a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x10c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x10f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x114 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x117 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x124 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x136 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x139 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x13b + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x146 + {value: 0x0004, lo: 0x03}, + {value: 0x052a, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, 
offset 0x14a + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x3f, offset 0x158 + {value: 0x43bc, lo: 0x02}, + {value: 0x023c, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x40, offset 0x15b + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x161 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x167 + {value: 0x62c7, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x172 + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x176 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x178 + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x46, offset 0x17a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x47, offset 0x17c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x48, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x180 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x4a, offset 0x186 + {value: 0x0000, lo: 0x03}, + {value: 0x4be0, lo: 0xb3, hi: 0xb3}, + {value: 0x4be0, lo: 0xb5, hi: 0xb6}, + {value: 0x4be0, lo: 0xba, hi: 0xbf}, + // Block 0x4b, offset 0x18a + {value: 0x0000, lo: 0x01}, + {value: 0x4be0, lo: 0x8f, hi: 0xa3}, + // Block 0x4c, offset 0x18c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4d, offset 0x18e + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4e, offset 0x196 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4f, offset 0x19a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 
0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x50, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x51, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x52, offset 0x1a1 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x53, offset 0x1a4 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x54, offset 0x1a7 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x55, offset 0x1a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x56, offset 0x1ab + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x57, offset 0x1ad + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x58, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x59, offset 0x1b5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x5a, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5b, offset 0x1ba + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5c, offset 0x1c1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5f, offset 0x1d5 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x60, offset 0x1db + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x61, offset 0x1e1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x62, offset 0x1e7 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x63, offset 0x1eb + {value: 0x0006, lo: 0x0d}, + {value: 0x44d1, lo: 0x9d, hi: 0x9d}, + 
{value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4543, lo: 0x9f, hi: 0x9f}, + {value: 0x4531, lo: 0xaa, hi: 0xab}, + {value: 0x4635, lo: 0xac, hi: 0xac}, + {value: 0x463d, lo: 0xad, hi: 0xad}, + {value: 0x4489, lo: 0xae, hi: 0xb1}, + {value: 0x44a7, lo: 0xb2, hi: 0xb4}, + {value: 0x44bf, lo: 0xb5, hi: 0xb6}, + {value: 0x44cb, lo: 0xb8, hi: 0xb8}, + {value: 0x44d7, lo: 0xb9, hi: 0xbb}, + {value: 0x44ef, lo: 0xbc, hi: 0xbc}, + {value: 0x44f5, lo: 0xbe, hi: 0xbe}, + // Block 0x64, offset 0x1f9 + {value: 0x0006, lo: 0x08}, + {value: 0x44fb, lo: 0x80, hi: 0x81}, + {value: 0x4507, lo: 0x83, hi: 0x84}, + {value: 0x4519, lo: 0x86, hi: 0x89}, + {value: 0x453d, lo: 0x8a, hi: 0x8a}, + {value: 0x44b9, lo: 0x8b, hi: 0x8b}, + {value: 0x44a1, lo: 0x8c, hi: 0x8c}, + {value: 0x44e9, lo: 0x8d, hi: 0x8d}, + {value: 0x4513, lo: 0x8e, hi: 0x8e}, + // Block 0x65, offset 0x202 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x66, offset 0x205 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x67, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x68, offset 0x20a + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x69, offset 0x20d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x6a, offset 0x20f + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6b, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6c, offset 0x218 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6d, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6e, offset 0x21c + {value: 0x0000, lo: 0x04}, + {value: 0x4be0, lo: 0x9e, hi: 0x9f}, + {value: 0x4be0, lo: 0xa3, hi: 0xa3}, + {value: 0x4be0, lo: 0xa5, hi: 0xa6}, + {value: 0x4be0, lo: 0xaa, hi: 0xaf}, + // Block 0x6f, offset 0x221 + {value: 0x0000, lo: 0x05}, + {value: 0x4be0, lo: 0x82, hi: 0x87}, + {value: 0x4be0, lo: 0x8a, hi: 0x8f}, + {value: 0x4be0, lo: 0x92, hi: 0x97}, + {value: 0x4be0, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x70, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x71, offset 0x229 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x72, offset 0x22b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x73, offset 0x22d + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x233 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x75, offset 0x236 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x76, offset 0x238 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x77, offset 0x23a + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x78, offset 0x23c + {value: 0x0000, lo: 0x05}, + {value: 
0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x79, offset 0x242 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x7a, offset 0x246 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x24a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x7c, offset 0x252 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7d, offset 0x259 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7e, offset 0x25c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7f, offset 0x25f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x80, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x264 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x82, offset 0x26c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x83, offset 0x270 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x84, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x85, offset 0x27a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x86, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x87, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x88, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x89, offset 0x287 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x8a, offset 0x28a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // 
Block 0x8b, offset 0x28f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x8c, offset 0x291 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8d, offset 0x293 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8e, offset 0x295 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8f, offset 0x297 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x90, offset 0x299 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x91, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x92, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x93, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x94, offset 0x2a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x95, offset 0x2a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x96, offset 0x2a6 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x97, offset 0x2a8 + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x98, offset 0x2b5 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x9a, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x9b, offset 0x2c3 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x9c, offset 0x2c9 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0x9d, offset 0x2cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0x9e, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x9f, offset 0x2cf + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + // Block 0xa0, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa1, offset 0x2d5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa2, offset 0x2d8 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 19260 bytes (18.81 KiB). Checksum: 1a0bbc4c8c24da49. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 95: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 95 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 97 blocks, 6208 entries, 12416 bytes +// The third block is the zero block. +var nfkcValues = [6208]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 
0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x132: 0x1a8a, 0x133: 0x1b17, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, 0x13f: 0x1cdc, + // Block 0x5, offset 0x140 + 0x140: 0x1d64, 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, 0x149: 0x1d8c, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2f2f, 0x185: 0x2f35, + 0x186: 0x2f3b, 0x187: 0x1a9f, 0x188: 0x1aa2, 0x189: 0x1b38, 0x18a: 0x1ab7, 0x18b: 0x1aba, + 0x18c: 0x1b6e, 0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b1: 0x1a6f, 0x1b2: 0x1a72, 0x1b3: 0x1aff, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 
0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x43e6, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x439b, 0x285: 0x45bc, + 0x286: 0x372a, 0x287: 0x00ce, 0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4981, 0x2cb: 0x499f, + 0x2cc: 0x3808, 0x2cd: 0x3820, 0x2ce: 0x49b7, 0x2d0: 0x0242, 0x2d1: 0x0254, + 0x2d2: 0x0230, 0x2d3: 0x444d, 0x2d4: 0x4453, 0x2d5: 0x027e, 0x2d6: 0x026c, + 0x2f0: 0x025a, 0x2f1: 0x026f, 0x2f2: 0x0272, 0x2f4: 0x020c, 0x2f5: 0x024b, + 0x2f9: 0x022a, + // Block 0xc, offset 0x300 + 0x300: 0x3862, 0x301: 0x386e, 0x303: 0x385c, + 0x306: 0xa000, 0x307: 0x384a, + 0x30c: 0x389e, 0x30d: 0x3886, 0x30e: 0x38b0, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3892, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x3916, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3874, 0x342: 0x38f8, + 0x350: 0x3850, 0x351: 0x38d4, + 0x352: 0x3856, 0x353: 0x38da, 0x356: 0x3868, 0x357: 0x38ec, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x396a, 0x35b: 0x3970, 0x35c: 0x387a, 0x35d: 0x38fe, + 0x35e: 0x3880, 0x35f: 0x3904, 0x362: 0x388c, 0x363: 0x3910, + 0x364: 0x3898, 0x365: 0x391c, 0x366: 0x38a4, 0x367: 0x3928, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3976, 0x36b: 0x397c, 0x36c: 0x38ce, 0x36d: 0x3952, 0x36e: 0x38aa, 0x36f: 0x392e, + 0x370: 0x38b6, 0x371: 0x393a, 0x372: 0x38bc, 0x373: 0x3940, 0x374: 0x38c2, 0x375: 0x3946, + 0x378: 0x38c8, 0x379: 0x394c, + // Block 0xe, offset 0x380 + 0x387: 0x1e91, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 
0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1eb4, + 0x3f6: 0x2143, 0x3f7: 0x217f, 0x3f8: 0x217a, + // Block 0x10, offset 0x400 + 0x40a: 0x8133, 0x40b: 0x8133, + 0x40c: 0x8133, 0x40d: 0x8133, 0x40e: 0x8133, 0x40f: 0x812e, 0x410: 0x812e, 0x411: 0x812e, + 0x412: 0x812e, 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2e5d, 0x447: 0xa000, 0x448: 0x2e65, 0x449: 0xa000, 0x44a: 0x2e6d, 0x44b: 0xa000, + 0x44c: 0x2e75, 0x44d: 0xa000, 0x44e: 0x2e7d, 0x451: 0xa000, + 0x452: 0x2e85, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2e8d, + 0x47c: 0xa000, 0x47d: 0x2e95, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x0104, 0x485: 0x0107, + 0x486: 0x0506, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x011f, 0x48b: 0x0122, + 0x48c: 0x0125, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e6, + 0x492: 0x009f, 0x493: 0x0110, 0x494: 0x050a, 0x495: 0x050e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0516, 0x49a: 0x015b, 0x49b: 0x00ad, 0x49c: 0x051a, 0x49d: 0x0242, + 0x49e: 0x0245, 0x49f: 0x0248, 0x4a0: 0x027e, 0x4a1: 0x0281, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x0242, 0x4a7: 0x0245, 0x4a8: 0x026f, 0x4a9: 0x027e, + 0x4aa: 0x0281, + 0x4b8: 0x02b4, + // Block 0x13, offset 0x4c0 + 0x4db: 0x010a, 0x4dc: 0x0087, 0x4dd: 0x0113, + 0x4de: 0x00d7, 0x4df: 0x0125, 0x4e0: 0x008d, 0x4e1: 0x012b, 0x4e2: 0x0131, 0x4e3: 0x013d, + 0x4e4: 0x0146, 0x4e5: 0x0149, 0x4e6: 0x014c, 0x4e7: 0x051e, 0x4e8: 0x01c7, 0x4e9: 0x0155, + 0x4ea: 0x0522, 0x4eb: 0x01ca, 0x4ec: 0x0161, 0x4ed: 0x015e, 0x4ee: 0x0164, 0x4ef: 0x0167, + 0x4f0: 0x016a, 0x4f1: 0x016d, 0x4f2: 0x0176, 0x4f3: 0x018e, 0x4f4: 0x0191, 0x4f5: 0x00f2, + 0x4f6: 0x019a, 0x4f7: 0x019d, 0x4f8: 0x0512, 0x4f9: 0x01a0, 0x4fa: 0x01a3, 0x4fb: 0x00b5, + 0x4fc: 0x01af, 0x4fd: 0x01b2, 0x4fe: 0x01b5, 0x4ff: 0x0254, + // Block 0x14, offset 
0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53a: 0x812d, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x30d8, 0x541: 0x33e4, 0x542: 0x30e2, 0x543: 0x33ee, 0x544: 0x30e7, 0x545: 0x33f3, + 0x546: 0x30ec, 0x547: 0x33f8, 0x548: 0x3a0d, 0x549: 0x3b9c, 0x54a: 0x3105, 0x54b: 0x3411, + 0x54c: 0x310f, 0x54d: 0x341b, 0x54e: 0x311e, 0x54f: 0x342a, 0x550: 0x3114, 0x551: 0x3420, + 0x552: 0x3119, 0x553: 0x3425, 0x554: 0x3a30, 0x555: 0x3bbf, 0x556: 0x3a37, 0x557: 0x3bc6, + 0x558: 0x315a, 0x559: 0x3466, 0x55a: 0x315f, 0x55b: 0x346b, 0x55c: 0x3a45, 0x55d: 0x3bd4, + 0x55e: 0x3164, 0x55f: 0x3470, 0x560: 0x3173, 0x561: 0x347f, 0x562: 0x3191, 0x563: 0x349d, + 0x564: 0x31a0, 0x565: 0x34ac, 0x566: 0x3196, 0x567: 0x34a2, 0x568: 0x31a5, 0x569: 0x34b1, + 0x56a: 0x31aa, 0x56b: 0x34b6, 0x56c: 0x31f0, 0x56d: 0x34fc, 0x56e: 0x3a4c, 0x56f: 0x3bdb, + 0x570: 0x31fa, 0x571: 0x350b, 0x572: 0x3204, 0x573: 0x3515, 0x574: 0x320e, 0x575: 0x351f, + 0x576: 0x4805, 0x577: 0x4896, 0x578: 0x3a53, 0x579: 0x3be2, 0x57a: 0x3227, 0x57b: 0x3538, + 0x57c: 0x3222, 0x57d: 0x3533, 0x57e: 0x322c, 0x57f: 0x353d, + // Block 0x16, offset 0x580 + 0x580: 0x3231, 0x581: 0x3542, 0x582: 0x3236, 0x583: 0x3547, 0x584: 0x324a, 0x585: 0x355b, + 0x586: 0x3254, 0x587: 0x3565, 0x588: 0x3263, 0x589: 0x3574, 0x58a: 0x325e, 0x58b: 0x356f, + 0x58c: 0x3a76, 0x58d: 0x3c05, 0x58e: 0x3a84, 0x58f: 0x3c13, 0x590: 0x3a8b, 0x591: 0x3c1a, + 0x592: 0x3a92, 0x593: 0x3c21, 0x594: 0x3290, 0x595: 0x35a1, 0x596: 0x3295, 0x597: 0x35a6, + 0x598: 0x329f, 0x599: 0x35b0, 0x59a: 0x4832, 0x59b: 0x48c3, 0x59c: 0x3ad8, 0x59d: 0x3c67, + 0x59e: 0x32b8, 0x59f: 0x35c9, 0x5a0: 0x32c2, 0x5a1: 0x35d3, 0x5a2: 0x4841, 0x5a3: 0x48d2, + 0x5a4: 0x3adf, 0x5a5: 0x3c6e, 0x5a6: 0x3ae6, 0x5a7: 0x3c75, 0x5a8: 0x3aed, 0x5a9: 0x3c7c, + 0x5aa: 0x32d1, 0x5ab: 0x35e2, 0x5ac: 0x32db, 0x5ad: 0x35f1, 0x5ae: 0x32ef, 0x5af: 0x3605, + 0x5b0: 0x32ea, 0x5b1: 0x3600, 0x5b2: 0x332b, 0x5b3: 0x3641, 0x5b4: 0x333a, 0x5b5: 0x3650, + 0x5b6: 0x3335, 0x5b7: 0x364b, 0x5b8: 0x3af4, 0x5b9: 0x3c83, 0x5ba: 0x3afb, 0x5bb: 0x3c8a, + 0x5bc: 0x333f, 0x5bd: 0x3655, 0x5be: 0x3344, 0x5bf: 0x365a, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3349, 0x5c1: 0x365f, 0x5c2: 0x334e, 0x5c3: 0x3664, 0x5c4: 0x335d, 0x5c5: 0x3673, + 0x5c6: 0x3358, 0x5c7: 0x366e, 0x5c8: 0x3362, 0x5c9: 0x367d, 0x5ca: 0x3367, 0x5cb: 0x3682, + 0x5cc: 0x336c, 0x5cd: 0x3687, 0x5ce: 0x338a, 0x5cf: 0x36a5, 0x5d0: 0x33a3, 0x5d1: 0x36c3, + 0x5d2: 0x33b2, 0x5d3: 0x36d2, 0x5d4: 0x33b7, 0x5d5: 0x36d7, 0x5d6: 0x34bb, 0x5d7: 0x35e7, + 0x5d8: 0x3678, 0x5d9: 0x36b4, 0x5da: 0x1d10, 0x5db: 0x4418, + 0x5e0: 0x47e2, 0x5e1: 0x4873, 0x5e2: 0x30c4, 0x5e3: 0x33d0, + 0x5e4: 0x39b9, 
0x5e5: 0x3b48, 0x5e6: 0x39b2, 0x5e7: 0x3b41, 0x5e8: 0x39c7, 0x5e9: 0x3b56, + 0x5ea: 0x39c0, 0x5eb: 0x3b4f, 0x5ec: 0x39ff, 0x5ed: 0x3b8e, 0x5ee: 0x39d5, 0x5ef: 0x3b64, + 0x5f0: 0x39ce, 0x5f1: 0x3b5d, 0x5f2: 0x39e3, 0x5f3: 0x3b72, 0x5f4: 0x39dc, 0x5f5: 0x3b6b, + 0x5f6: 0x3a06, 0x5f7: 0x3b95, 0x5f8: 0x47f6, 0x5f9: 0x4887, 0x5fa: 0x3141, 0x5fb: 0x344d, + 0x5fc: 0x312d, 0x5fd: 0x3439, 0x5fe: 0x3a1b, 0x5ff: 0x3baa, + // Block 0x18, offset 0x600 + 0x600: 0x3a14, 0x601: 0x3ba3, 0x602: 0x3a29, 0x603: 0x3bb8, 0x604: 0x3a22, 0x605: 0x3bb1, + 0x606: 0x3a3e, 0x607: 0x3bcd, 0x608: 0x31d2, 0x609: 0x34de, 0x60a: 0x31e6, 0x60b: 0x34f2, + 0x60c: 0x4828, 0x60d: 0x48b9, 0x60e: 0x3277, 0x60f: 0x3588, 0x610: 0x3a61, 0x611: 0x3bf0, + 0x612: 0x3a5a, 0x613: 0x3be9, 0x614: 0x3a6f, 0x615: 0x3bfe, 0x616: 0x3a68, 0x617: 0x3bf7, + 0x618: 0x3aca, 0x619: 0x3c59, 0x61a: 0x3aae, 0x61b: 0x3c3d, 0x61c: 0x3aa7, 0x61d: 0x3c36, + 0x61e: 0x3abc, 0x61f: 0x3c4b, 0x620: 0x3ab5, 0x621: 0x3c44, 0x622: 0x3ac3, 0x623: 0x3c52, + 0x624: 0x3326, 0x625: 0x363c, 0x626: 0x3308, 0x627: 0x361e, 0x628: 0x3b25, 0x629: 0x3cb4, + 0x62a: 0x3b1e, 0x62b: 0x3cad, 0x62c: 0x3b33, 0x62d: 0x3cc2, 0x62e: 0x3b2c, 0x62f: 0x3cbb, + 0x630: 0x3b3a, 0x631: 0x3cc9, 0x632: 0x3371, 0x633: 0x368c, 0x634: 0x3399, 0x635: 0x36b9, + 0x636: 0x3394, 0x637: 0x36af, 0x638: 0x3380, 0x639: 0x369b, + // Block 0x19, offset 0x640 + 0x640: 0x4945, 0x641: 0x494b, 0x642: 0x4a5f, 0x643: 0x4a77, 0x644: 0x4a67, 0x645: 0x4a7f, + 0x646: 0x4a6f, 0x647: 0x4a87, 0x648: 0x48eb, 0x649: 0x48f1, 0x64a: 0x49cf, 0x64b: 0x49e7, + 0x64c: 0x49d7, 0x64d: 0x49ef, 0x64e: 0x49df, 0x64f: 0x49f7, 0x650: 0x4957, 0x651: 0x495d, + 0x652: 0x3ef9, 0x653: 0x3f09, 0x654: 0x3f01, 0x655: 0x3f11, + 0x658: 0x48f7, 0x659: 0x48fd, 0x65a: 0x3e29, 0x65b: 0x3e39, 0x65c: 0x3e31, 0x65d: 0x3e41, + 0x660: 0x496f, 0x661: 0x4975, 0x662: 0x4a8f, 0x663: 0x4aa7, + 0x664: 0x4a97, 0x665: 0x4aaf, 0x666: 0x4a9f, 0x667: 0x4ab7, 0x668: 0x4903, 0x669: 0x4909, + 0x66a: 0x49ff, 0x66b: 0x4a17, 0x66c: 0x4a07, 0x66d: 0x4a1f, 0x66e: 0x4a0f, 0x66f: 0x4a27, + 0x670: 0x4987, 0x671: 0x498d, 0x672: 0x3f59, 0x673: 0x3f71, 0x674: 0x3f61, 0x675: 0x3f79, + 0x676: 0x3f69, 0x677: 0x3f81, 0x678: 0x490f, 0x679: 0x4915, 0x67a: 0x3e59, 0x67b: 0x3e71, + 0x67c: 0x3e61, 0x67d: 0x3e79, 0x67e: 0x3e69, 0x67f: 0x3e81, + // Block 0x1a, offset 0x680 + 0x680: 0x4993, 0x681: 0x4999, 0x682: 0x3f89, 0x683: 0x3f99, 0x684: 0x3f91, 0x685: 0x3fa1, + 0x688: 0x491b, 0x689: 0x4921, 0x68a: 0x3e89, 0x68b: 0x3e99, + 0x68c: 0x3e91, 0x68d: 0x3ea1, 0x690: 0x49a5, 0x691: 0x49ab, + 0x692: 0x3fc1, 0x693: 0x3fd9, 0x694: 0x3fc9, 0x695: 0x3fe1, 0x696: 0x3fd1, 0x697: 0x3fe9, + 0x699: 0x4927, 0x69b: 0x3ea9, 0x69d: 0x3eb1, + 0x69f: 0x3eb9, 0x6a0: 0x49bd, 0x6a1: 0x49c3, 0x6a2: 0x4abf, 0x6a3: 0x4ad7, + 0x6a4: 0x4ac7, 0x6a5: 0x4adf, 0x6a6: 0x4acf, 0x6a7: 0x4ae7, 0x6a8: 0x492d, 0x6a9: 0x4933, + 0x6aa: 0x4a2f, 0x6ab: 0x4a47, 0x6ac: 0x4a37, 0x6ad: 0x4a4f, 0x6ae: 0x4a3f, 0x6af: 0x4a57, + 0x6b0: 0x4939, 0x6b1: 0x445f, 0x6b2: 0x37d2, 0x6b3: 0x4465, 0x6b4: 0x4963, 0x6b5: 0x446b, + 0x6b6: 0x37e4, 0x6b7: 0x4471, 0x6b8: 0x3802, 0x6b9: 0x4477, 0x6ba: 0x381a, 0x6bb: 0x447d, + 0x6bc: 0x49b1, 0x6bd: 0x4483, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3ee1, 0x6c1: 0x3ee9, 0x6c2: 0x42c5, 0x6c3: 0x42e3, 0x6c4: 0x42cf, 0x6c5: 0x42ed, + 0x6c6: 0x42d9, 0x6c7: 0x42f7, 0x6c8: 0x3e19, 0x6c9: 0x3e21, 0x6ca: 0x4211, 0x6cb: 0x422f, + 0x6cc: 0x421b, 0x6cd: 0x4239, 0x6ce: 0x4225, 0x6cf: 0x4243, 0x6d0: 0x3f29, 0x6d1: 0x3f31, + 0x6d2: 0x4301, 0x6d3: 0x431f, 0x6d4: 0x430b, 0x6d5: 0x4329, 0x6d6: 0x4315, 0x6d7: 0x4333, + 0x6d8: 
0x3e49, 0x6d9: 0x3e51, 0x6da: 0x424d, 0x6db: 0x426b, 0x6dc: 0x4257, 0x6dd: 0x4275, + 0x6de: 0x4261, 0x6df: 0x427f, 0x6e0: 0x4001, 0x6e1: 0x4009, 0x6e2: 0x433d, 0x6e3: 0x435b, + 0x6e4: 0x4347, 0x6e5: 0x4365, 0x6e6: 0x4351, 0x6e7: 0x436f, 0x6e8: 0x3ec1, 0x6e9: 0x3ec9, + 0x6ea: 0x4289, 0x6eb: 0x42a7, 0x6ec: 0x4293, 0x6ed: 0x42b1, 0x6ee: 0x429d, 0x6ef: 0x42bb, + 0x6f0: 0x37c6, 0x6f1: 0x37c0, 0x6f2: 0x3ed1, 0x6f3: 0x37cc, 0x6f4: 0x3ed9, + 0x6f6: 0x4951, 0x6f7: 0x3ef1, 0x6f8: 0x3736, 0x6f9: 0x3730, 0x6fa: 0x3724, 0x6fb: 0x442f, + 0x6fc: 0x373c, 0x6fd: 0x43c8, 0x6fe: 0x0257, 0x6ff: 0x43c8, + // Block 0x1c, offset 0x700 + 0x700: 0x43e1, 0x701: 0x45c3, 0x702: 0x3f19, 0x703: 0x37de, 0x704: 0x3f21, + 0x706: 0x497b, 0x707: 0x3f39, 0x708: 0x3742, 0x709: 0x4435, 0x70a: 0x374e, 0x70b: 0x443b, + 0x70c: 0x375a, 0x70d: 0x45ca, 0x70e: 0x45d1, 0x70f: 0x45d8, 0x710: 0x37f6, 0x711: 0x37f0, + 0x712: 0x3f41, 0x713: 0x4625, 0x716: 0x37fc, 0x717: 0x3f51, + 0x718: 0x3772, 0x719: 0x376c, 0x71a: 0x3760, 0x71b: 0x4441, 0x71d: 0x45df, + 0x71e: 0x45e6, 0x71f: 0x45ed, 0x720: 0x382c, 0x721: 0x3826, 0x722: 0x3fa9, 0x723: 0x462d, + 0x724: 0x380e, 0x725: 0x3814, 0x726: 0x3832, 0x727: 0x3fb9, 0x728: 0x37a2, 0x729: 0x379c, + 0x72a: 0x3790, 0x72b: 0x444d, 0x72c: 0x378a, 0x72d: 0x45b5, 0x72e: 0x45bc, 0x72f: 0x0081, + 0x732: 0x3ff1, 0x733: 0x3838, 0x734: 0x3ff9, + 0x736: 0x49c9, 0x737: 0x4011, 0x738: 0x377e, 0x739: 0x4447, 0x73a: 0x37ae, 0x73b: 0x4459, + 0x73c: 0x37ba, 0x73d: 0x439b, 0x73e: 0x43cd, + // Block 0x1d, offset 0x740 + 0x740: 0x1d08, 0x741: 0x1d0c, 0x742: 0x0047, 0x743: 0x1d84, 0x745: 0x1d18, + 0x746: 0x1d1c, 0x747: 0x00ef, 0x749: 0x1d88, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00e0, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1abd, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x1acf, 0x761: 0x1cf8, 0x762: 0x1ad8, + 0x764: 0x0075, 0x766: 0x023c, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x4413, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0308, + 0x776: 0x030b, 0x777: 0x030e, 0x778: 0x0311, 0x779: 0x0093, 0x77b: 0x1cc8, + 0x77c: 0x026c, 0x77d: 0x0245, 0x77e: 0x01fd, 0x77f: 0x0224, + // Block 0x1e, offset 0x780 + 0x780: 0x055a, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x235e, 0x791: 0x236a, + 0x792: 0x241e, 0x793: 0x2346, 0x794: 0x23ca, 0x795: 0x2352, 0x796: 0x23d0, 0x797: 0x23e8, + 0x798: 0x23f4, 0x799: 0x2358, 0x79a: 0x23fa, 0x79b: 0x2364, 0x79c: 0x23ee, 0x79d: 0x2400, + 0x79e: 0x2406, 0x79f: 0x1dec, 0x7a0: 0x0053, 0x7a1: 0x1a87, 0x7a2: 0x1cd4, 0x7a3: 0x1a90, + 0x7a4: 0x006d, 0x7a5: 0x1adb, 0x7a6: 0x1d00, 0x7a7: 0x1e78, 0x7a8: 0x1a93, 0x7a9: 0x0071, + 0x7aa: 0x1ae7, 0x7ab: 0x1d04, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x1b14, 0x7b2: 0x1d48, 0x7b3: 0x1b1d, 0x7b4: 0x00ad, 0x7b5: 0x1b92, + 0x7b6: 0x1d7c, 0x7b7: 0x1e8c, 0x7b8: 0x1b20, 0x7b9: 0x00b1, 0x7ba: 0x1b95, 0x7bb: 0x1d80, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3d47, 0x7c3: 0xa000, 0x7c4: 0x3d4e, 0x7c5: 0xa000, + 0x7c7: 0x3d55, 0x7c8: 0xa000, 0x7c9: 0x3d5c, + 0x7cd: 0xa000, + 0x7e0: 0x30a6, 0x7e1: 0xa000, 0x7e2: 0x3d6a, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3d63, 0x7ee: 0x30a1, 0x7ef: 0x30ab, + 0x7f0: 0x3d71, 0x7f1: 0x3d78, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3d7f, 0x7f5: 0x3d86, + 0x7f6: 0xa000, 0x7f7: 0xa000, 
0x7f8: 0x3d8d, 0x7f9: 0x3d94, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3d9b, 0x801: 0x3da2, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3db7, 0x805: 0x3dbe, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3dc5, 0x809: 0x3dcc, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3de1, 0x82d: 0x3de8, 0x82e: 0x3def, 0x82f: 0x3df6, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x19af, + 0x86a: 0x19b2, 0x86b: 0x19b5, 0x86c: 0x19b8, 0x86d: 0x19bb, 0x86e: 0x19be, 0x86f: 0x19c1, + 0x870: 0x19c4, 0x871: 0x19c7, 0x872: 0x19ca, 0x873: 0x19d3, 0x874: 0x1b98, 0x875: 0x1b9c, + 0x876: 0x1ba0, 0x877: 0x1ba4, 0x878: 0x1ba8, 0x879: 0x1bac, 0x87a: 0x1bb0, 0x87b: 0x1bb4, + 0x87c: 0x1bb8, 0x87d: 0x1db0, 0x87e: 0x1db5, 0x87f: 0x1dba, + // Block 0x22, offset 0x880 + 0x880: 0x1dbf, 0x881: 0x1dc4, 0x882: 0x1dc9, 0x883: 0x1dce, 0x884: 0x1dd3, 0x885: 0x1dd8, + 0x886: 0x1ddd, 0x887: 0x1de2, 0x888: 0x19ac, 0x889: 0x19d0, 0x88a: 0x19f4, 0x88b: 0x1a18, + 0x88c: 0x1a3c, 0x88d: 0x1a45, 0x88e: 0x1a4b, 0x88f: 0x1a51, 0x890: 0x1a57, 0x891: 0x1c90, + 0x892: 0x1c94, 0x893: 0x1c98, 0x894: 0x1c9c, 0x895: 0x1ca0, 0x896: 0x1ca4, 0x897: 0x1ca8, + 0x898: 0x1cac, 0x899: 0x1cb0, 0x89a: 0x1cb4, 0x89b: 0x1cb8, 0x89c: 0x1c24, 0x89d: 0x1c28, + 0x89e: 0x1c2c, 0x89f: 0x1c30, 0x8a0: 0x1c34, 0x8a1: 0x1c38, 0x8a2: 0x1c3c, 0x8a3: 0x1c40, + 0x8a4: 0x1c44, 0x8a5: 0x1c48, 0x8a6: 0x1c4c, 0x8a7: 0x1c50, 0x8a8: 0x1c54, 0x8a9: 0x1c58, + 0x8aa: 0x1c5c, 0x8ab: 0x1c60, 0x8ac: 0x1c64, 0x8ad: 0x1c68, 0x8ae: 0x1c6c, 0x8af: 0x1c70, + 0x8b0: 0x1c74, 0x8b1: 0x1c78, 0x8b2: 0x1c7c, 0x8b3: 0x1c80, 0x8b4: 0x1c84, 0x8b5: 0x1c88, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x07ba, 0x8c1: 0x07de, 0x8c2: 0x07ea, 0x8c3: 0x07fa, 0x8c4: 0x0802, 0x8c5: 0x080e, + 0x8c6: 0x0816, 0x8c7: 0x081e, 0x8c8: 0x082a, 0x8c9: 0x087e, 0x8ca: 0x0896, 0x8cb: 0x08a6, + 0x8cc: 0x08b6, 0x8cd: 0x08c6, 0x8ce: 0x08d6, 0x8cf: 0x08f6, 0x8d0: 0x08fa, 0x8d1: 0x08fe, + 0x8d2: 0x0932, 0x8d3: 0x095a, 0x8d4: 0x096a, 0x8d5: 0x0972, 0x8d6: 0x0976, 0x8d7: 0x0982, + 0x8d8: 0x099e, 0x8d9: 0x09a2, 0x8da: 0x09ba, 0x8db: 0x09be, 0x8dc: 0x09c6, 0x8dd: 0x09d6, + 0x8de: 0x0a72, 0x8df: 0x0a86, 0x8e0: 0x0ac6, 0x8e1: 0x0ada, 0x8e2: 0x0ae2, 0x8e3: 0x0ae6, + 0x8e4: 0x0af6, 0x8e5: 0x0b12, 0x8e6: 0x0b3e, 0x8e7: 0x0b4a, 0x8e8: 0x0b6a, 0x8e9: 0x0b76, + 0x8ea: 0x0b7a, 0x8eb: 0x0b7e, 0x8ec: 0x0b96, 0x8ed: 0x0b9a, 0x8ee: 0x0bc6, 0x8ef: 0x0bd2, + 0x8f0: 0x0bda, 0x8f1: 0x0be2, 0x8f2: 0x0bf2, 0x8f3: 0x0bfa, 0x8f4: 0x0c02, 0x8f5: 0x0c2e, + 0x8f6: 0x0c32, 0x8f7: 0x0c3a, 0x8f8: 0x0c3e, 0x8f9: 0x0c46, 0x8fa: 0x0c4e, 0x8fb: 0x0c5e, + 0x8fc: 0x0c7a, 0x8fd: 0x0cf2, 0x8fe: 0x0d06, 0x8ff: 0x0d0a, + // Block 0x24, offset 0x900 + 0x900: 0x0d8a, 0x901: 0x0d8e, 0x902: 0x0da2, 0x903: 0x0da6, 0x904: 0x0dae, 0x905: 0x0db6, + 0x906: 0x0dbe, 0x907: 0x0dca, 0x908: 0x0df2, 0x909: 0x0e02, 0x90a: 0x0e16, 0x90b: 0x0e86, + 0x90c: 0x0e92, 0x90d: 0x0ea2, 0x90e: 0x0eae, 0x90f: 0x0eba, 0x910: 0x0ec2, 0x911: 0x0ec6, + 0x912: 0x0eca, 0x913: 0x0ece, 0x914: 0x0ed2, 0x915: 0x0f8a, 0x916: 0x0fd2, 0x917: 0x0fde, + 0x918: 0x0fe2, 0x919: 0x0fe6, 0x91a: 0x0fea, 0x91b: 0x0ff2, 0x91c: 0x0ff6, 0x91d: 0x100a, + 0x91e: 0x1026, 
0x91f: 0x102e, 0x920: 0x106e, 0x921: 0x1072, 0x922: 0x107a, 0x923: 0x107e, + 0x924: 0x1086, 0x925: 0x108a, 0x926: 0x10ae, 0x927: 0x10b2, 0x928: 0x10ce, 0x929: 0x10d2, + 0x92a: 0x10d6, 0x92b: 0x10da, 0x92c: 0x10ee, 0x92d: 0x1112, 0x92e: 0x1116, 0x92f: 0x111a, + 0x930: 0x113e, 0x931: 0x117e, 0x932: 0x1182, 0x933: 0x11a2, 0x934: 0x11b2, 0x935: 0x11ba, + 0x936: 0x11da, 0x937: 0x11fe, 0x938: 0x1242, 0x939: 0x124a, 0x93a: 0x125e, 0x93b: 0x126a, + 0x93c: 0x1272, 0x93d: 0x127a, 0x93e: 0x127e, 0x93f: 0x1282, + // Block 0x25, offset 0x940 + 0x940: 0x129a, 0x941: 0x129e, 0x942: 0x12ba, 0x943: 0x12c2, 0x944: 0x12ca, 0x945: 0x12ce, + 0x946: 0x12da, 0x947: 0x12e2, 0x948: 0x12e6, 0x949: 0x12ea, 0x94a: 0x12f2, 0x94b: 0x12f6, + 0x94c: 0x1396, 0x94d: 0x13aa, 0x94e: 0x13de, 0x94f: 0x13e2, 0x950: 0x13ea, 0x951: 0x1416, + 0x952: 0x141e, 0x953: 0x1426, 0x954: 0x142e, 0x955: 0x146a, 0x956: 0x146e, 0x957: 0x1476, + 0x958: 0x147a, 0x959: 0x147e, 0x95a: 0x14aa, 0x95b: 0x14ae, 0x95c: 0x14b6, 0x95d: 0x14ca, + 0x95e: 0x14ce, 0x95f: 0x14ea, 0x960: 0x14f2, 0x961: 0x14f6, 0x962: 0x151a, 0x963: 0x153a, + 0x964: 0x154e, 0x965: 0x1552, 0x966: 0x155a, 0x967: 0x1586, 0x968: 0x158a, 0x969: 0x159a, + 0x96a: 0x15be, 0x96b: 0x15ca, 0x96c: 0x15da, 0x96d: 0x15f2, 0x96e: 0x15fa, 0x96f: 0x15fe, + 0x970: 0x1602, 0x971: 0x1606, 0x972: 0x1612, 0x973: 0x1616, 0x974: 0x161e, 0x975: 0x163a, + 0x976: 0x163e, 0x977: 0x1642, 0x978: 0x165a, 0x979: 0x165e, 0x97a: 0x1666, 0x97b: 0x167a, + 0x97c: 0x167e, 0x97d: 0x1682, 0x97e: 0x168a, 0x97f: 0x168e, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x4049, 0x98d: 0xa000, 0x98e: 0x4051, 0x98f: 0xa000, 0x990: 0x4059, 0x991: 0xa000, + 0x992: 0x4061, 0x993: 0xa000, 0x994: 0x4069, 0x995: 0xa000, 0x996: 0x4071, 0x997: 0xa000, + 0x998: 0x4079, 0x999: 0xa000, 0x99a: 0x4081, 0x99b: 0xa000, 0x99c: 0x4089, 0x99d: 0xa000, + 0x99e: 0x4091, 0x99f: 0xa000, 0x9a0: 0x4099, 0x9a1: 0xa000, 0x9a2: 0x40a1, + 0x9a4: 0xa000, 0x9a5: 0x40a9, 0x9a6: 0xa000, 0x9a7: 0x40b1, 0x9a8: 0xa000, 0x9a9: 0x40b9, + 0x9af: 0xa000, + 0x9b0: 0x40c1, 0x9b1: 0x40c9, 0x9b2: 0xa000, 0x9b3: 0x40d1, 0x9b4: 0x40d9, 0x9b5: 0xa000, + 0x9b6: 0x40e1, 0x9b7: 0x40e9, 0x9b8: 0xa000, 0x9b9: 0x40f1, 0x9ba: 0x40f9, 0x9bb: 0xa000, + 0x9bc: 0x4101, 0x9bd: 0x4109, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x4041, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x441d, 0x9dc: 0x4423, 0x9dd: 0xa000, + 0x9de: 0x4111, 0x9df: 0x27e4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x4121, 0x9ed: 0xa000, 0x9ee: 0x4129, 0x9ef: 0xa000, + 0x9f0: 0x4131, 0x9f1: 0xa000, 0x9f2: 0x4139, 0x9f3: 0xa000, 0x9f4: 0x4141, 0x9f5: 0xa000, + 0x9f6: 0x4149, 0x9f7: 0xa000, 0x9f8: 0x4151, 0x9f9: 0xa000, 0x9fa: 0x4159, 0x9fb: 0xa000, + 0x9fc: 0x4161, 0x9fd: 0xa000, 0x9fe: 0x4169, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4171, 0xa01: 0xa000, 0xa02: 0x4179, 0xa04: 0xa000, 0xa05: 0x4181, + 0xa06: 0xa000, 0xa07: 0x4189, 0xa08: 0xa000, 0xa09: 0x4191, + 0xa0f: 0xa000, 0xa10: 0x4199, 0xa11: 0x41a1, + 0xa12: 0xa000, 0xa13: 0x41a9, 0xa14: 0x41b1, 0xa15: 0xa000, 0xa16: 0x41b9, 0xa17: 0x41c1, + 0xa18: 0xa000, 0xa19: 0x41c9, 0xa1a: 0x41d1, 0xa1b: 0xa000, 0xa1c: 0x41d9, 0xa1d: 0x41e1, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x4119, + 0xa37: 0x41e9, 0xa38: 0x41f1, 0xa39: 0x41f9, 0xa3a: 0x4201, + 0xa3d: 0xa000, 0xa3e: 0x4209, 0xa3f: 0x27f9, + // Block 0x29, offset 0xa40 + 0xa40: 0x045a, 0xa41: 0x041e, 0xa42: 0x0422, 0xa43: 0x0426, 0xa44: 0x046e, 0xa45: 0x042a, + 0xa46: 0x042e, 0xa47: 0x0432, 0xa48: 0x0436, 0xa49: 0x043a, 0xa4a: 0x043e, 0xa4b: 0x0442, 
+ 0xa4c: 0x0446, 0xa4d: 0x044a, 0xa4e: 0x044e, 0xa4f: 0x4afe, 0xa50: 0x4b04, 0xa51: 0x4b0a, + 0xa52: 0x4b10, 0xa53: 0x4b16, 0xa54: 0x4b1c, 0xa55: 0x4b22, 0xa56: 0x4b28, 0xa57: 0x4b2e, + 0xa58: 0x4b34, 0xa59: 0x4b3a, 0xa5a: 0x4b40, 0xa5b: 0x4b46, 0xa5c: 0x4b4c, 0xa5d: 0x4b52, + 0xa5e: 0x4b58, 0xa5f: 0x4b5e, 0xa60: 0x4b64, 0xa61: 0x4b6a, 0xa62: 0x4b70, 0xa63: 0x4b76, + 0xa64: 0x04b6, 0xa65: 0x0452, 0xa66: 0x0456, 0xa67: 0x04da, 0xa68: 0x04de, 0xa69: 0x04e2, + 0xa6a: 0x04e6, 0xa6b: 0x04ea, 0xa6c: 0x04ee, 0xa6d: 0x04f2, 0xa6e: 0x045e, 0xa6f: 0x04f6, + 0xa70: 0x04fa, 0xa71: 0x0462, 0xa72: 0x0466, 0xa73: 0x046a, 0xa74: 0x0472, 0xa75: 0x0476, + 0xa76: 0x047a, 0xa77: 0x047e, 0xa78: 0x0482, 0xa79: 0x0486, 0xa7a: 0x048a, 0xa7b: 0x048e, + 0xa7c: 0x0492, 0xa7d: 0x0496, 0xa7e: 0x049a, 0xa7f: 0x049e, + // Block 0x2a, offset 0xa80 + 0xa80: 0x04a2, 0xa81: 0x04a6, 0xa82: 0x04fe, 0xa83: 0x0502, 0xa84: 0x04aa, 0xa85: 0x04ae, + 0xa86: 0x04b2, 0xa87: 0x04ba, 0xa88: 0x04be, 0xa89: 0x04c2, 0xa8a: 0x04c6, 0xa8b: 0x04ca, + 0xa8c: 0x04ce, 0xa8d: 0x04d2, 0xa8e: 0x04d6, + 0xa92: 0x07ba, 0xa93: 0x0816, 0xa94: 0x07c6, 0xa95: 0x0a76, 0xa96: 0x07ca, 0xa97: 0x07e2, + 0xa98: 0x07ce, 0xa99: 0x108e, 0xa9a: 0x0802, 0xa9b: 0x07d6, 0xa9c: 0x07be, 0xa9d: 0x0afa, + 0xa9e: 0x0a8a, 0xa9f: 0x082a, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2184, 0xac1: 0x218a, 0xac2: 0x2190, 0xac3: 0x2196, 0xac4: 0x219c, 0xac5: 0x21a2, + 0xac6: 0x21a8, 0xac7: 0x21ae, 0xac8: 0x21b4, 0xac9: 0x21ba, 0xaca: 0x21c0, 0xacb: 0x21c6, + 0xacc: 0x21cc, 0xacd: 0x21d2, 0xace: 0x285d, 0xacf: 0x2866, 0xad0: 0x286f, 0xad1: 0x2878, + 0xad2: 0x2881, 0xad3: 0x288a, 0xad4: 0x2893, 0xad5: 0x289c, 0xad6: 0x28a5, 0xad7: 0x28b7, + 0xad8: 0x28c0, 0xad9: 0x28c9, 0xada: 0x28d2, 0xadb: 0x28db, 0xadc: 0x28ae, 0xadd: 0x2ce3, + 0xade: 0x2c24, 0xae0: 0x21d8, 0xae1: 0x21f0, 0xae2: 0x21e4, 0xae3: 0x2238, + 0xae4: 0x21f6, 0xae5: 0x2214, 0xae6: 0x21de, 0xae7: 0x220e, 0xae8: 0x21ea, 0xae9: 0x2220, + 0xaea: 0x2250, 0xaeb: 0x226e, 0xaec: 0x2268, 0xaed: 0x225c, 0xaee: 0x22aa, 0xaef: 0x223e, + 0xaf0: 0x224a, 0xaf1: 0x2262, 0xaf2: 0x2256, 0xaf3: 0x2280, 0xaf4: 0x222c, 0xaf5: 0x2274, + 0xaf6: 0x229e, 0xaf7: 0x2286, 0xaf8: 0x221a, 0xaf9: 0x21fc, 0xafa: 0x2232, 0xafb: 0x2244, + 0xafc: 0x227a, 0xafd: 0x2202, 0xafe: 0x22a4, 0xaff: 0x2226, + // Block 0x2c, offset 0xb00 + 0xb00: 0x228c, 0xb01: 0x2208, 0xb02: 0x2292, 0xb03: 0x2298, 0xb04: 0x0a2a, 0xb05: 0x0bfe, + 0xb06: 0x0da2, 0xb07: 0x11c2, + 0xb10: 0x1cf4, 0xb11: 0x19d6, + 0xb12: 0x19d9, 0xb13: 0x19dc, 0xb14: 0x19df, 0xb15: 0x19e2, 0xb16: 0x19e5, 0xb17: 0x19e8, + 0xb18: 0x19eb, 0xb19: 0x19ee, 0xb1a: 0x19f7, 0xb1b: 0x19fa, 0xb1c: 0x19fd, 0xb1d: 0x1a00, + 0xb1e: 0x1a03, 0xb1f: 0x1a06, 0xb20: 0x0406, 0xb21: 0x040e, 0xb22: 0x0412, 0xb23: 0x041a, + 0xb24: 0x041e, 0xb25: 0x0422, 0xb26: 0x042a, 0xb27: 0x0432, 0xb28: 0x0436, 0xb29: 0x043e, + 0xb2a: 0x0442, 0xb2b: 0x0446, 0xb2c: 0x044a, 0xb2d: 0x044e, 0xb2e: 0x2f59, 0xb2f: 0x2f61, + 0xb30: 0x2f69, 0xb31: 0x2f71, 0xb32: 0x2f79, 0xb33: 0x2f81, 0xb34: 0x2f89, 0xb35: 0x2f91, + 0xb36: 0x2fa1, 0xb37: 0x2fa9, 0xb38: 0x2fb1, 0xb39: 0x2fb9, 0xb3a: 0x2fc1, 0xb3b: 0x2fc9, + 0xb3c: 0x3014, 0xb3d: 0x2fdc, 0xb3e: 0x2f99, + // Block 0x2d, offset 0xb40 + 0xb40: 0x07ba, 0xb41: 0x0816, 0xb42: 0x07c6, 0xb43: 0x0a76, 0xb44: 0x081a, 0xb45: 0x08aa, + 0xb46: 0x07c2, 0xb47: 0x08a6, 0xb48: 0x0806, 0xb49: 0x0982, 0xb4a: 0x0e02, 0xb4b: 0x0f8a, + 0xb4c: 0x0ed2, 0xb4d: 0x0e16, 0xb4e: 0x155a, 0xb4f: 0x0a86, 0xb50: 0x0dca, 0xb51: 0x0e46, + 0xb52: 0x0e06, 0xb53: 0x1146, 0xb54: 0x09f6, 0xb55: 0x0ffe, 0xb56: 0x1482, 0xb57: 0x115a, + 
0xb58: 0x093e, 0xb59: 0x118a, 0xb5a: 0x1096, 0xb5b: 0x0b12, 0xb5c: 0x150a, 0xb5d: 0x087a, + 0xb5e: 0x09a6, 0xb5f: 0x0ef2, 0xb60: 0x1622, 0xb61: 0x083e, 0xb62: 0x08ce, 0xb63: 0x0e96, + 0xb64: 0x07ca, 0xb65: 0x07e2, 0xb66: 0x07ce, 0xb67: 0x0bd6, 0xb68: 0x09ea, 0xb69: 0x097a, + 0xb6a: 0x0b52, 0xb6b: 0x0b46, 0xb6c: 0x10e6, 0xb6d: 0x083a, 0xb6e: 0x1496, 0xb6f: 0x0996, + 0xb70: 0x0aee, 0xb71: 0x1a09, 0xb72: 0x1a0c, 0xb73: 0x1a0f, 0xb74: 0x1a12, 0xb75: 0x1a1b, + 0xb76: 0x1a1e, 0xb77: 0x1a21, 0xb78: 0x1a24, 0xb79: 0x1a27, 0xb7a: 0x1a2a, 0xb7b: 0x1a2d, + 0xb7c: 0x1a30, 0xb7d: 0x1a33, 0xb7e: 0x1a36, 0xb7f: 0x1a3f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1df6, 0xb81: 0x1e05, 0xb82: 0x1e14, 0xb83: 0x1e23, 0xb84: 0x1e32, 0xb85: 0x1e41, + 0xb86: 0x1e50, 0xb87: 0x1e5f, 0xb88: 0x1e6e, 0xb89: 0x22bc, 0xb8a: 0x22ce, 0xb8b: 0x22e0, + 0xb8c: 0x1a81, 0xb8d: 0x1d34, 0xb8e: 0x1b02, 0xb8f: 0x1cd8, 0xb90: 0x05c6, 0xb91: 0x05ce, + 0xb92: 0x05d6, 0xb93: 0x05de, 0xb94: 0x05e6, 0xb95: 0x05ea, 0xb96: 0x05ee, 0xb97: 0x05f2, + 0xb98: 0x05f6, 0xb99: 0x05fa, 0xb9a: 0x05fe, 0xb9b: 0x0602, 0xb9c: 0x0606, 0xb9d: 0x060a, + 0xb9e: 0x060e, 0xb9f: 0x0612, 0xba0: 0x0616, 0xba1: 0x061e, 0xba2: 0x0622, 0xba3: 0x0626, + 0xba4: 0x062a, 0xba5: 0x062e, 0xba6: 0x0632, 0xba7: 0x0636, 0xba8: 0x063a, 0xba9: 0x063e, + 0xbaa: 0x0642, 0xbab: 0x0646, 0xbac: 0x064a, 0xbad: 0x064e, 0xbae: 0x0652, 0xbaf: 0x0656, + 0xbb0: 0x065a, 0xbb1: 0x065e, 0xbb2: 0x0662, 0xbb3: 0x066a, 0xbb4: 0x0672, 0xbb5: 0x067a, + 0xbb6: 0x067e, 0xbb7: 0x0682, 0xbb8: 0x0686, 0xbb9: 0x068a, 0xbba: 0x068e, 0xbbb: 0x0692, + 0xbbc: 0x0696, 0xbbd: 0x069a, 0xbbe: 0x069e, 0xbbf: 0x282a, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2c43, 0xbc1: 0x2adf, 0xbc2: 0x2c53, 0xbc3: 0x29b7, 0xbc4: 0x3025, 0xbc5: 0x29c1, + 0xbc6: 0x29cb, 0xbc7: 0x3069, 0xbc8: 0x2aec, 0xbc9: 0x29d5, 0xbca: 0x29df, 0xbcb: 0x29e9, + 0xbcc: 0x2b13, 0xbcd: 0x2b20, 0xbce: 0x2af9, 0xbcf: 0x2b06, 0xbd0: 0x2fea, 0xbd1: 0x2b2d, + 0xbd2: 0x2b3a, 0xbd3: 0x2cf5, 0xbd4: 0x27eb, 0xbd5: 0x2d08, 0xbd6: 0x2d1b, 0xbd7: 0x2c63, + 0xbd8: 0x2b47, 0xbd9: 0x2d2e, 0xbda: 0x2d41, 0xbdb: 0x2b54, 0xbdc: 0x29f3, 0xbdd: 0x29fd, + 0xbde: 0x2ff8, 0xbdf: 0x2b61, 0xbe0: 0x2c73, 0xbe1: 0x3036, 0xbe2: 0x2a07, 0xbe3: 0x2a11, + 0xbe4: 0x2b6e, 0xbe5: 0x2a1b, 0xbe6: 0x2a25, 0xbe7: 0x2800, 0xbe8: 0x2807, 0xbe9: 0x2a2f, + 0xbea: 0x2a39, 0xbeb: 0x2d54, 0xbec: 0x2b7b, 0xbed: 0x2c83, 0xbee: 0x2d67, 0xbef: 0x2b88, + 0xbf0: 0x2a4d, 0xbf1: 0x2a43, 0xbf2: 0x307d, 0xbf3: 0x2b95, 0xbf4: 0x2d7a, 0xbf5: 0x2a57, + 0xbf6: 0x2c93, 0xbf7: 0x2a61, 0xbf8: 0x2baf, 0xbf9: 0x2a6b, 0xbfa: 0x2bbc, 0xbfb: 0x3047, + 0xbfc: 0x2ba2, 0xbfd: 0x2ca3, 0xbfe: 0x2bc9, 0xbff: 0x280e, + // Block 0x30, offset 0xc00 + 0xc00: 0x3058, 0xc01: 0x2a75, 0xc02: 0x2a7f, 0xc03: 0x2bd6, 0xc04: 0x2a89, 0xc05: 0x2a93, + 0xc06: 0x2a9d, 0xc07: 0x2cb3, 0xc08: 0x2be3, 0xc09: 0x2815, 0xc0a: 0x2d8d, 0xc0b: 0x2fd1, + 0xc0c: 0x2cc3, 0xc0d: 0x2bf0, 0xc0e: 0x3006, 0xc0f: 0x2aa7, 0xc10: 0x2ab1, 0xc11: 0x2bfd, + 0xc12: 0x281c, 0xc13: 0x2c0a, 0xc14: 0x2cd3, 0xc15: 0x2823, 0xc16: 0x2da0, 0xc17: 0x2abb, + 0xc18: 0x1de7, 0xc19: 0x1dfb, 0xc1a: 0x1e0a, 0xc1b: 0x1e19, 0xc1c: 0x1e28, 0xc1d: 0x1e37, + 0xc1e: 0x1e46, 0xc1f: 0x1e55, 0xc20: 0x1e64, 0xc21: 0x1e73, 0xc22: 0x22c2, 0xc23: 0x22d4, + 0xc24: 0x22e6, 0xc25: 0x22f2, 0xc26: 0x22fe, 0xc27: 0x230a, 0xc28: 0x2316, 0xc29: 0x2322, + 0xc2a: 0x232e, 0xc2b: 0x233a, 0xc2c: 0x2376, 0xc2d: 0x2382, 0xc2e: 0x238e, 0xc2f: 0x239a, + 0xc30: 0x23a6, 0xc31: 0x1d44, 0xc32: 0x1af6, 0xc33: 0x1a63, 0xc34: 0x1d14, 0xc35: 0x1b77, + 0xc36: 0x1b86, 0xc37: 0x1afc, 0xc38: 0x1d2c, 0xc39: 
0x1d30, 0xc3a: 0x1a8d, 0xc3b: 0x2838, + 0xc3c: 0x2846, 0xc3d: 0x2831, 0xc3e: 0x283f, 0xc3f: 0x2c17, + // Block 0x31, offset 0xc40 + 0xc40: 0x1b7a, 0xc41: 0x1b62, 0xc42: 0x1d90, 0xc43: 0x1b4a, 0xc44: 0x1b23, 0xc45: 0x1a96, + 0xc46: 0x1aa5, 0xc47: 0x1a75, 0xc48: 0x1d20, 0xc49: 0x1e82, 0xc4a: 0x1b7d, 0xc4b: 0x1b65, + 0xc4c: 0x1d94, 0xc4d: 0x1da0, 0xc4e: 0x1b56, 0xc4f: 0x1b2c, 0xc50: 0x1a84, 0xc51: 0x1d4c, + 0xc52: 0x1ce0, 0xc53: 0x1ccc, 0xc54: 0x1cfc, 0xc55: 0x1da4, 0xc56: 0x1b59, 0xc57: 0x1af9, + 0xc58: 0x1b2f, 0xc59: 0x1b0e, 0xc5a: 0x1b71, 0xc5b: 0x1da8, 0xc5c: 0x1b5c, 0xc5d: 0x1af0, + 0xc5e: 0x1b32, 0xc5f: 0x1d6c, 0xc60: 0x1d24, 0xc61: 0x1b44, 0xc62: 0x1d54, 0xc63: 0x1d70, + 0xc64: 0x1d28, 0xc65: 0x1b47, 0xc66: 0x1d58, 0xc67: 0x2418, 0xc68: 0x242c, 0xc69: 0x1ac6, + 0xc6a: 0x1d50, 0xc6b: 0x1ce4, 0xc6c: 0x1cd0, 0xc6d: 0x1d78, 0xc6e: 0x284d, 0xc6f: 0x28e4, + 0xc70: 0x1b89, 0xc71: 0x1b74, 0xc72: 0x1dac, 0xc73: 0x1b5f, 0xc74: 0x1b80, 0xc75: 0x1b68, + 0xc76: 0x1d98, 0xc77: 0x1b4d, 0xc78: 0x1b26, 0xc79: 0x1ab1, 0xc7a: 0x1b83, 0xc7b: 0x1b6b, + 0xc7c: 0x1d9c, 0xc7d: 0x1b50, 0xc7e: 0x1b29, 0xc7f: 0x1ab4, + // Block 0x32, offset 0xc80 + 0xc80: 0x1d5c, 0xc81: 0x1ce8, 0xc82: 0x1e7d, 0xc83: 0x1a66, 0xc84: 0x1aea, 0xc85: 0x1aed, + 0xc86: 0x2425, 0xc87: 0x1cc4, 0xc88: 0x1af3, 0xc89: 0x1a78, 0xc8a: 0x1b11, 0xc8b: 0x1a7b, + 0xc8c: 0x1b1a, 0xc8d: 0x1a99, 0xc8e: 0x1a9c, 0xc8f: 0x1b35, 0xc90: 0x1b3b, 0xc91: 0x1b3e, + 0xc92: 0x1d60, 0xc93: 0x1b41, 0xc94: 0x1b53, 0xc95: 0x1d68, 0xc96: 0x1d74, 0xc97: 0x1ac0, + 0xc98: 0x1e87, 0xc99: 0x1cec, 0xc9a: 0x1ac3, 0xc9b: 0x1b8c, 0xc9c: 0x1ad5, 0xc9d: 0x1ae4, + 0xc9e: 0x2412, 0xc9f: 0x240c, 0xca0: 0x1df1, 0xca1: 0x1e00, 0xca2: 0x1e0f, 0xca3: 0x1e1e, + 0xca4: 0x1e2d, 0xca5: 0x1e3c, 0xca6: 0x1e4b, 0xca7: 0x1e5a, 0xca8: 0x1e69, 0xca9: 0x22b6, + 0xcaa: 0x22c8, 0xcab: 0x22da, 0xcac: 0x22ec, 0xcad: 0x22f8, 0xcae: 0x2304, 0xcaf: 0x2310, + 0xcb0: 0x231c, 0xcb1: 0x2328, 0xcb2: 0x2334, 0xcb3: 0x2370, 0xcb4: 0x237c, 0xcb5: 0x2388, + 0xcb6: 0x2394, 0xcb7: 0x23a0, 0xcb8: 0x23ac, 0xcb9: 0x23b2, 0xcba: 0x23b8, 0xcbb: 0x23be, + 0xcbc: 0x23c4, 0xcbd: 0x23d6, 0xcbe: 0x23dc, 0xcbf: 0x1d40, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1472, 0xcc1: 0x0df6, 0xcc2: 0x14ce, 0xcc3: 0x149a, 0xcc4: 0x0f52, 0xcc5: 0x07e6, + 0xcc6: 0x09da, 0xcc7: 0x1726, 0xcc8: 0x1726, 0xcc9: 0x0b06, 0xcca: 0x155a, 0xccb: 0x0a3e, + 0xccc: 0x0b02, 0xccd: 0x0cea, 0xcce: 0x10ca, 0xccf: 0x125a, 0xcd0: 0x1392, 0xcd1: 0x13ce, + 0xcd2: 0x1402, 0xcd3: 0x1516, 0xcd4: 0x0e6e, 0xcd5: 0x0efa, 0xcd6: 0x0fa6, 0xcd7: 0x103e, + 0xcd8: 0x135a, 0xcd9: 0x1542, 0xcda: 0x166e, 0xcdb: 0x080a, 0xcdc: 0x09ae, 0xcdd: 0x0e82, + 0xcde: 0x0fca, 0xcdf: 0x138e, 0xce0: 0x16be, 0xce1: 0x0bae, 0xce2: 0x0f72, 0xce3: 0x137e, + 0xce4: 0x1412, 0xce5: 0x0d1e, 0xce6: 0x12b6, 0xce7: 0x13da, 0xce8: 0x0c1a, 0xce9: 0x0e0a, + 0xcea: 0x0f12, 0xceb: 0x1016, 0xcec: 0x1522, 0xced: 0x084a, 0xcee: 0x08e2, 0xcef: 0x094e, + 0xcf0: 0x0d86, 0xcf1: 0x0e7a, 0xcf2: 0x0fc6, 0xcf3: 0x10ea, 0xcf4: 0x1272, 0xcf5: 0x1386, + 0xcf6: 0x139e, 0xcf7: 0x14c2, 0xcf8: 0x15ea, 0xcf9: 0x169e, 0xcfa: 0x16ba, 0xcfb: 0x1126, + 0xcfc: 0x1166, 0xcfd: 0x121e, 0xcfe: 0x133e, 0xcff: 0x1576, + // Block 0x34, offset 0xd00 + 0xd00: 0x16c6, 0xd01: 0x1446, 0xd02: 0x0ac2, 0xd03: 0x0c36, 0xd04: 0x11d6, 0xd05: 0x1296, + 0xd06: 0x0ffa, 0xd07: 0x112e, 0xd08: 0x1492, 0xd09: 0x15e2, 0xd0a: 0x0abe, 0xd0b: 0x0b8a, + 0xd0c: 0x0e72, 0xd0d: 0x0f26, 0xd0e: 0x0f5a, 0xd0f: 0x120e, 0xd10: 0x1236, 0xd11: 0x15a2, + 0xd12: 0x094a, 0xd13: 0x12a2, 0xd14: 0x08ee, 0xd15: 0x08ea, 0xd16: 0x1192, 0xd17: 0x1222, + 0xd18: 0x1356, 
0xd19: 0x15aa, 0xd1a: 0x1462, 0xd1b: 0x0d22, 0xd1c: 0x0e6e, 0xd1d: 0x1452, + 0xd1e: 0x07f2, 0xd1f: 0x0b5e, 0xd20: 0x0c8e, 0xd21: 0x102a, 0xd22: 0x10aa, 0xd23: 0x096e, + 0xd24: 0x1136, 0xd25: 0x085a, 0xd26: 0x0c72, 0xd27: 0x07d2, 0xd28: 0x0ee6, 0xd29: 0x0d9e, + 0xd2a: 0x120a, 0xd2b: 0x09c2, 0xd2c: 0x0aae, 0xd2d: 0x10f6, 0xd2e: 0x135e, 0xd2f: 0x1436, + 0xd30: 0x0eb2, 0xd31: 0x14f2, 0xd32: 0x0ede, 0xd33: 0x0d32, 0xd34: 0x1316, 0xd35: 0x0d52, + 0xd36: 0x10a6, 0xd37: 0x0826, 0xd38: 0x08a2, 0xd39: 0x08e6, 0xd3a: 0x0e4e, 0xd3b: 0x11f6, + 0xd3c: 0x12ee, 0xd3d: 0x1442, 0xd3e: 0x1556, 0xd3f: 0x0956, + // Block 0x35, offset 0xd40 + 0xd40: 0x0a0a, 0xd41: 0x0b12, 0xd42: 0x0c2a, 0xd43: 0x0dba, 0xd44: 0x0f76, 0xd45: 0x113a, + 0xd46: 0x1592, 0xd47: 0x1676, 0xd48: 0x16ca, 0xd49: 0x16e2, 0xd4a: 0x0932, 0xd4b: 0x0dee, + 0xd4c: 0x0e9e, 0xd4d: 0x14e6, 0xd4e: 0x0bf6, 0xd4f: 0x0cd2, 0xd50: 0x0cee, 0xd51: 0x0d7e, + 0xd52: 0x0f66, 0xd53: 0x0fb2, 0xd54: 0x1062, 0xd55: 0x1186, 0xd56: 0x122a, 0xd57: 0x128e, + 0xd58: 0x14d6, 0xd59: 0x1366, 0xd5a: 0x14fe, 0xd5b: 0x157a, 0xd5c: 0x090a, 0xd5d: 0x0936, + 0xd5e: 0x0a1e, 0xd5f: 0x0fa2, 0xd60: 0x13ee, 0xd61: 0x1436, 0xd62: 0x0c16, 0xd63: 0x0c86, + 0xd64: 0x0d4a, 0xd65: 0x0eaa, 0xd66: 0x11d2, 0xd67: 0x101e, 0xd68: 0x0836, 0xd69: 0x0a7a, + 0xd6a: 0x0b5e, 0xd6b: 0x0bc2, 0xd6c: 0x0c92, 0xd6d: 0x103a, 0xd6e: 0x1056, 0xd6f: 0x1266, + 0xd70: 0x1286, 0xd71: 0x155e, 0xd72: 0x15de, 0xd73: 0x15ee, 0xd74: 0x162a, 0xd75: 0x084e, + 0xd76: 0x117a, 0xd77: 0x154a, 0xd78: 0x15c6, 0xd79: 0x0caa, 0xd7a: 0x0812, 0xd7b: 0x0872, + 0xd7c: 0x0b62, 0xd7d: 0x0b82, 0xd7e: 0x0daa, 0xd7f: 0x0e6e, + // Block 0x36, offset 0xd80 + 0xd80: 0x0fbe, 0xd81: 0x10c6, 0xd82: 0x1372, 0xd83: 0x1512, 0xd84: 0x171e, 0xd85: 0x0dde, + 0xd86: 0x159e, 0xd87: 0x092e, 0xd88: 0x0e2a, 0xd89: 0x0e36, 0xd8a: 0x0f0a, 0xd8b: 0x0f42, + 0xd8c: 0x1046, 0xd8d: 0x10a2, 0xd8e: 0x1122, 0xd8f: 0x1206, 0xd90: 0x1636, 0xd91: 0x08aa, + 0xd92: 0x0cfe, 0xd93: 0x15ae, 0xd94: 0x0862, 0xd95: 0x0ba6, 0xd96: 0x0f2a, 0xd97: 0x14da, + 0xd98: 0x0c62, 0xd99: 0x0cb2, 0xd9a: 0x0e3e, 0xd9b: 0x102a, 0xd9c: 0x15b6, 0xd9d: 0x0912, + 0xd9e: 0x09fa, 0xd9f: 0x0b92, 0xda0: 0x0dce, 0xda1: 0x0e1a, 0xda2: 0x0e5a, 0xda3: 0x0eee, + 0xda4: 0x1042, 0xda5: 0x10b6, 0xda6: 0x1252, 0xda7: 0x13f2, 0xda8: 0x13fe, 0xda9: 0x1552, + 0xdaa: 0x15d2, 0xdab: 0x097e, 0xdac: 0x0f46, 0xdad: 0x09fe, 0xdae: 0x0fc2, 0xdaf: 0x1066, + 0xdb0: 0x1382, 0xdb1: 0x15ba, 0xdb2: 0x16a6, 0xdb3: 0x16ce, 0xdb4: 0x0e32, 0xdb5: 0x0f22, + 0xdb6: 0x12be, 0xdb7: 0x11b2, 0xdb8: 0x11be, 0xdb9: 0x11e2, 0xdba: 0x1012, 0xdbb: 0x0f9a, + 0xdbc: 0x145e, 0xdbd: 0x082e, 0xdbe: 0x1326, 0xdbf: 0x0916, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0906, 0xdc1: 0x0c06, 0xdc2: 0x0d26, 0xdc3: 0x11ee, 0xdc4: 0x0b4e, 0xdc5: 0x0efe, + 0xdc6: 0x0dea, 0xdc7: 0x14e2, 0xdc8: 0x13e2, 0xdc9: 0x15a6, 0xdca: 0x141e, 0xdcb: 0x0c22, + 0xdcc: 0x0882, 0xdcd: 0x0a56, 0xdd0: 0x0aaa, + 0xdd2: 0x0dda, 0xdd5: 0x08f2, 0xdd6: 0x101a, 0xdd7: 0x10de, + 0xdd8: 0x1142, 0xdd9: 0x115e, 0xdda: 0x1162, 0xddb: 0x1176, 0xddc: 0x15f6, 0xddd: 0x11e6, + 0xdde: 0x126a, 0xde0: 0x138a, 0xde2: 0x144e, + 0xde5: 0x1502, 0xde6: 0x152e, + 0xdea: 0x164a, 0xdeb: 0x164e, 0xdec: 0x1652, 0xded: 0x16b6, 0xdee: 0x1526, 0xdef: 0x15c2, + 0xdf0: 0x0852, 0xdf1: 0x0876, 0xdf2: 0x088a, 0xdf3: 0x0946, 0xdf4: 0x0952, 0xdf5: 0x0992, + 0xdf6: 0x0a46, 0xdf7: 0x0a62, 0xdf8: 0x0a6a, 0xdf9: 0x0aa6, 0xdfa: 0x0ab2, 0xdfb: 0x0b8e, + 0xdfc: 0x0b96, 0xdfd: 0x0c9e, 0xdfe: 0x0cc6, 0xdff: 0x0cce, + // Block 0x38, offset 0xe00 + 0xe00: 0x0ce6, 0xe01: 0x0d92, 0xe02: 0x0dc2, 0xe03: 0x0de2, 0xe04: 
0x0e52, 0xe05: 0x0f16, + 0xe06: 0x0f32, 0xe07: 0x0f62, 0xe08: 0x0fb6, 0xe09: 0x0fd6, 0xe0a: 0x104a, 0xe0b: 0x112a, + 0xe0c: 0x1146, 0xe0d: 0x114e, 0xe0e: 0x114a, 0xe0f: 0x1152, 0xe10: 0x1156, 0xe11: 0x115a, + 0xe12: 0x116e, 0xe13: 0x1172, 0xe14: 0x1196, 0xe15: 0x11aa, 0xe16: 0x11c6, 0xe17: 0x122a, + 0xe18: 0x1232, 0xe19: 0x123a, 0xe1a: 0x124e, 0xe1b: 0x1276, 0xe1c: 0x12c6, 0xe1d: 0x12fa, + 0xe1e: 0x12fa, 0xe1f: 0x1362, 0xe20: 0x140a, 0xe21: 0x1422, 0xe22: 0x1456, 0xe23: 0x145a, + 0xe24: 0x149e, 0xe25: 0x14a2, 0xe26: 0x14fa, 0xe27: 0x1502, 0xe28: 0x15d6, 0xe29: 0x161a, + 0xe2a: 0x1632, 0xe2b: 0x0c96, 0xe2c: 0x184b, 0xe2d: 0x12de, + 0xe30: 0x07da, 0xe31: 0x08de, 0xe32: 0x089e, 0xe33: 0x0846, 0xe34: 0x0886, 0xe35: 0x08b2, + 0xe36: 0x0942, 0xe37: 0x095e, 0xe38: 0x0a46, 0xe39: 0x0a32, 0xe3a: 0x0a42, 0xe3b: 0x0a5e, + 0xe3c: 0x0aaa, 0xe3d: 0x0aba, 0xe3e: 0x0afe, 0xe3f: 0x0b0a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0b26, 0xe41: 0x0b36, 0xe42: 0x0c1e, 0xe43: 0x0c26, 0xe44: 0x0c56, 0xe45: 0x0c76, + 0xe46: 0x0ca6, 0xe47: 0x0cbe, 0xe48: 0x0cae, 0xe49: 0x0cce, 0xe4a: 0x0cc2, 0xe4b: 0x0ce6, + 0xe4c: 0x0d02, 0xe4d: 0x0d5a, 0xe4e: 0x0d66, 0xe4f: 0x0d6e, 0xe50: 0x0d96, 0xe51: 0x0dda, + 0xe52: 0x0e0a, 0xe53: 0x0e0e, 0xe54: 0x0e22, 0xe55: 0x0ea2, 0xe56: 0x0eb2, 0xe57: 0x0f0a, + 0xe58: 0x0f56, 0xe59: 0x0f4e, 0xe5a: 0x0f62, 0xe5b: 0x0f7e, 0xe5c: 0x0fb6, 0xe5d: 0x110e, + 0xe5e: 0x0fda, 0xe5f: 0x100e, 0xe60: 0x101a, 0xe61: 0x105a, 0xe62: 0x1076, 0xe63: 0x109a, + 0xe64: 0x10be, 0xe65: 0x10c2, 0xe66: 0x10de, 0xe67: 0x10e2, 0xe68: 0x10f2, 0xe69: 0x1106, + 0xe6a: 0x1102, 0xe6b: 0x1132, 0xe6c: 0x11ae, 0xe6d: 0x11c6, 0xe6e: 0x11de, 0xe6f: 0x1216, + 0xe70: 0x122a, 0xe71: 0x1246, 0xe72: 0x1276, 0xe73: 0x132a, 0xe74: 0x1352, 0xe75: 0x13c6, + 0xe76: 0x140e, 0xe77: 0x141a, 0xe78: 0x1422, 0xe79: 0x143a, 0xe7a: 0x144e, 0xe7b: 0x143e, + 0xe7c: 0x1456, 0xe7d: 0x1452, 0xe7e: 0x144a, 0xe7f: 0x145a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x1466, 0xe81: 0x14a2, 0xe82: 0x14de, 0xe83: 0x150e, 0xe84: 0x1546, 0xe85: 0x1566, + 0xe86: 0x15b2, 0xe87: 0x15d6, 0xe88: 0x15f6, 0xe89: 0x160a, 0xe8a: 0x161a, 0xe8b: 0x1626, + 0xe8c: 0x1632, 0xe8d: 0x1686, 0xe8e: 0x1726, 0xe8f: 0x17e2, 0xe90: 0x17dd, 0xe91: 0x180f, + 0xe92: 0x0702, 0xe93: 0x072a, 0xe94: 0x072e, 0xe95: 0x1891, 0xe96: 0x18be, 0xe97: 0x1936, + 0xe98: 0x1712, 0xe99: 0x1722, + // Block 0x3b, offset 0xec0 + 0xec0: 0x1b05, 0xec1: 0x1b08, 0xec2: 0x1b0b, 0xec3: 0x1d38, 0xec4: 0x1d3c, 0xec5: 0x1b8f, + 0xec6: 0x1b8f, + 0xed3: 0x1ea5, 0xed4: 0x1e96, 0xed5: 0x1e9b, 0xed6: 0x1eaa, 0xed7: 0x1ea0, + 0xedd: 0x44d1, + 0xede: 0x8116, 0xedf: 0x4543, 0xee0: 0x0320, 0xee1: 0x0308, 0xee2: 0x0311, 0xee3: 0x0314, + 0xee4: 0x0317, 0xee5: 0x031a, 0xee6: 0x031d, 0xee7: 0x0323, 0xee8: 0x0326, 0xee9: 0x0017, + 0xeea: 0x4531, 0xeeb: 0x4537, 0xeec: 0x4635, 0xeed: 0x463d, 0xeee: 0x4489, 0xeef: 0x448f, + 0xef0: 0x4495, 0xef1: 0x449b, 0xef2: 0x44a7, 0xef3: 0x44ad, 0xef4: 0x44b3, 0xef5: 0x44bf, + 0xef6: 0x44c5, 0xef8: 0x44cb, 0xef9: 0x44d7, 0xefa: 0x44dd, 0xefb: 0x44e3, + 0xefc: 0x44ef, 0xefe: 0x44f5, + // Block 0x3c, offset 0xf00 + 0xf00: 0x44fb, 0xf01: 0x4501, 0xf03: 0x4507, 0xf04: 0x450d, + 0xf06: 0x4519, 0xf07: 0x451f, 0xf08: 0x4525, 0xf09: 0x452b, 0xf0a: 0x453d, 0xf0b: 0x44b9, + 0xf0c: 0x44a1, 0xf0d: 0x44e9, 0xf0e: 0x4513, 0xf0f: 0x1eaf, 0xf10: 0x038c, 0xf11: 0x038c, + 0xf12: 0x0395, 0xf13: 0x0395, 0xf14: 0x0395, 0xf15: 0x0395, 0xf16: 0x0398, 0xf17: 0x0398, + 0xf18: 0x0398, 0xf19: 0x0398, 0xf1a: 0x039e, 0xf1b: 0x039e, 0xf1c: 0x039e, 0xf1d: 0x039e, + 0xf1e: 0x0392, 0xf1f: 0x0392, 0xf20: 0x0392, 0xf21: 
0x0392, 0xf22: 0x039b, 0xf23: 0x039b, + 0xf24: 0x039b, 0xf25: 0x039b, 0xf26: 0x038f, 0xf27: 0x038f, 0xf28: 0x038f, 0xf29: 0x038f, + 0xf2a: 0x03c2, 0xf2b: 0x03c2, 0xf2c: 0x03c2, 0xf2d: 0x03c2, 0xf2e: 0x03c5, 0xf2f: 0x03c5, + 0xf30: 0x03c5, 0xf31: 0x03c5, 0xf32: 0x03a4, 0xf33: 0x03a4, 0xf34: 0x03a4, 0xf35: 0x03a4, + 0xf36: 0x03a1, 0xf37: 0x03a1, 0xf38: 0x03a1, 0xf39: 0x03a1, 0xf3a: 0x03a7, 0xf3b: 0x03a7, + 0xf3c: 0x03a7, 0xf3d: 0x03a7, 0xf3e: 0x03aa, 0xf3f: 0x03aa, + // Block 0x3d, offset 0xf40 + 0xf40: 0x03aa, 0xf41: 0x03aa, 0xf42: 0x03b3, 0xf43: 0x03b3, 0xf44: 0x03b0, 0xf45: 0x03b0, + 0xf46: 0x03b6, 0xf47: 0x03b6, 0xf48: 0x03ad, 0xf49: 0x03ad, 0xf4a: 0x03bc, 0xf4b: 0x03bc, + 0xf4c: 0x03b9, 0xf4d: 0x03b9, 0xf4e: 0x03c8, 0xf4f: 0x03c8, 0xf50: 0x03c8, 0xf51: 0x03c8, + 0xf52: 0x03ce, 0xf53: 0x03ce, 0xf54: 0x03ce, 0xf55: 0x03ce, 0xf56: 0x03d4, 0xf57: 0x03d4, + 0xf58: 0x03d4, 0xf59: 0x03d4, 0xf5a: 0x03d1, 0xf5b: 0x03d1, 0xf5c: 0x03d1, 0xf5d: 0x03d1, + 0xf5e: 0x03d7, 0xf5f: 0x03d7, 0xf60: 0x03da, 0xf61: 0x03da, 0xf62: 0x03da, 0xf63: 0x03da, + 0xf64: 0x45af, 0xf65: 0x45af, 0xf66: 0x03e0, 0xf67: 0x03e0, 0xf68: 0x03e0, 0xf69: 0x03e0, + 0xf6a: 0x03dd, 0xf6b: 0x03dd, 0xf6c: 0x03dd, 0xf6d: 0x03dd, 0xf6e: 0x03fb, 0xf6f: 0x03fb, + 0xf70: 0x45a9, 0xf71: 0x45a9, + // Block 0x3e, offset 0xf80 + 0xf93: 0x03cb, 0xf94: 0x03cb, 0xf95: 0x03cb, 0xf96: 0x03cb, 0xf97: 0x03e9, + 0xf98: 0x03e9, 0xf99: 0x03e6, 0xf9a: 0x03e6, 0xf9b: 0x03ec, 0xf9c: 0x03ec, 0xf9d: 0x217f, + 0xf9e: 0x03f2, 0xf9f: 0x03f2, 0xfa0: 0x03e3, 0xfa1: 0x03e3, 0xfa2: 0x03ef, 0xfa3: 0x03ef, + 0xfa4: 0x03f8, 0xfa5: 0x03f8, 0xfa6: 0x03f8, 0xfa7: 0x03f8, 0xfa8: 0x0380, 0xfa9: 0x0380, + 0xfaa: 0x26da, 0xfab: 0x26da, 0xfac: 0x274a, 0xfad: 0x274a, 0xfae: 0x2719, 0xfaf: 0x2719, + 0xfb0: 0x2735, 0xfb1: 0x2735, 0xfb2: 0x272e, 0xfb3: 0x272e, 0xfb4: 0x273c, 0xfb5: 0x273c, + 0xfb6: 0x2743, 0xfb7: 0x2743, 0xfb8: 0x2743, 0xfb9: 0x2720, 0xfba: 0x2720, 0xfbb: 0x2720, + 0xfbc: 0x03f5, 0xfbd: 0x03f5, 0xfbe: 0x03f5, 0xfbf: 0x03f5, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x26e1, 0xfc1: 0x26e8, 0xfc2: 0x2704, 0xfc3: 0x2720, 0xfc4: 0x2727, 0xfc5: 0x1eb9, + 0xfc6: 0x1ebe, 0xfc7: 0x1ec3, 0xfc8: 0x1ed2, 0xfc9: 0x1ee1, 0xfca: 0x1ee6, 0xfcb: 0x1eeb, + 0xfcc: 0x1ef0, 0xfcd: 0x1ef5, 0xfce: 0x1f04, 0xfcf: 0x1f13, 0xfd0: 0x1f18, 0xfd1: 0x1f1d, + 0xfd2: 0x1f2c, 0xfd3: 0x1f3b, 0xfd4: 0x1f40, 0xfd5: 0x1f45, 0xfd6: 0x1f4a, 0xfd7: 0x1f59, + 0xfd8: 0x1f5e, 0xfd9: 0x1f6d, 0xfda: 0x1f72, 0xfdb: 0x1f77, 0xfdc: 0x1f86, 0xfdd: 0x1f8b, + 0xfde: 0x1f90, 0xfdf: 0x1f9a, 0xfe0: 0x1fd6, 0xfe1: 0x1fe5, 0xfe2: 0x1ff4, 0xfe3: 0x1ff9, + 0xfe4: 0x1ffe, 0xfe5: 0x2008, 0xfe6: 0x2017, 0xfe7: 0x201c, 0xfe8: 0x202b, 0xfe9: 0x2030, + 0xfea: 0x2035, 0xfeb: 0x2044, 0xfec: 0x2049, 0xfed: 0x2058, 0xfee: 0x205d, 0xfef: 0x2062, + 0xff0: 0x2067, 0xff1: 0x206c, 0xff2: 0x2071, 0xff3: 0x2076, 0xff4: 0x207b, 0xff5: 0x2080, + 0xff6: 0x2085, 0xff7: 0x208a, 0xff8: 0x208f, 0xff9: 0x2094, 0xffa: 0x2099, 0xffb: 0x209e, + 0xffc: 0x20a3, 0xffd: 0x20a8, 0xffe: 0x20ad, 0xfff: 0x20b7, + // Block 0x40, offset 0x1000 + 0x1000: 0x20bc, 0x1001: 0x20c1, 0x1002: 0x20c6, 0x1003: 0x20d0, 0x1004: 0x20d5, 0x1005: 0x20df, + 0x1006: 0x20e4, 0x1007: 0x20e9, 0x1008: 0x20ee, 0x1009: 0x20f3, 0x100a: 0x20f8, 0x100b: 0x20fd, + 0x100c: 0x2102, 0x100d: 0x2107, 0x100e: 0x2116, 0x100f: 0x2125, 0x1010: 0x212a, 0x1011: 0x212f, + 0x1012: 0x2134, 0x1013: 0x2139, 0x1014: 0x213e, 0x1015: 0x2148, 0x1016: 0x214d, 0x1017: 0x2152, + 0x1018: 0x2161, 0x1019: 0x2170, 0x101a: 0x2175, 0x101b: 0x4561, 0x101c: 0x4567, 0x101d: 0x459d, + 0x101e: 0x45f4, 0x101f: 0x45fb, 
0x1020: 0x4602, 0x1021: 0x4609, 0x1022: 0x4610, 0x1023: 0x4617, + 0x1024: 0x26f6, 0x1025: 0x26fd, 0x1026: 0x2704, 0x1027: 0x270b, 0x1028: 0x2720, 0x1029: 0x2727, + 0x102a: 0x1ec8, 0x102b: 0x1ecd, 0x102c: 0x1ed2, 0x102d: 0x1ed7, 0x102e: 0x1ee1, 0x102f: 0x1ee6, + 0x1030: 0x1efa, 0x1031: 0x1eff, 0x1032: 0x1f04, 0x1033: 0x1f09, 0x1034: 0x1f13, 0x1035: 0x1f18, + 0x1036: 0x1f22, 0x1037: 0x1f27, 0x1038: 0x1f2c, 0x1039: 0x1f31, 0x103a: 0x1f3b, 0x103b: 0x1f40, + 0x103c: 0x206c, 0x103d: 0x2071, 0x103e: 0x2080, 0x103f: 0x2085, + // Block 0x41, offset 0x1040 + 0x1040: 0x208a, 0x1041: 0x209e, 0x1042: 0x20a3, 0x1043: 0x20a8, 0x1044: 0x20ad, 0x1045: 0x20c6, + 0x1046: 0x20d0, 0x1047: 0x20d5, 0x1048: 0x20da, 0x1049: 0x20ee, 0x104a: 0x210c, 0x104b: 0x2111, + 0x104c: 0x2116, 0x104d: 0x211b, 0x104e: 0x2125, 0x104f: 0x212a, 0x1050: 0x459d, 0x1051: 0x2157, + 0x1052: 0x215c, 0x1053: 0x2161, 0x1054: 0x2166, 0x1055: 0x2170, 0x1056: 0x2175, 0x1057: 0x26e1, + 0x1058: 0x26e8, 0x1059: 0x26ef, 0x105a: 0x2704, 0x105b: 0x2712, 0x105c: 0x1eb9, 0x105d: 0x1ebe, + 0x105e: 0x1ec3, 0x105f: 0x1ed2, 0x1060: 0x1edc, 0x1061: 0x1eeb, 0x1062: 0x1ef0, 0x1063: 0x1ef5, + 0x1064: 0x1f04, 0x1065: 0x1f0e, 0x1066: 0x1f2c, 0x1067: 0x1f45, 0x1068: 0x1f4a, 0x1069: 0x1f59, + 0x106a: 0x1f5e, 0x106b: 0x1f6d, 0x106c: 0x1f77, 0x106d: 0x1f86, 0x106e: 0x1f8b, 0x106f: 0x1f90, + 0x1070: 0x1f9a, 0x1071: 0x1fd6, 0x1072: 0x1fdb, 0x1073: 0x1fe5, 0x1074: 0x1ff4, 0x1075: 0x1ff9, + 0x1076: 0x1ffe, 0x1077: 0x2008, 0x1078: 0x2017, 0x1079: 0x202b, 0x107a: 0x2030, 0x107b: 0x2035, + 0x107c: 0x2044, 0x107d: 0x2049, 0x107e: 0x2058, 0x107f: 0x205d, + // Block 0x42, offset 0x1080 + 0x1080: 0x2062, 0x1081: 0x2067, 0x1082: 0x2076, 0x1083: 0x207b, 0x1084: 0x208f, 0x1085: 0x2094, + 0x1086: 0x2099, 0x1087: 0x209e, 0x1088: 0x20a3, 0x1089: 0x20b7, 0x108a: 0x20bc, 0x108b: 0x20c1, + 0x108c: 0x20c6, 0x108d: 0x20cb, 0x108e: 0x20df, 0x108f: 0x20e4, 0x1090: 0x20e9, 0x1091: 0x20ee, + 0x1092: 0x20fd, 0x1093: 0x2102, 0x1094: 0x2107, 0x1095: 0x2116, 0x1096: 0x2120, 0x1097: 0x212f, + 0x1098: 0x2134, 0x1099: 0x4591, 0x109a: 0x2148, 0x109b: 0x214d, 0x109c: 0x2152, 0x109d: 0x2161, + 0x109e: 0x216b, 0x109f: 0x2704, 0x10a0: 0x2712, 0x10a1: 0x1ed2, 0x10a2: 0x1edc, 0x10a3: 0x1f04, + 0x10a4: 0x1f0e, 0x10a5: 0x1f2c, 0x10a6: 0x1f36, 0x10a7: 0x1f9a, 0x10a8: 0x1f9f, 0x10a9: 0x1fc2, + 0x10aa: 0x1fc7, 0x10ab: 0x209e, 0x10ac: 0x20a3, 0x10ad: 0x20c6, 0x10ae: 0x2116, 0x10af: 0x2120, + 0x10b0: 0x2161, 0x10b1: 0x216b, 0x10b2: 0x4645, 0x10b3: 0x464d, 0x10b4: 0x4655, 0x10b5: 0x2021, + 0x10b6: 0x2026, 0x10b7: 0x203a, 0x10b8: 0x203f, 0x10b9: 0x204e, 0x10ba: 0x2053, 0x10bb: 0x1fa4, + 0x10bc: 0x1fa9, 0x10bd: 0x1fcc, 0x10be: 0x1fd1, 0x10bf: 0x1f63, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1f68, 0x10c1: 0x1f4f, 0x10c2: 0x1f54, 0x10c3: 0x1f7c, 0x10c4: 0x1f81, 0x10c5: 0x1fea, + 0x10c6: 0x1fef, 0x10c7: 0x200d, 0x10c8: 0x2012, 0x10c9: 0x1fae, 0x10ca: 0x1fb3, 0x10cb: 0x1fb8, + 0x10cc: 0x1fc2, 0x10cd: 0x1fbd, 0x10ce: 0x1f95, 0x10cf: 0x1fe0, 0x10d0: 0x2003, 0x10d1: 0x2021, + 0x10d2: 0x2026, 0x10d3: 0x203a, 0x10d4: 0x203f, 0x10d5: 0x204e, 0x10d6: 0x2053, 0x10d7: 0x1fa4, + 0x10d8: 0x1fa9, 0x10d9: 0x1fcc, 0x10da: 0x1fd1, 0x10db: 0x1f63, 0x10dc: 0x1f68, 0x10dd: 0x1f4f, + 0x10de: 0x1f54, 0x10df: 0x1f7c, 0x10e0: 0x1f81, 0x10e1: 0x1fea, 0x10e2: 0x1fef, 0x10e3: 0x200d, + 0x10e4: 0x2012, 0x10e5: 0x1fae, 0x10e6: 0x1fb3, 0x10e7: 0x1fb8, 0x10e8: 0x1fc2, 0x10e9: 0x1fbd, + 0x10ea: 0x1f95, 0x10eb: 0x1fe0, 0x10ec: 0x2003, 0x10ed: 0x1fae, 0x10ee: 0x1fb3, 0x10ef: 0x1fb8, + 0x10f0: 0x1fc2, 0x10f1: 0x1f9f, 0x10f2: 0x1fc7, 0x10f3: 
0x201c, 0x10f4: 0x1f86, 0x10f5: 0x1f8b, + 0x10f6: 0x1f90, 0x10f7: 0x1fae, 0x10f8: 0x1fb3, 0x10f9: 0x1fb8, 0x10fa: 0x201c, 0x10fb: 0x202b, + 0x10fc: 0x4549, 0x10fd: 0x4549, + // Block 0x44, offset 0x1100 + 0x1110: 0x2441, 0x1111: 0x2456, + 0x1112: 0x2456, 0x1113: 0x245d, 0x1114: 0x2464, 0x1115: 0x2479, 0x1116: 0x2480, 0x1117: 0x2487, + 0x1118: 0x24aa, 0x1119: 0x24aa, 0x111a: 0x24cd, 0x111b: 0x24c6, 0x111c: 0x24e2, 0x111d: 0x24d4, + 0x111e: 0x24db, 0x111f: 0x24fe, 0x1120: 0x24fe, 0x1121: 0x24f7, 0x1122: 0x2505, 0x1123: 0x2505, + 0x1124: 0x252f, 0x1125: 0x252f, 0x1126: 0x254b, 0x1127: 0x2513, 0x1128: 0x2513, 0x1129: 0x250c, + 0x112a: 0x2521, 0x112b: 0x2521, 0x112c: 0x2528, 0x112d: 0x2528, 0x112e: 0x2552, 0x112f: 0x2560, + 0x1130: 0x2560, 0x1131: 0x2567, 0x1132: 0x2567, 0x1133: 0x256e, 0x1134: 0x2575, 0x1135: 0x257c, + 0x1136: 0x2583, 0x1137: 0x2583, 0x1138: 0x258a, 0x1139: 0x2598, 0x113a: 0x25a6, 0x113b: 0x259f, + 0x113c: 0x25ad, 0x113d: 0x25ad, 0x113e: 0x25c2, 0x113f: 0x25c9, + // Block 0x45, offset 0x1140 + 0x1140: 0x25fa, 0x1141: 0x2608, 0x1142: 0x2601, 0x1143: 0x25e5, 0x1144: 0x25e5, 0x1145: 0x260f, + 0x1146: 0x260f, 0x1147: 0x2616, 0x1148: 0x2616, 0x1149: 0x2640, 0x114a: 0x2647, 0x114b: 0x264e, + 0x114c: 0x2624, 0x114d: 0x2632, 0x114e: 0x2655, 0x114f: 0x265c, + 0x1152: 0x262b, 0x1153: 0x26b0, 0x1154: 0x26b7, 0x1155: 0x268d, 0x1156: 0x2694, 0x1157: 0x2678, + 0x1158: 0x2678, 0x1159: 0x267f, 0x115a: 0x26a9, 0x115b: 0x26a2, 0x115c: 0x26cc, 0x115d: 0x26cc, + 0x115e: 0x243a, 0x115f: 0x244f, 0x1160: 0x2448, 0x1161: 0x2472, 0x1162: 0x246b, 0x1163: 0x2495, + 0x1164: 0x248e, 0x1165: 0x24b8, 0x1166: 0x249c, 0x1167: 0x24b1, 0x1168: 0x24e9, 0x1169: 0x2536, + 0x116a: 0x251a, 0x116b: 0x2559, 0x116c: 0x25f3, 0x116d: 0x261d, 0x116e: 0x26c5, 0x116f: 0x26be, + 0x1170: 0x26d3, 0x1171: 0x266a, 0x1172: 0x25d0, 0x1173: 0x269b, 0x1174: 0x25c2, 0x1175: 0x25fa, + 0x1176: 0x2591, 0x1177: 0x25de, 0x1178: 0x2671, 0x1179: 0x2663, 0x117a: 0x25ec, 0x117b: 0x25d7, + 0x117c: 0x25ec, 0x117d: 0x2671, 0x117e: 0x24a3, 0x117f: 0x24bf, + // Block 0x46, offset 0x1180 + 0x1180: 0x2639, 0x1181: 0x25b4, 0x1182: 0x2433, 0x1183: 0x25d7, 0x1184: 0x257c, 0x1185: 0x254b, + 0x1186: 0x24f0, 0x1187: 0x2686, + 0x11b0: 0x2544, 0x11b1: 0x25bb, 0x11b2: 0x28f6, 0x11b3: 0x28ed, 0x11b4: 0x2923, 0x11b5: 0x2911, + 0x11b6: 0x28ff, 0x11b7: 0x291a, 0x11b8: 0x292c, 0x11b9: 0x253d, 0x11ba: 0x2db3, 0x11bb: 0x2c33, + 0x11bc: 0x2908, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x057e, + 0x11d2: 0x0582, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x05ba, + 0x11d8: 0x05be, 0x11d9: 0x1c8c, + 0x11e0: 0x8133, 0x11e1: 0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x19a0, 0x11f1: 0x053a, 0x11f2: 0x0536, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x05b2, 0x11fa: 0x05b6, 0x11fb: 0x05a6, + 0x11fc: 0x05aa, 0x11fd: 0x058e, 0x11fe: 0x0592, 0x11ff: 0x0586, + // Block 0x48, offset 0x1200 + 0x1200: 0x058a, 0x1201: 0x0596, 0x1202: 0x059a, 0x1203: 0x059e, 0x1204: 0x05a2, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x43aa, 0x120a: 0x43aa, 0x120b: 0x43aa, + 0x120c: 0x43aa, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x057e, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x053a, 0x1219: 0x0011, 0x121a: 
0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x05b2, + 0x121e: 0x05b6, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x43eb, 0x1231: 0x456d, 0x1232: 0x43f0, 0x1234: 0x43f5, + 0x1236: 0x43fa, 0x1237: 0x4573, 0x1238: 0x43ff, 0x1239: 0x4579, 0x123a: 0x4404, 0x123b: 0x457f, + 0x123c: 0x4409, 0x123d: 0x4585, 0x123e: 0x440e, 0x123f: 0x458b, + // Block 0x49, offset 0x1240 + 0x1240: 0x0329, 0x1241: 0x454f, 0x1242: 0x454f, 0x1243: 0x4555, 0x1244: 0x4555, 0x1245: 0x4597, + 0x1246: 0x4597, 0x1247: 0x455b, 0x1248: 0x455b, 0x1249: 0x45a3, 0x124a: 0x45a3, 0x124b: 0x45a3, + 0x124c: 0x45a3, 0x124d: 0x032c, 0x124e: 0x032c, 0x124f: 0x032f, 0x1250: 0x032f, 0x1251: 0x032f, + 0x1252: 0x032f, 0x1253: 0x0332, 0x1254: 0x0332, 0x1255: 0x0335, 0x1256: 0x0335, 0x1257: 0x0335, + 0x1258: 0x0335, 0x1259: 0x0338, 0x125a: 0x0338, 0x125b: 0x0338, 0x125c: 0x0338, 0x125d: 0x033b, + 0x125e: 0x033b, 0x125f: 0x033b, 0x1260: 0x033b, 0x1261: 0x033e, 0x1262: 0x033e, 0x1263: 0x033e, + 0x1264: 0x033e, 0x1265: 0x0341, 0x1266: 0x0341, 0x1267: 0x0341, 0x1268: 0x0341, 0x1269: 0x0344, + 0x126a: 0x0344, 0x126b: 0x0347, 0x126c: 0x0347, 0x126d: 0x034a, 0x126e: 0x034a, 0x126f: 0x034d, + 0x1270: 0x034d, 0x1271: 0x0350, 0x1272: 0x0350, 0x1273: 0x0350, 0x1274: 0x0350, 0x1275: 0x0353, + 0x1276: 0x0353, 0x1277: 0x0353, 0x1278: 0x0353, 0x1279: 0x0356, 0x127a: 0x0356, 0x127b: 0x0356, + 0x127c: 0x0356, 0x127d: 0x0359, 0x127e: 0x0359, 0x127f: 0x0359, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0359, 0x1281: 0x035c, 0x1282: 0x035c, 0x1283: 0x035c, 0x1284: 0x035c, 0x1285: 0x035f, + 0x1286: 0x035f, 0x1287: 0x035f, 0x1288: 0x035f, 0x1289: 0x0362, 0x128a: 0x0362, 0x128b: 0x0362, + 0x128c: 0x0362, 0x128d: 0x0365, 0x128e: 0x0365, 0x128f: 0x0365, 0x1290: 0x0365, 0x1291: 0x0368, + 0x1292: 0x0368, 0x1293: 0x0368, 0x1294: 0x0368, 0x1295: 0x036b, 0x1296: 0x036b, 0x1297: 0x036b, + 0x1298: 0x036b, 0x1299: 0x036e, 0x129a: 0x036e, 0x129b: 0x036e, 0x129c: 0x036e, 0x129d: 0x0371, + 0x129e: 0x0371, 0x129f: 0x0371, 0x12a0: 0x0371, 0x12a1: 0x0374, 0x12a2: 0x0374, 0x12a3: 0x0374, + 0x12a4: 0x0374, 0x12a5: 0x0377, 0x12a6: 0x0377, 0x12a7: 0x0377, 0x12a8: 0x0377, 0x12a9: 0x037a, + 0x12aa: 0x037a, 0x12ab: 0x037a, 0x12ac: 0x037a, 0x12ad: 0x037d, 0x12ae: 0x037d, 0x12af: 0x0380, + 0x12b0: 0x0380, 0x12b1: 0x0383, 0x12b2: 0x0383, 0x12b3: 0x0383, 0x12b4: 0x0383, 0x12b5: 0x2f41, + 0x12b6: 0x2f41, 0x12b7: 0x2f49, 0x12b8: 0x2f49, 0x12b9: 0x2f51, 0x12ba: 0x2f51, 0x12bb: 0x20b2, + 0x12bc: 0x20b2, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x056e, 0x12e0: 0x0572, 0x12e1: 0x0582, 0x12e2: 0x0596, 0x12e3: 0x059a, + 0x12e4: 0x057e, 0x12e5: 0x06a6, 0x12e6: 0x069e, 0x12e7: 0x05c2, 0x12e8: 0x05ca, 0x12e9: 0x05d2, + 0x12ea: 0x05da, 0x12eb: 0x05e2, 0x12ec: 0x0666, 0x12ed: 0x066e, 0x12ee: 0x0676, 0x12ef: 0x061a, + 0x12f0: 0x06aa, 0x12f1: 0x05c6, 0x12f2: 0x05ce, 0x12f3: 0x05d6, 0x12f4: 0x05de, 0x12f5: 0x05e6, + 0x12f6: 0x05ea, 0x12f7: 0x05ee, 
0x12f8: 0x05f2, 0x12f9: 0x05f6, 0x12fa: 0x05fa, 0x12fb: 0x05fe, + 0x12fc: 0x0602, 0x12fd: 0x0606, 0x12fe: 0x060a, 0x12ff: 0x060e, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0612, 0x1301: 0x0616, 0x1302: 0x061e, 0x1303: 0x0622, 0x1304: 0x0626, 0x1305: 0x062a, + 0x1306: 0x062e, 0x1307: 0x0632, 0x1308: 0x0636, 0x1309: 0x063a, 0x130a: 0x063e, 0x130b: 0x0642, + 0x130c: 0x0646, 0x130d: 0x064a, 0x130e: 0x064e, 0x130f: 0x0652, 0x1310: 0x0656, 0x1311: 0x065a, + 0x1312: 0x065e, 0x1313: 0x0662, 0x1314: 0x066a, 0x1315: 0x0672, 0x1316: 0x067a, 0x1317: 0x067e, + 0x1318: 0x0682, 0x1319: 0x0686, 0x131a: 0x068a, 0x131b: 0x068e, 0x131c: 0x0692, 0x131d: 0x06a2, + 0x131e: 0x4bb9, 0x131f: 0x4bbf, 0x1320: 0x04b6, 0x1321: 0x0406, 0x1322: 0x040a, 0x1323: 0x4b7c, + 0x1324: 0x040e, 0x1325: 0x4b82, 0x1326: 0x4b88, 0x1327: 0x0412, 0x1328: 0x0416, 0x1329: 0x041a, + 0x132a: 0x4b8e, 0x132b: 0x4b94, 0x132c: 0x4b9a, 0x132d: 0x4ba0, 0x132e: 0x4ba6, 0x132f: 0x4bac, + 0x1330: 0x045a, 0x1331: 0x041e, 0x1332: 0x0422, 0x1333: 0x0426, 0x1334: 0x046e, 0x1335: 0x042a, + 0x1336: 0x042e, 0x1337: 0x0432, 0x1338: 0x0436, 0x1339: 0x043a, 0x133a: 0x043e, 0x133b: 0x0442, + 0x133c: 0x0446, 0x133d: 0x044a, 0x133e: 0x044e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x4afe, 0x1343: 0x4b04, 0x1344: 0x4b0a, 0x1345: 0x4b10, + 0x1346: 0x4b16, 0x1347: 0x4b1c, 0x134a: 0x4b22, 0x134b: 0x4b28, + 0x134c: 0x4b2e, 0x134d: 0x4b34, 0x134e: 0x4b3a, 0x134f: 0x4b40, + 0x1352: 0x4b46, 0x1353: 0x4b4c, 0x1354: 0x4b52, 0x1355: 0x4b58, 0x1356: 0x4b5e, 0x1357: 0x4b64, + 0x135a: 0x4b6a, 0x135b: 0x4b70, 0x135c: 0x4b76, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x43a5, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x053e, 0x1368: 0x0562, 0x1369: 0x0542, + 0x136a: 0x0546, 0x136b: 0x054a, 0x136c: 0x054e, 0x136d: 0x0566, 0x136e: 0x056a, + // Block 0x4e, offset 0x1380 + 0x1381: 0x01f1, 0x1382: 0x01f4, 0x1383: 0x00d4, 0x1384: 0x01be, 0x1385: 0x010d, + 0x1387: 0x01d3, 0x1388: 0x174e, 0x1389: 0x01d9, 0x138a: 0x01d6, 0x138b: 0x0116, + 0x138c: 0x0119, 0x138d: 0x0526, 0x138e: 0x011c, 0x138f: 0x0128, 0x1390: 0x01e5, 0x1391: 0x013a, + 0x1392: 0x0134, 0x1393: 0x012e, 0x1394: 0x01c1, 0x1395: 0x00e0, 0x1396: 0x01c4, 0x1397: 0x0143, + 0x1398: 0x0194, 0x1399: 0x01e8, 0x139a: 0x01eb, 0x139b: 0x0152, 0x139c: 0x1756, 0x139d: 0x1742, + 0x139e: 0x0158, 0x139f: 0x175b, 0x13a0: 0x01a9, 0x13a1: 0x1760, 0x13a2: 0x00da, 0x13a3: 0x0170, + 0x13a4: 0x0173, 0x13a5: 0x00a3, 0x13a6: 0x017c, 0x13a7: 0x1765, 0x13a8: 0x0182, 0x13a9: 0x0185, + 0x13aa: 0x0188, 0x13ab: 0x01e2, 0x13ac: 0x01dc, 0x13ad: 0x1752, 0x13ae: 0x01df, 0x13af: 0x0197, + 0x13b0: 0x0576, 0x13b2: 0x01ac, 0x13b3: 0x01cd, 0x13b4: 0x01d0, 0x13b5: 0x01bb, + 0x13b6: 0x00f5, 0x13b7: 0x00f8, 0x13b8: 0x00fb, 0x13b9: 0x176a, 0x13ba: 0x176f, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0063, 0x13c1: 0x0065, 0x13c2: 0x0067, 0x13c3: 0x0069, 0x13c4: 0x006b, 0x13c5: 0x006d, + 0x13c6: 0x006f, 0x13c7: 0x0071, 0x13c8: 0x0073, 0x13c9: 0x0075, 0x13ca: 0x0083, 0x13cb: 0x0085, + 0x13cc: 0x0087, 0x13cd: 0x0089, 0x13ce: 0x008b, 0x13cf: 0x008d, 0x13d0: 0x008f, 0x13d1: 0x0091, + 0x13d2: 0x0093, 0x13d3: 0x0095, 0x13d4: 0x0097, 0x13d5: 0x0099, 0x13d6: 0x009b, 0x13d7: 0x009d, + 0x13d8: 0x009f, 0x13d9: 0x00a1, 0x13da: 0x00a3, 0x13db: 0x00a5, 0x13dc: 0x00a7, 0x13dd: 0x00a9, + 0x13de: 0x00ab, 0x13df: 0x00ad, 0x13e0: 0x00af, 0x13e1: 0x00b1, 0x13e2: 0x00b3, 0x13e3: 0x00b5, + 0x13e4: 0x00e3, 0x13e5: 0x0101, 0x13e8: 0x01f7, 0x13e9: 0x01fa, + 0x13ea: 0x01fd, 0x13eb: 0x0200, 0x13ec: 0x0203, 0x13ed: 0x0206, 0x13ee: 0x0209, 0x13ef: 0x020c, + 0x13f0: 0x020f, 0x13f1: 
0x0212, 0x13f2: 0x0215, 0x13f3: 0x0218, 0x13f4: 0x021b, 0x13f5: 0x021e, + 0x13f6: 0x0221, 0x13f7: 0x0224, 0x13f8: 0x0227, 0x13f9: 0x020c, 0x13fa: 0x022a, 0x13fb: 0x022d, + 0x13fc: 0x0230, 0x13fd: 0x0233, 0x13fe: 0x0236, 0x13ff: 0x0239, + // Block 0x50, offset 0x1400 + 0x1400: 0x0281, 0x1401: 0x0284, 0x1402: 0x0287, 0x1403: 0x0552, 0x1404: 0x024b, 0x1405: 0x0254, + 0x1406: 0x025a, 0x1407: 0x027e, 0x1408: 0x026f, 0x1409: 0x026c, 0x140a: 0x028a, 0x140b: 0x028d, + 0x140e: 0x0021, 0x140f: 0x0023, 0x1410: 0x0025, 0x1411: 0x0027, + 0x1412: 0x0029, 0x1413: 0x002b, 0x1414: 0x002d, 0x1415: 0x002f, 0x1416: 0x0031, 0x1417: 0x0033, + 0x1418: 0x0021, 0x1419: 0x0023, 0x141a: 0x0025, 0x141b: 0x0027, 0x141c: 0x0029, 0x141d: 0x002b, + 0x141e: 0x002d, 0x141f: 0x002f, 0x1420: 0x0031, 0x1421: 0x0033, 0x1422: 0x0021, 0x1423: 0x0023, + 0x1424: 0x0025, 0x1425: 0x0027, 0x1426: 0x0029, 0x1427: 0x002b, 0x1428: 0x002d, 0x1429: 0x002f, + 0x142a: 0x0031, 0x142b: 0x0033, 0x142c: 0x0021, 0x142d: 0x0023, 0x142e: 0x0025, 0x142f: 0x0027, + 0x1430: 0x0029, 0x1431: 0x002b, 0x1432: 0x002d, 0x1433: 0x002f, 0x1434: 0x0031, 0x1435: 0x0033, + 0x1436: 0x0021, 0x1437: 0x0023, 0x1438: 0x0025, 0x1439: 0x0027, 0x143a: 0x0029, 0x143b: 0x002b, + 0x143c: 0x002d, 0x143d: 0x002f, 0x143e: 0x0031, 0x143f: 0x0033, + // Block 0x51, offset 0x1440 + 0x1440: 0x8133, 0x1441: 0x8133, 0x1442: 0x8133, 0x1443: 0x8133, 0x1444: 0x8133, 0x1445: 0x8133, + 0x1446: 0x8133, 0x1448: 0x8133, 0x1449: 0x8133, 0x144a: 0x8133, 0x144b: 0x8133, + 0x144c: 0x8133, 0x144d: 0x8133, 0x144e: 0x8133, 0x144f: 0x8133, 0x1450: 0x8133, 0x1451: 0x8133, + 0x1452: 0x8133, 0x1453: 0x8133, 0x1454: 0x8133, 0x1455: 0x8133, 0x1456: 0x8133, 0x1457: 0x8133, + 0x1458: 0x8133, 0x145b: 0x8133, 0x145c: 0x8133, 0x145d: 0x8133, + 0x145e: 0x8133, 0x145f: 0x8133, 0x1460: 0x8133, 0x1461: 0x8133, 0x1463: 0x8133, + 0x1464: 0x8133, 0x1466: 0x8133, 0x1467: 0x8133, 0x1468: 0x8133, 0x1469: 0x8133, + 0x146a: 0x8133, + 0x1470: 0x0290, 0x1471: 0x0293, 0x1472: 0x0296, 0x1473: 0x0299, 0x1474: 0x029c, 0x1475: 0x029f, + 0x1476: 0x02a2, 0x1477: 0x02a5, 0x1478: 0x02a8, 0x1479: 0x02ab, 0x147a: 0x02ae, 0x147b: 0x02b1, + 0x147c: 0x02b7, 0x147d: 0x02ba, 0x147e: 0x02bd, 0x147f: 0x02c0, + // Block 0x52, offset 0x1480 + 0x1480: 0x02c3, 0x1481: 0x02c6, 0x1482: 0x02c9, 0x1483: 0x02cc, 0x1484: 0x02cf, 0x1485: 0x02d2, + 0x1486: 0x02d5, 0x1487: 0x02db, 0x1488: 0x02e1, 0x1489: 0x02e4, 0x148a: 0x1736, 0x148b: 0x0302, + 0x148c: 0x02ea, 0x148d: 0x02ed, 0x148e: 0x0305, 0x148f: 0x02f9, 0x1490: 0x02ff, 0x1491: 0x0290, + 0x1492: 0x0293, 0x1493: 0x0296, 0x1494: 0x0299, 0x1495: 0x029c, 0x1496: 0x029f, 0x1497: 0x02a2, + 0x1498: 0x02a5, 0x1499: 0x02a8, 0x149a: 0x02ab, 0x149b: 0x02ae, 0x149c: 0x02b7, 0x149d: 0x02ba, + 0x149e: 0x02c0, 0x149f: 0x02c6, 0x14a0: 0x02c9, 0x14a1: 0x02cc, 0x14a2: 0x02cf, 0x14a3: 0x02d2, + 0x14a4: 0x02d5, 0x14a5: 0x02d8, 0x14a6: 0x02db, 0x14a7: 0x02f3, 0x14a8: 0x02ea, 0x14a9: 0x02e7, + 0x14aa: 0x02f0, 0x14ab: 0x02f6, 0x14ac: 0x1732, 0x14ad: 0x02fc, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x032c, 0x14c1: 0x032f, 0x14c2: 0x033b, 0x14c3: 0x0344, 0x14c5: 0x037d, + 0x14c6: 0x034d, 0x14c7: 0x033e, 0x14c8: 0x035c, 0x14c9: 0x0383, 0x14ca: 0x036e, 0x14cb: 0x0371, + 0x14cc: 0x0374, 0x14cd: 0x0377, 0x14ce: 0x0350, 0x14cf: 0x0362, 0x14d0: 0x0368, 0x14d1: 0x0356, + 0x14d2: 0x036b, 0x14d3: 0x034a, 0x14d4: 0x0353, 0x14d5: 0x0335, 0x14d6: 0x0338, 0x14d7: 0x0341, + 0x14d8: 0x0347, 0x14d9: 0x0359, 0x14da: 0x035f, 0x14db: 0x0365, 0x14dc: 0x0386, 0x14dd: 0x03d7, + 0x14de: 0x03bf, 0x14df: 0x0389, 0x14e1: 0x032f, 0x14e2: 0x033b, + 
0x14e4: 0x037a, 0x14e7: 0x033e, 0x14e9: 0x0383, + 0x14ea: 0x036e, 0x14eb: 0x0371, 0x14ec: 0x0374, 0x14ed: 0x0377, 0x14ee: 0x0350, 0x14ef: 0x0362, + 0x14f0: 0x0368, 0x14f1: 0x0356, 0x14f2: 0x036b, 0x14f4: 0x0353, 0x14f5: 0x0335, + 0x14f6: 0x0338, 0x14f7: 0x0341, 0x14f9: 0x0359, 0x14fb: 0x0365, + // Block 0x54, offset 0x1500 + 0x1502: 0x033b, + 0x1507: 0x033e, 0x1509: 0x0383, 0x150b: 0x0371, + 0x150d: 0x0377, 0x150e: 0x0350, 0x150f: 0x0362, 0x1511: 0x0356, + 0x1512: 0x036b, 0x1514: 0x0353, 0x1517: 0x0341, + 0x1519: 0x0359, 0x151b: 0x0365, 0x151d: 0x03d7, + 0x151f: 0x0389, 0x1521: 0x032f, 0x1522: 0x033b, + 0x1524: 0x037a, 0x1527: 0x033e, 0x1528: 0x035c, 0x1529: 0x0383, + 0x152a: 0x036e, 0x152c: 0x0374, 0x152d: 0x0377, 0x152e: 0x0350, 0x152f: 0x0362, + 0x1530: 0x0368, 0x1531: 0x0356, 0x1532: 0x036b, 0x1534: 0x0353, 0x1535: 0x0335, + 0x1536: 0x0338, 0x1537: 0x0341, 0x1539: 0x0359, 0x153a: 0x035f, 0x153b: 0x0365, + 0x153c: 0x0386, 0x153e: 0x03bf, + // Block 0x55, offset 0x1540 + 0x1540: 0x032c, 0x1541: 0x032f, 0x1542: 0x033b, 0x1543: 0x0344, 0x1544: 0x037a, 0x1545: 0x037d, + 0x1546: 0x034d, 0x1547: 0x033e, 0x1548: 0x035c, 0x1549: 0x0383, 0x154b: 0x0371, + 0x154c: 0x0374, 0x154d: 0x0377, 0x154e: 0x0350, 0x154f: 0x0362, 0x1550: 0x0368, 0x1551: 0x0356, + 0x1552: 0x036b, 0x1553: 0x034a, 0x1554: 0x0353, 0x1555: 0x0335, 0x1556: 0x0338, 0x1557: 0x0341, + 0x1558: 0x0347, 0x1559: 0x0359, 0x155a: 0x035f, 0x155b: 0x0365, + 0x1561: 0x032f, 0x1562: 0x033b, 0x1563: 0x0344, + 0x1565: 0x037d, 0x1566: 0x034d, 0x1567: 0x033e, 0x1568: 0x035c, 0x1569: 0x0383, + 0x156b: 0x0371, 0x156c: 0x0374, 0x156d: 0x0377, 0x156e: 0x0350, 0x156f: 0x0362, + 0x1570: 0x0368, 0x1571: 0x0356, 0x1572: 0x036b, 0x1573: 0x034a, 0x1574: 0x0353, 0x1575: 0x0335, + 0x1576: 0x0338, 0x1577: 0x0341, 0x1578: 0x0347, 0x1579: 0x0359, 0x157a: 0x035f, 0x157b: 0x0365, + // Block 0x56, offset 0x1580 + 0x1580: 0x19a6, 0x1581: 0x19a3, 0x1582: 0x19a9, 0x1583: 0x19cd, 0x1584: 0x19f1, 0x1585: 0x1a15, + 0x1586: 0x1a39, 0x1587: 0x1a42, 0x1588: 0x1a48, 0x1589: 0x1a4e, 0x158a: 0x1a54, + 0x1590: 0x1bbc, 0x1591: 0x1bc0, + 0x1592: 0x1bc4, 0x1593: 0x1bc8, 0x1594: 0x1bcc, 0x1595: 0x1bd0, 0x1596: 0x1bd4, 0x1597: 0x1bd8, + 0x1598: 0x1bdc, 0x1599: 0x1be0, 0x159a: 0x1be4, 0x159b: 0x1be8, 0x159c: 0x1bec, 0x159d: 0x1bf0, + 0x159e: 0x1bf4, 0x159f: 0x1bf8, 0x15a0: 0x1bfc, 0x15a1: 0x1c00, 0x15a2: 0x1c04, 0x15a3: 0x1c08, + 0x15a4: 0x1c0c, 0x15a5: 0x1c10, 0x15a6: 0x1c14, 0x15a7: 0x1c18, 0x15a8: 0x1c1c, 0x15a9: 0x1c20, + 0x15aa: 0x2855, 0x15ab: 0x0047, 0x15ac: 0x0065, 0x15ad: 0x1a69, 0x15ae: 0x1ae1, + 0x15b0: 0x0043, 0x15b1: 0x0045, 0x15b2: 0x0047, 0x15b3: 0x0049, 0x15b4: 0x004b, 0x15b5: 0x004d, + 0x15b6: 0x004f, 0x15b7: 0x0051, 0x15b8: 0x0053, 0x15b9: 0x0055, 0x15ba: 0x0057, 0x15bb: 0x0059, + 0x15bc: 0x005b, 0x15bd: 0x005d, 0x15be: 0x005f, 0x15bf: 0x0061, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x27dd, 0x15c1: 0x27f2, 0x15c2: 0x05fe, + 0x15d0: 0x0d0a, 0x15d1: 0x0b42, + 0x15d2: 0x09ce, 0x15d3: 0x4705, 0x15d4: 0x0816, 0x15d5: 0x0aea, 0x15d6: 0x142a, 0x15d7: 0x0afa, + 0x15d8: 0x0822, 0x15d9: 0x0dd2, 0x15da: 0x0faa, 0x15db: 0x0daa, 0x15dc: 0x0922, 0x15dd: 0x0c66, + 0x15de: 0x08ba, 0x15df: 0x0db2, 0x15e0: 0x090e, 0x15e1: 0x1212, 0x15e2: 0x107e, 0x15e3: 0x1486, + 0x15e4: 0x0ace, 0x15e5: 0x0a06, 0x15e6: 0x0f5e, 0x15e7: 0x0d16, 0x15e8: 0x0d42, 0x15e9: 0x07ba, + 0x15ea: 0x07c6, 0x15eb: 0x1506, 0x15ec: 0x0bd6, 0x15ed: 0x07e2, 0x15ee: 0x09ea, 0x15ef: 0x0d36, + 0x15f0: 0x14ae, 0x15f1: 0x0d0e, 0x15f2: 0x116a, 0x15f3: 0x11a6, 0x15f4: 0x09f2, 0x15f5: 0x0f3e, + 0x15f6: 0x0e06, 0x15f7: 
0x0e02, 0x15f8: 0x1092, 0x15f9: 0x0926, 0x15fa: 0x0a52, 0x15fb: 0x153e, + // Block 0x58, offset 0x1600 + 0x1600: 0x07f6, 0x1601: 0x07ee, 0x1602: 0x07fe, 0x1603: 0x1774, 0x1604: 0x0842, 0x1605: 0x0852, + 0x1606: 0x0856, 0x1607: 0x085e, 0x1608: 0x0866, 0x1609: 0x086a, 0x160a: 0x0876, 0x160b: 0x086e, + 0x160c: 0x06ae, 0x160d: 0x1788, 0x160e: 0x088a, 0x160f: 0x088e, 0x1610: 0x0892, 0x1611: 0x08ae, + 0x1612: 0x1779, 0x1613: 0x06b2, 0x1614: 0x089a, 0x1615: 0x08ba, 0x1616: 0x1783, 0x1617: 0x08ca, + 0x1618: 0x08d2, 0x1619: 0x0832, 0x161a: 0x08da, 0x161b: 0x08de, 0x161c: 0x195e, 0x161d: 0x08fa, + 0x161e: 0x0902, 0x161f: 0x06ba, 0x1620: 0x091a, 0x1621: 0x091e, 0x1622: 0x0926, 0x1623: 0x092a, + 0x1624: 0x06be, 0x1625: 0x0942, 0x1626: 0x0946, 0x1627: 0x0952, 0x1628: 0x095e, 0x1629: 0x0962, + 0x162a: 0x0966, 0x162b: 0x096e, 0x162c: 0x098e, 0x162d: 0x0992, 0x162e: 0x099a, 0x162f: 0x09aa, + 0x1630: 0x09b2, 0x1631: 0x09b6, 0x1632: 0x09b6, 0x1633: 0x09b6, 0x1634: 0x1797, 0x1635: 0x0f8e, + 0x1636: 0x09ca, 0x1637: 0x09d2, 0x1638: 0x179c, 0x1639: 0x09de, 0x163a: 0x09e6, 0x163b: 0x09ee, + 0x163c: 0x0a16, 0x163d: 0x0a02, 0x163e: 0x0a0e, 0x163f: 0x0a12, + // Block 0x59, offset 0x1640 + 0x1640: 0x0a1a, 0x1641: 0x0a22, 0x1642: 0x0a26, 0x1643: 0x0a2e, 0x1644: 0x0a36, 0x1645: 0x0a3a, + 0x1646: 0x0a3a, 0x1647: 0x0a42, 0x1648: 0x0a4a, 0x1649: 0x0a4e, 0x164a: 0x0a5a, 0x164b: 0x0a7e, + 0x164c: 0x0a62, 0x164d: 0x0a82, 0x164e: 0x0a66, 0x164f: 0x0a6e, 0x1650: 0x0906, 0x1651: 0x0aca, + 0x1652: 0x0a92, 0x1653: 0x0a96, 0x1654: 0x0a9a, 0x1655: 0x0a8e, 0x1656: 0x0aa2, 0x1657: 0x0a9e, + 0x1658: 0x0ab6, 0x1659: 0x17a1, 0x165a: 0x0ad2, 0x165b: 0x0ad6, 0x165c: 0x0ade, 0x165d: 0x0aea, + 0x165e: 0x0af2, 0x165f: 0x0b0e, 0x1660: 0x17a6, 0x1661: 0x17ab, 0x1662: 0x0b1a, 0x1663: 0x0b1e, + 0x1664: 0x0b22, 0x1665: 0x0b16, 0x1666: 0x0b2a, 0x1667: 0x06c2, 0x1668: 0x06c6, 0x1669: 0x0b32, + 0x166a: 0x0b3a, 0x166b: 0x0b3a, 0x166c: 0x17b0, 0x166d: 0x0b56, 0x166e: 0x0b5a, 0x166f: 0x0b5e, + 0x1670: 0x0b66, 0x1671: 0x17b5, 0x1672: 0x0b6e, 0x1673: 0x0b72, 0x1674: 0x0c4a, 0x1675: 0x0b7a, + 0x1676: 0x06ca, 0x1677: 0x0b86, 0x1678: 0x0b96, 0x1679: 0x0ba2, 0x167a: 0x0b9e, 0x167b: 0x17bf, + 0x167c: 0x0baa, 0x167d: 0x17c4, 0x167e: 0x0bb6, 0x167f: 0x0bb2, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0bba, 0x1681: 0x0bca, 0x1682: 0x0bce, 0x1683: 0x06ce, 0x1684: 0x0bde, 0x1685: 0x0be6, + 0x1686: 0x0bea, 0x1687: 0x0bee, 0x1688: 0x06d2, 0x1689: 0x17c9, 0x168a: 0x06d6, 0x168b: 0x0c0a, + 0x168c: 0x0c0e, 0x168d: 0x0c12, 0x168e: 0x0c1a, 0x168f: 0x1990, 0x1690: 0x0c32, 0x1691: 0x17d3, + 0x1692: 0x17d3, 0x1693: 0x12d2, 0x1694: 0x0c42, 0x1695: 0x0c42, 0x1696: 0x06da, 0x1697: 0x17f6, + 0x1698: 0x18c8, 0x1699: 0x0c52, 0x169a: 0x0c5a, 0x169b: 0x06de, 0x169c: 0x0c6e, 0x169d: 0x0c7e, + 0x169e: 0x0c82, 0x169f: 0x0c8a, 0x16a0: 0x0c9a, 0x16a1: 0x06e6, 0x16a2: 0x06e2, 0x16a3: 0x0c9e, + 0x16a4: 0x17d8, 0x16a5: 0x0ca2, 0x16a6: 0x0cb6, 0x16a7: 0x0cba, 0x16a8: 0x0cbe, 0x16a9: 0x0cba, + 0x16aa: 0x0cca, 0x16ab: 0x0cce, 0x16ac: 0x0cde, 0x16ad: 0x0cd6, 0x16ae: 0x0cda, 0x16af: 0x0ce2, + 0x16b0: 0x0ce6, 0x16b1: 0x0cea, 0x16b2: 0x0cf6, 0x16b3: 0x0cfa, 0x16b4: 0x0d12, 0x16b5: 0x0d1a, + 0x16b6: 0x0d2a, 0x16b7: 0x0d3e, 0x16b8: 0x17e7, 0x16b9: 0x0d3a, 0x16ba: 0x0d2e, 0x16bb: 0x0d46, + 0x16bc: 0x0d4e, 0x16bd: 0x0d62, 0x16be: 0x17ec, 0x16bf: 0x0d6a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0d5e, 0x16c1: 0x0d56, 0x16c2: 0x06ea, 0x16c3: 0x0d72, 0x16c4: 0x0d7a, 0x16c5: 0x0d82, + 0x16c6: 0x0d76, 0x16c7: 0x06ee, 0x16c8: 0x0d92, 0x16c9: 0x0d9a, 0x16ca: 0x17f1, 0x16cb: 0x0dc6, + 0x16cc: 0x0dfa, 
0x16cd: 0x0dd6, 0x16ce: 0x06fa, 0x16cf: 0x0de2, 0x16d0: 0x06f6, 0x16d1: 0x06f2, + 0x16d2: 0x08be, 0x16d3: 0x08c2, 0x16d4: 0x0dfe, 0x16d5: 0x0de6, 0x16d6: 0x12a6, 0x16d7: 0x075e, + 0x16d8: 0x0e0a, 0x16d9: 0x0e0e, 0x16da: 0x0e12, 0x16db: 0x0e26, 0x16dc: 0x0e1e, 0x16dd: 0x180a, + 0x16de: 0x06fe, 0x16df: 0x0e3a, 0x16e0: 0x0e2e, 0x16e1: 0x0e4a, 0x16e2: 0x0e52, 0x16e3: 0x1814, + 0x16e4: 0x0e56, 0x16e5: 0x0e42, 0x16e6: 0x0e5e, 0x16e7: 0x0702, 0x16e8: 0x0e62, 0x16e9: 0x0e66, + 0x16ea: 0x0e6a, 0x16eb: 0x0e76, 0x16ec: 0x1819, 0x16ed: 0x0e7e, 0x16ee: 0x0706, 0x16ef: 0x0e8a, + 0x16f0: 0x181e, 0x16f1: 0x0e8e, 0x16f2: 0x070a, 0x16f3: 0x0e9a, 0x16f4: 0x0ea6, 0x16f5: 0x0eb2, + 0x16f6: 0x0eb6, 0x16f7: 0x1823, 0x16f8: 0x17ba, 0x16f9: 0x1828, 0x16fa: 0x0ed6, 0x16fb: 0x182d, + 0x16fc: 0x0ee2, 0x16fd: 0x0eea, 0x16fe: 0x0eda, 0x16ff: 0x0ef6, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0f06, 0x1701: 0x0f16, 0x1702: 0x0f0a, 0x1703: 0x0f0e, 0x1704: 0x0f1a, 0x1705: 0x0f1e, + 0x1706: 0x1832, 0x1707: 0x0f02, 0x1708: 0x0f36, 0x1709: 0x0f3a, 0x170a: 0x070e, 0x170b: 0x0f4e, + 0x170c: 0x0f4a, 0x170d: 0x1837, 0x170e: 0x0f2e, 0x170f: 0x0f6a, 0x1710: 0x183c, 0x1711: 0x1841, + 0x1712: 0x0f6e, 0x1713: 0x0f82, 0x1714: 0x0f7e, 0x1715: 0x0f7a, 0x1716: 0x0712, 0x1717: 0x0f86, + 0x1718: 0x0f96, 0x1719: 0x0f92, 0x171a: 0x0f9e, 0x171b: 0x177e, 0x171c: 0x0fae, 0x171d: 0x1846, + 0x171e: 0x0fba, 0x171f: 0x1850, 0x1720: 0x0fce, 0x1721: 0x0fda, 0x1722: 0x0fee, 0x1723: 0x1855, + 0x1724: 0x1002, 0x1725: 0x1006, 0x1726: 0x185a, 0x1727: 0x185f, 0x1728: 0x1022, 0x1729: 0x1032, + 0x172a: 0x0716, 0x172b: 0x1036, 0x172c: 0x071a, 0x172d: 0x071a, 0x172e: 0x104e, 0x172f: 0x1052, + 0x1730: 0x105a, 0x1731: 0x105e, 0x1732: 0x106a, 0x1733: 0x071e, 0x1734: 0x1082, 0x1735: 0x1864, + 0x1736: 0x109e, 0x1737: 0x1869, 0x1738: 0x10aa, 0x1739: 0x17ce, 0x173a: 0x10ba, 0x173b: 0x186e, + 0x173c: 0x1873, 0x173d: 0x1878, 0x173e: 0x0722, 0x173f: 0x0726, + // Block 0x5d, offset 0x1740 + 0x1740: 0x10f2, 0x1741: 0x1882, 0x1742: 0x187d, 0x1743: 0x1887, 0x1744: 0x188c, 0x1745: 0x10fa, + 0x1746: 0x10fe, 0x1747: 0x10fe, 0x1748: 0x1106, 0x1749: 0x072e, 0x174a: 0x110a, 0x174b: 0x0732, + 0x174c: 0x0736, 0x174d: 0x1896, 0x174e: 0x111e, 0x174f: 0x1126, 0x1750: 0x1132, 0x1751: 0x073a, + 0x1752: 0x189b, 0x1753: 0x1156, 0x1754: 0x18a0, 0x1755: 0x18a5, 0x1756: 0x1176, 0x1757: 0x118e, + 0x1758: 0x073e, 0x1759: 0x1196, 0x175a: 0x119a, 0x175b: 0x119e, 0x175c: 0x18aa, 0x175d: 0x18af, + 0x175e: 0x18af, 0x175f: 0x11b6, 0x1760: 0x0742, 0x1761: 0x18b4, 0x1762: 0x11ca, 0x1763: 0x11ce, + 0x1764: 0x0746, 0x1765: 0x18b9, 0x1766: 0x11ea, 0x1767: 0x074a, 0x1768: 0x11fa, 0x1769: 0x11f2, + 0x176a: 0x1202, 0x176b: 0x18c3, 0x176c: 0x121a, 0x176d: 0x074e, 0x176e: 0x1226, 0x176f: 0x122e, + 0x1770: 0x123e, 0x1771: 0x0752, 0x1772: 0x18cd, 0x1773: 0x18d2, 0x1774: 0x0756, 0x1775: 0x18d7, + 0x1776: 0x1256, 0x1777: 0x18dc, 0x1778: 0x1262, 0x1779: 0x126e, 0x177a: 0x1276, 0x177b: 0x18e1, + 0x177c: 0x18e6, 0x177d: 0x128a, 0x177e: 0x18eb, 0x177f: 0x1292, + // Block 0x5e, offset 0x1780 + 0x1780: 0x17fb, 0x1781: 0x075a, 0x1782: 0x12aa, 0x1783: 0x12ae, 0x1784: 0x0762, 0x1785: 0x12b2, + 0x1786: 0x0b2e, 0x1787: 0x18f0, 0x1788: 0x18f5, 0x1789: 0x1800, 0x178a: 0x1805, 0x178b: 0x12d2, + 0x178c: 0x12d6, 0x178d: 0x14ee, 0x178e: 0x0766, 0x178f: 0x1302, 0x1790: 0x12fe, 0x1791: 0x1306, + 0x1792: 0x093a, 0x1793: 0x130a, 0x1794: 0x130e, 0x1795: 0x1312, 0x1796: 0x131a, 0x1797: 0x18fa, + 0x1798: 0x1316, 0x1799: 0x131e, 0x179a: 0x1332, 0x179b: 0x1336, 0x179c: 0x1322, 0x179d: 0x133a, + 0x179e: 0x134e, 0x179f: 0x1362, 0x17a0: 
0x132e, 0x17a1: 0x1342, 0x17a2: 0x1346, 0x17a3: 0x134a, + 0x17a4: 0x18ff, 0x17a5: 0x1909, 0x17a6: 0x1904, 0x17a7: 0x076a, 0x17a8: 0x136a, 0x17a9: 0x136e, + 0x17aa: 0x1376, 0x17ab: 0x191d, 0x17ac: 0x137a, 0x17ad: 0x190e, 0x17ae: 0x076e, 0x17af: 0x0772, + 0x17b0: 0x1913, 0x17b1: 0x1918, 0x17b2: 0x0776, 0x17b3: 0x139a, 0x17b4: 0x139e, 0x17b5: 0x13a2, + 0x17b6: 0x13a6, 0x17b7: 0x13b2, 0x17b8: 0x13ae, 0x17b9: 0x13ba, 0x17ba: 0x13b6, 0x17bb: 0x13c6, + 0x17bc: 0x13be, 0x17bd: 0x13c2, 0x17be: 0x13ca, 0x17bf: 0x077a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x13d2, 0x17c1: 0x13d6, 0x17c2: 0x077e, 0x17c3: 0x13e6, 0x17c4: 0x13ea, 0x17c5: 0x1922, + 0x17c6: 0x13f6, 0x17c7: 0x13fa, 0x17c8: 0x0782, 0x17c9: 0x1406, 0x17ca: 0x06b6, 0x17cb: 0x1927, + 0x17cc: 0x192c, 0x17cd: 0x0786, 0x17ce: 0x078a, 0x17cf: 0x1432, 0x17d0: 0x144a, 0x17d1: 0x1466, + 0x17d2: 0x1476, 0x17d3: 0x1931, 0x17d4: 0x148a, 0x17d5: 0x148e, 0x17d6: 0x14a6, 0x17d7: 0x14b2, + 0x17d8: 0x193b, 0x17d9: 0x178d, 0x17da: 0x14be, 0x17db: 0x14ba, 0x17dc: 0x14c6, 0x17dd: 0x1792, + 0x17de: 0x14d2, 0x17df: 0x14de, 0x17e0: 0x1940, 0x17e1: 0x1945, 0x17e2: 0x151e, 0x17e3: 0x152a, + 0x17e4: 0x1532, 0x17e5: 0x194a, 0x17e6: 0x1536, 0x17e7: 0x1562, 0x17e8: 0x156e, 0x17e9: 0x1572, + 0x17ea: 0x156a, 0x17eb: 0x157e, 0x17ec: 0x1582, 0x17ed: 0x194f, 0x17ee: 0x158e, 0x17ef: 0x078e, + 0x17f0: 0x1596, 0x17f1: 0x1954, 0x17f2: 0x0792, 0x17f3: 0x15ce, 0x17f4: 0x0bbe, 0x17f5: 0x15e6, + 0x17f6: 0x1959, 0x17f7: 0x1963, 0x17f8: 0x0796, 0x17f9: 0x079a, 0x17fa: 0x160e, 0x17fb: 0x1968, + 0x17fc: 0x079e, 0x17fd: 0x196d, 0x17fe: 0x1626, 0x17ff: 0x1626, + // Block 0x60, offset 0x1800 + 0x1800: 0x162e, 0x1801: 0x1972, 0x1802: 0x1646, 0x1803: 0x07a2, 0x1804: 0x1656, 0x1805: 0x1662, + 0x1806: 0x166a, 0x1807: 0x1672, 0x1808: 0x07a6, 0x1809: 0x1977, 0x180a: 0x1686, 0x180b: 0x16a2, + 0x180c: 0x16ae, 0x180d: 0x07aa, 0x180e: 0x07ae, 0x180f: 0x16b2, 0x1810: 0x197c, 0x1811: 0x07b2, + 0x1812: 0x1981, 0x1813: 0x1986, 0x1814: 0x198b, 0x1815: 0x16d6, 0x1816: 0x07b6, 0x1817: 0x16ea, + 0x1818: 0x16f2, 0x1819: 0x16f6, 0x181a: 0x16fe, 0x181b: 0x1706, 0x181c: 0x170e, 0x181d: 0x1995, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. 
+var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5f, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x60, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x61, 0xcb: 0x62, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x63, 0xd2: 0x64, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x65, + 0xd8: 0x66, 0xd9: 0x0d, 0xdb: 0x67, 0xdc: 0x68, 0xdd: 0x69, 0xdf: 0x6a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x6b, 0x121: 0x6c, 0x122: 0x6d, 0x123: 0x0e, 0x124: 0x6e, 0x125: 0x6f, 0x126: 0x70, 0x127: 0x71, + 0x128: 0x72, 0x129: 0x73, 0x12a: 0x74, 0x12b: 0x75, 0x12c: 0x70, 0x12d: 0x76, 0x12e: 0x77, 0x12f: 0x78, + 0x130: 0x74, 0x131: 0x79, 0x132: 0x7a, 0x133: 0x7b, 0x134: 0x7c, 0x135: 0x7d, 0x137: 0x7e, + 0x138: 0x7f, 0x139: 0x80, 0x13a: 0x81, 0x13b: 0x82, 0x13c: 0x83, 0x13d: 0x84, 0x13e: 0x85, 0x13f: 0x86, + // Block 0x5, offset 0x140 + 0x140: 0x87, 0x142: 0x88, 0x143: 0x89, 0x144: 0x8a, 0x145: 0x8b, 0x146: 0x8c, 0x147: 0x8d, + 0x14d: 0x8e, + 0x15c: 0x8f, 0x15f: 0x90, + 0x162: 0x91, 0x164: 0x92, + 0x168: 0x93, 0x169: 0x94, 0x16a: 0x95, 0x16b: 0x96, 0x16c: 0x0f, 0x16d: 0x97, 0x16e: 0x98, 0x16f: 0x99, + 0x170: 0x9a, 0x173: 0x9b, 0x174: 0x9c, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x9d, 0x181: 0x9e, 0x182: 0x9f, 0x183: 0xa0, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0xa1, 0x187: 0xa2, + 0x188: 0xa3, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa4, 0x18c: 0xa5, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa6, + 0x1a8: 0xa7, 0x1a9: 0xa8, 0x1ab: 0xa9, + 0x1b1: 0xaa, 0x1b3: 0xab, 0x1b5: 0xac, 0x1b7: 0xad, + 0x1ba: 0xae, 0x1bb: 0xaf, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xb0, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xb1, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xb2, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xb3, 0x21a: 0xb4, 0x21b: 0xb5, 0x21d: 0xb6, 0x21f: 0xb7, + 0x220: 0xb8, 0x223: 0xb9, 0x224: 0xba, 0x225: 0xbb, 0x226: 0xbc, 0x227: 0xbd, + 0x22a: 0xbe, 0x22b: 0xbf, 0x22d: 0xc0, 0x22f: 0xc1, + 0x230: 0xc2, 0x231: 0xc3, 0x232: 0xc4, 0x233: 0xc5, 0x234: 0xc6, 0x235: 0xc7, 0x236: 0xc8, 0x237: 0xc2, + 0x238: 0xc3, 0x239: 0xc4, 0x23a: 0xc5, 0x23b: 0xc6, 0x23c: 0xc7, 0x23d: 0xc8, 0x23e: 0xc2, 0x23f: 0xc3, + // Block 0x9, offset 0x240 + 0x240: 0xc4, 0x241: 0xc5, 0x242: 0xc6, 0x243: 0xc7, 0x244: 0xc8, 0x245: 0xc2, 0x246: 0xc3, 0x247: 0xc4, + 0x248: 0xc5, 0x249: 0xc6, 0x24a: 0xc7, 0x24b: 0xc8, 0x24c: 0xc2, 0x24d: 0xc3, 0x24e: 0xc4, 0x24f: 0xc5, + 0x250: 0xc6, 0x251: 0xc7, 0x252: 0xc8, 0x253: 0xc2, 0x254: 0xc3, 0x255: 0xc4, 0x256: 0xc5, 0x257: 0xc6, + 0x258: 0xc7, 0x259: 0xc8, 0x25a: 0xc2, 0x25b: 0xc3, 0x25c: 0xc4, 0x25d: 0xc5, 0x25e: 0xc6, 0x25f: 0xc7, + 0x260: 0xc8, 0x261: 0xc2, 0x262: 0xc3, 0x263: 0xc4, 0x264: 0xc5, 0x265: 0xc6, 0x266: 0xc7, 0x267: 0xc8, + 0x268: 0xc2, 0x269: 0xc3, 0x26a: 0xc4, 0x26b: 0xc5, 0x26c: 0xc6, 0x26d: 0xc7, 0x26e: 0xc8, 0x26f: 0xc2, + 0x270: 0xc3, 0x271: 0xc4, 0x272: 0xc5, 0x273: 0xc6, 0x274: 0xc7, 0x275: 0xc8, 0x276: 0xc2, 0x277: 0xc3, + 0x278: 0xc4, 0x279: 0xc5, 0x27a: 0xc6, 0x27b: 0xc7, 0x27c: 0xc8, 0x27d: 0xc2, 0x27e: 0xc3, 0x27f: 0xc4, + // Block 0xa, offset 0x280 + 0x280: 0xc5, 0x281: 0xc6, 0x282: 0xc7, 0x283: 0xc8, 
0x284: 0xc2, 0x285: 0xc3, 0x286: 0xc4, 0x287: 0xc5, + 0x288: 0xc6, 0x289: 0xc7, 0x28a: 0xc8, 0x28b: 0xc2, 0x28c: 0xc3, 0x28d: 0xc4, 0x28e: 0xc5, 0x28f: 0xc6, + 0x290: 0xc7, 0x291: 0xc8, 0x292: 0xc2, 0x293: 0xc3, 0x294: 0xc4, 0x295: 0xc5, 0x296: 0xc6, 0x297: 0xc7, + 0x298: 0xc8, 0x299: 0xc2, 0x29a: 0xc3, 0x29b: 0xc4, 0x29c: 0xc5, 0x29d: 0xc6, 0x29e: 0xc7, 0x29f: 0xc8, + 0x2a0: 0xc2, 0x2a1: 0xc3, 0x2a2: 0xc4, 0x2a3: 0xc5, 0x2a4: 0xc6, 0x2a5: 0xc7, 0x2a6: 0xc8, 0x2a7: 0xc2, + 0x2a8: 0xc3, 0x2a9: 0xc4, 0x2aa: 0xc5, 0x2ab: 0xc6, 0x2ac: 0xc7, 0x2ad: 0xc8, 0x2ae: 0xc2, 0x2af: 0xc3, + 0x2b0: 0xc4, 0x2b1: 0xc5, 0x2b2: 0xc6, 0x2b3: 0xc7, 0x2b4: 0xc8, 0x2b5: 0xc2, 0x2b6: 0xc3, 0x2b7: 0xc4, + 0x2b8: 0xc5, 0x2b9: 0xc6, 0x2ba: 0xc7, 0x2bb: 0xc8, 0x2bc: 0xc2, 0x2bd: 0xc3, 0x2be: 0xc4, 0x2bf: 0xc5, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc6, 0x2c1: 0xc7, 0x2c2: 0xc8, 0x2c3: 0xc2, 0x2c4: 0xc3, 0x2c5: 0xc4, 0x2c6: 0xc5, 0x2c7: 0xc6, + 0x2c8: 0xc7, 0x2c9: 0xc8, 0x2ca: 0xc2, 0x2cb: 0xc3, 0x2cc: 0xc4, 0x2cd: 0xc5, 0x2ce: 0xc6, 0x2cf: 0xc7, + 0x2d0: 0xc8, 0x2d1: 0xc2, 0x2d2: 0xc3, 0x2d3: 0xc4, 0x2d4: 0xc5, 0x2d5: 0xc6, 0x2d6: 0xc7, 0x2d7: 0xc8, + 0x2d8: 0xc2, 0x2d9: 0xc3, 0x2da: 0xc4, 0x2db: 0xc5, 0x2dc: 0xc6, 0x2dd: 0xc7, 0x2de: 0xc9, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xca, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xcb, + 0x34b: 0xcc, 0x34d: 0xcd, + 0x35e: 0x4c, + 0x368: 0xce, 0x36b: 0xcf, + 0x374: 0xd0, + 0x37a: 0xd1, 0x37b: 0xd2, 0x37d: 0xd3, 0x37e: 0xd4, + // Block 0xe, offset 0x380 + 0x381: 0xd5, 0x382: 0xd6, 0x384: 0xd7, 0x385: 0xbc, 0x387: 0xd8, + 0x388: 0xd9, 0x38b: 0xda, 0x38c: 0xdb, 0x38d: 0xdc, + 0x391: 0xdd, 0x392: 0xde, 0x393: 0xdf, 0x396: 0xe0, 0x397: 0xe1, + 0x398: 0xe2, 0x39a: 0xe3, 0x39c: 0xe4, + 0x3a0: 0xe5, 0x3a4: 0xe6, 0x3a5: 0xe7, 0x3a7: 0xe8, + 0x3a8: 0xe9, 0x3a9: 0xea, 0x3aa: 0xeb, + 0x3b0: 0xe2, 0x3b5: 0xec, 0x3b6: 0xed, + 0x3bd: 0xee, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xef, 0x3ec: 0xf0, + 0x3ff: 0xf1, + // Block 0x10, offset 0x400 + 0x432: 0xf2, + // Block 0x11, offset 0x440 + 0x445: 0xf3, 0x446: 0xf4, 0x447: 0xf5, + 0x449: 0xf6, + 0x450: 0xf7, 0x451: 0xf8, 0x452: 0xf9, 0x453: 0xfa, 0x454: 0xfb, 0x455: 0xfc, 0x456: 0xfd, 0x457: 0xfe, + 0x458: 0xff, 0x459: 0x100, 0x45a: 0x4d, 0x45b: 0x101, 0x45c: 0x102, 0x45d: 0x103, 0x45e: 0x104, 0x45f: 0x4e, + // Block 0x12, offset 0x480 + 0x480: 0x4f, 0x481: 0x50, 0x482: 0x105, 0x484: 0xf0, + 0x48a: 0x106, 0x48b: 0x107, + 0x493: 0x108, + 0x4a3: 0x109, 0x4a5: 0x10a, + 0x4b8: 0x51, 0x4b9: 0x52, 0x4ba: 0x53, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x54, 0x4c5: 0x10b, 0x4c6: 0x10c, + 0x4c8: 0x55, 0x4c9: 0x10d, + 0x4ef: 0x10e, + // Block 0x14, offset 0x500 + 0x520: 0x56, 0x521: 0x57, 0x522: 0x58, 0x523: 0x59, 0x524: 0x5a, 0x525: 0x5b, 0x526: 0x5c, 0x527: 0x5d, + 0x528: 0x5e, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 176 entries, 352 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1c, 0x26, 0x36, 0x38, 0x3d, 0x48, 0x57, 0x64, 0x6c, 0x71, 0x76, 0x78, 0x7c, 0x84, 0x8b, 0x8e, 0x96, 0x9a, 0x9e, 0xa0, 0xa2, 0xab, 0xaf, 0xb6, 0xbb, 0xbe, 0xc8, 0xcb, 0xd2, 0xda, 0xde, 0xe0, 
0xe4, 0xe8, 0xee, 0xff, 0x10b, 0x10d, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11d, 0x11f, 0x121, 0x124, 0x127, 0x129, 0x12c, 0x12f, 0x133, 0x139, 0x140, 0x149, 0x14b, 0x14e, 0x150, 0x15b, 0x166, 0x174, 0x182, 0x192, 0x1a0, 0x1a7, 0x1ad, 0x1bc, 0x1c0, 0x1c2, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1d0, 0x1d2, 0x1d5, 0x1d7, 0x1d9, 0x1db, 0x1e7, 0x1f1, 0x1fb, 0x1fe, 0x202, 0x204, 0x206, 0x20b, 0x20e, 0x211, 0x213, 0x215, 0x217, 0x219, 0x21f, 0x222, 0x227, 0x229, 0x230, 0x236, 0x23c, 0x244, 0x24a, 0x250, 0x256, 0x25a, 0x25c, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x26d, 0x26f, 0x271, 0x277, 0x27b, 0x27f, 0x287, 0x28e, 0x291, 0x294, 0x296, 0x299, 0x2a1, 0x2a5, 0x2ac, 0x2af, 0x2b5, 0x2b7, 0x2b9, 0x2bc, 0x2be, 0x2c1, 0x2c6, 0x2c8, 0x2ca, 0x2cc, 0x2ce, 0x2d0, 0x2d3, 0x2d5, 0x2d7, 0x2d9, 0x2db, 0x2dd, 0x2df, 0x2ec, 0x2f6, 0x2f8, 0x2fa, 0x2fe, 0x303, 0x30f, 0x314, 0x31d, 0x323, 0x328, 0x32c, 0x331, 0x335, 0x345, 0x353, 0x361, 0x36f, 0x371, 0x373, 0x375, 0x379, 0x37b, 0x37e, 0x389, 0x38b, 0x395} + +// nfkcSparseValues: 919 entries, 3676 bytes +var nfkcSparseValues = [919]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x43b9, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x43a5, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x439b, lo: 0xb4, hi: 0xb4}, + {value: 0x0260, lo: 0xb5, hi: 0xb5}, + {value: 0x43d2, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x234c, lo: 0xbc, hi: 0xbc}, + {value: 0x2340, lo: 0xbd, hi: 0xbd}, + {value: 0x23e2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0004, lo: 0x09}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0140, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0179, lo: 0xb4, hi: 0xb4}, + {value: 0x017f, lo: 0xb5, hi: 0xb5}, + {value: 0x018b, lo: 0xb6, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb8}, + // Block 0x3, offset 0x1c + {value: 0x000a, lo: 0x09}, + {value: 0x43af, lo: 0x98, hi: 0x98}, + {value: 0x43b4, lo: 0x99, hi: 0x9a}, + {value: 0x43d7, lo: 0x9b, hi: 0x9b}, + {value: 0x43a0, lo: 0x9c, hi: 0x9c}, + {value: 0x43c3, lo: 0x9d, hi: 0x9d}, + {value: 0x0137, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x01b8, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x26 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x36 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x38 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, 
lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3d + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x48 + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x57 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x64 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6c + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x71 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x76 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x78 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0xf, offset 0x7c + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x84 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x8b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x8e + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, 
+ {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x96 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x9a + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x9e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0xa0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0xa2 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0xab + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0xaf + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xb6 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xbb + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xbe + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xcb + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xd2 + {value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xda + {value: 0x0000, lo: 0x03}, + {value: 0x2751, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xde + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xe0 + {value: 0x0000, lo: 0x03}, + {value: 0x2766, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xe4 + {value: 0x0000, lo: 
0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x2758, lo: 0x9c, hi: 0x9c}, + {value: 0x275f, lo: 0x9d, hi: 0x9d}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x05}, + {value: 0x03fe, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xee + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x4735, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x4740, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xff + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + {value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0x10b + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x0402, lo: 0xbc, hi: 0xbc}, + // Block 0x2b, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x11b + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x11d + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x11f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x121 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x124 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x129 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x12c + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x12f + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x133 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 
0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x139 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x140 + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x149 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x14b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x14e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x150 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x15b + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00ec, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00fe, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x166 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x0532, lo: 0x91, hi: 0x91}, + {value: 0x43dc, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x19a0, lo: 0xa5, hi: 0xa5}, + {value: 0x1c8c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x27c1, lo: 0xb3, hi: 0xb3}, + {value: 0x2935, lo: 0xb4, hi: 0xb4}, + {value: 0x27c8, lo: 0xb6, hi: 0xb6}, + {value: 0x293f, lo: 0xb7, hi: 0xb7}, + {value: 0x199a, lo: 0xbc, hi: 0xbc}, + {value: 0x43aa, lo: 0xbe, hi: 0xbe}, + // Block 0x3f, offset 0x174 + {value: 0x0002, lo: 0x0d}, + {value: 0x1a60, lo: 0x87, hi: 0x87}, + {value: 0x1a5d, lo: 0x88, hi: 0x88}, + {value: 0x199d, lo: 0x89, hi: 0x89}, + {value: 0x2ac5, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x055e, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x40, offset 0x182 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x055e, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 
0x011f, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1ac9, lo: 0xa8, hi: 0xa8}, + // Block 0x41, offset 0x192 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x42, offset 0x1a0 + {value: 0x0007, lo: 0x06}, + {value: 0x22b0, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x43, offset 0x1a7 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x44, offset 0x1ad + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0x27cf, lo: 0xac, hi: 0xad}, + {value: 0x27d6, lo: 0xaf, hi: 0xaf}, + {value: 0x2953, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x45, offset 0x1bc + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x46, offset 0x1c0 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x47, offset 0x1c2 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x48, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x2ad2, lo: 0x8c, hi: 0x8c}, + // Block 0x49, offset 0x1c8 + {value: 0x0266, lo: 0x02}, + {value: 0x1cbc, lo: 0xb4, hi: 0xb4}, + {value: 0x1a5a, lo: 0xb5, hi: 0xb6}, + // Block 0x4a, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x4b, offset 0x1cd + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4c, offset 0x1d0 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4d, offset 0x1d2 + {value: 0x0000, lo: 0x02}, + {value: 0x057a, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x1d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4f, offset 0x1d7 + {value: 0x0000, lo: 0x01}, + {value: 0x0ebe, lo: 0x9f, hi: 0x9f}, + // Block 0x50, offset 0x1d9 + {value: 0x0000, lo: 0x01}, + {value: 0x172a, lo: 0xb3, hi: 0xb3}, + // Block 0x51, offset 0x1db + {value: 0x0004, lo: 0x0b}, + {value: 0x1692, lo: 0x80, hi: 0x82}, + {value: 0x16aa, lo: 0x83, hi: 0x83}, + {value: 0x16c2, lo: 
0x84, hi: 0x85}, + {value: 0x16d2, lo: 0x86, hi: 0x89}, + {value: 0x16e6, lo: 0x8a, hi: 0x8c}, + {value: 0x16fa, lo: 0x8d, hi: 0x8d}, + {value: 0x1702, lo: 0x8e, hi: 0x8e}, + {value: 0x170a, lo: 0x8f, hi: 0x90}, + {value: 0x1716, lo: 0x91, hi: 0x93}, + {value: 0x1726, lo: 0x94, hi: 0x94}, + {value: 0x172e, lo: 0x95, hi: 0x95}, + // Block 0x52, offset 0x1e7 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x05ae, lo: 0xb6, hi: 0xb6}, + {value: 0x0982, lo: 0xb8, hi: 0xba}, + // Block 0x53, offset 0x1f1 + {value: 0x0006, lo: 0x09}, + {value: 0x0406, lo: 0xb1, hi: 0xb1}, + {value: 0x040a, lo: 0xb2, hi: 0xb2}, + {value: 0x4b7c, lo: 0xb3, hi: 0xb3}, + {value: 0x040e, lo: 0xb4, hi: 0xb4}, + {value: 0x4b82, lo: 0xb5, hi: 0xb6}, + {value: 0x0412, lo: 0xb7, hi: 0xb7}, + {value: 0x0416, lo: 0xb8, hi: 0xb8}, + {value: 0x041a, lo: 0xb9, hi: 0xb9}, + {value: 0x4b8e, lo: 0xba, hi: 0xbf}, + // Block 0x54, offset 0x1fb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x55, offset 0x1fe + {value: 0x0000, lo: 0x03}, + {value: 0x02d8, lo: 0x9c, hi: 0x9c}, + {value: 0x02de, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x56, offset 0x202 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x57, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x173e, lo: 0xb0, hi: 0xb0}, + // Block 0x58, offset 0x206 + {value: 0x0006, lo: 0x04}, + {value: 0x0047, lo: 0xb2, hi: 0xb3}, + {value: 0x0063, lo: 0xb4, hi: 0xb4}, + {value: 0x00dd, lo: 0xb8, hi: 0xb8}, + {value: 0x00e9, lo: 0xb9, hi: 0xb9}, + // Block 0x59, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x5a, offset 0x20e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5b, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5c, offset 0x213 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5d, offset 0x215 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5e, offset 0x217 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5f, offset 0x219 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x60, offset 0x21f + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x61, offset 0x222 + {value: 0x000c, lo: 0x04}, + {value: 0x173a, lo: 0x9c, hi: 0x9d}, + {value: 0x014f, lo: 0x9e, hi: 0x9e}, + {value: 0x174a, lo: 0x9f, hi: 0x9f}, + {value: 0x01a6, lo: 0xa9, hi: 0xa9}, + // Block 0x62, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x63, offset 0x229 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x64, 
offset 0x230 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x65, offset 0x236 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x66, offset 0x23c + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x67, offset 0x244 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x68, offset 0x24a + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x69, offset 0x250 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x6a, offset 0x256 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6b, offset 0x25a + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6c, offset 0x25c + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6d, offset 0x25e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6e, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6f, offset 0x262 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x71, offset 0x26b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x72, offset 0x26d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x73, offset 0x26f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x271 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x277 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x76, offset 0x27b + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x77, offset 0x27f + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 
0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x78, offset 0x287 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x79, offset 0x28e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7a, offset 0x291 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7b, offset 0x294 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7c, offset 0x296 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7e, offset 0x2a1 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7f, offset 0x2a5 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x80, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x81, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x82, offset 0x2b5 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x83, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x84, offset 0x2b9 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x2bc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x2be + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x2c1 + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x2c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x2c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x2ca + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x2cc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x2ce + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x2d0 + {value: 0x0000, lo: 0x02}, + 
{value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x2d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x90, offset 0x2d7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x91, offset 0x2d9 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x92, offset 0x2db + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x93, offset 0x2dd + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x94, offset 0x2df + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x95, offset 0x2ec + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x96, offset 0x2f6 + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x97, offset 0x2f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x98, offset 0x2fa + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x99, offset 0x2fe + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x9a, offset 0x303 + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x9b, offset 0x30f + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x314 + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x9d, offset 0x31d + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9e, offset 0x323 + 
{value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9f, offset 0x328 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0xa0, offset 0x32c + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0xa1, offset 0x331 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0xa2, offset 0x335 + {value: 0x0003, lo: 0x0f}, + {value: 0x023c, lo: 0x80, hi: 0x80}, + {value: 0x0556, lo: 0x81, hi: 0x81}, + {value: 0x023f, lo: 0x82, hi: 0x9a}, + {value: 0x0552, lo: 0x9b, hi: 0x9b}, + {value: 0x024b, lo: 0x9c, hi: 0x9c}, + {value: 0x0254, lo: 0x9d, hi: 0x9d}, + {value: 0x025a, lo: 0x9e, hi: 0x9e}, + {value: 0x027e, lo: 0x9f, hi: 0x9f}, + {value: 0x026f, lo: 0xa0, hi: 0xa0}, + {value: 0x026c, lo: 0xa1, hi: 0xa1}, + {value: 0x01f7, lo: 0xa2, hi: 0xb2}, + {value: 0x020c, lo: 0xb3, hi: 0xb3}, + {value: 0x022a, lo: 0xb4, hi: 0xba}, + {value: 0x0556, lo: 0xbb, hi: 0xbb}, + {value: 0x023f, lo: 0xbc, hi: 0xbf}, + // Block 0xa3, offset 0x345 + {value: 0x0003, lo: 0x0d}, + {value: 0x024b, lo: 0x80, hi: 0x94}, + {value: 0x0552, lo: 0x95, hi: 0x95}, + {value: 0x024b, lo: 0x96, hi: 0x96}, + {value: 0x0254, lo: 0x97, hi: 0x97}, + {value: 0x025a, lo: 0x98, hi: 0x98}, + {value: 0x027e, lo: 0x99, hi: 0x99}, + {value: 0x026f, lo: 0x9a, hi: 0x9a}, + {value: 0x026c, lo: 0x9b, hi: 0x9b}, + {value: 0x01f7, lo: 0x9c, hi: 0xac}, + {value: 0x020c, lo: 0xad, hi: 0xad}, + {value: 0x022a, lo: 0xae, hi: 0xb4}, + {value: 0x0556, lo: 0xb5, hi: 0xb5}, + {value: 0x023f, lo: 0xb6, hi: 0xbf}, + // Block 0xa4, offset 0x353 + {value: 0x0003, lo: 0x0d}, + {value: 0x025d, lo: 0x80, hi: 0x8e}, + {value: 0x0552, lo: 0x8f, hi: 0x8f}, + {value: 0x024b, lo: 0x90, hi: 0x90}, + {value: 0x0254, lo: 0x91, hi: 0x91}, + {value: 0x025a, lo: 0x92, hi: 0x92}, + {value: 0x027e, lo: 0x93, hi: 0x93}, + {value: 0x026f, lo: 0x94, hi: 0x94}, + {value: 0x026c, lo: 0x95, hi: 0x95}, + {value: 0x01f7, lo: 0x96, hi: 0xa6}, + {value: 0x020c, lo: 0xa7, hi: 0xa7}, + {value: 0x022a, lo: 0xa8, hi: 0xae}, + {value: 0x0556, lo: 0xaf, hi: 0xaf}, + {value: 0x023f, lo: 0xb0, hi: 0xbf}, + // Block 0xa5, offset 0x361 + {value: 0x0003, lo: 0x0d}, + {value: 0x026f, lo: 0x80, hi: 0x88}, + {value: 0x0552, lo: 0x89, hi: 0x89}, + {value: 0x024b, lo: 0x8a, hi: 0x8a}, + {value: 0x0254, lo: 0x8b, hi: 0x8b}, + {value: 0x025a, lo: 0x8c, hi: 0x8c}, + {value: 0x027e, lo: 0x8d, hi: 0x8d}, + {value: 0x026f, lo: 0x8e, hi: 0x8e}, + {value: 0x026c, lo: 0x8f, hi: 0x8f}, + {value: 0x01f7, lo: 0x90, hi: 0xa0}, + {value: 0x020c, lo: 0xa1, hi: 0xa1}, + {value: 0x022a, lo: 0xa2, hi: 0xa8}, + {value: 0x0556, lo: 0xa9, hi: 0xa9}, + {value: 0x023f, lo: 0xaa, hi: 0xbf}, + // Block 0xa6, offset 0x36f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0xa8, offset 0x373 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa9, offset 0x375 + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 
0xaf}, + // Block 0xaa, offset 0x379 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xab, offset 0x37b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xac, offset 0x37e + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1a7e, lo: 0x8a, hi: 0x8a}, + {value: 0x1ab1, lo: 0x8b, hi: 0x8b}, + {value: 0x1acc, lo: 0x8c, hi: 0x8c}, + {value: 0x1ad2, lo: 0x8d, hi: 0x8d}, + {value: 0x1cf0, lo: 0x8e, hi: 0x8e}, + {value: 0x1ade, lo: 0x8f, hi: 0x8f}, + {value: 0x1aa8, lo: 0xaa, hi: 0xaa}, + {value: 0x1aab, lo: 0xab, hi: 0xab}, + {value: 0x1aae, lo: 0xac, hi: 0xac}, + // Block 0xad, offset 0x389 + {value: 0x0000, lo: 0x01}, + {value: 0x1a6c, lo: 0x90, hi: 0x90}, + // Block 0xae, offset 0x38b + {value: 0x0028, lo: 0x09}, + {value: 0x2999, lo: 0x80, hi: 0x80}, + {value: 0x295d, lo: 0x81, hi: 0x81}, + {value: 0x2967, lo: 0x82, hi: 0x82}, + {value: 0x297b, lo: 0x83, hi: 0x84}, + {value: 0x2985, lo: 0x85, hi: 0x86}, + {value: 0x2971, lo: 0x87, hi: 0x87}, + {value: 0x298f, lo: 0x88, hi: 0x88}, + {value: 0x0c6a, lo: 0x90, hi: 0x90}, + {value: 0x09e2, lo: 0x91, hi: 0x91}, + // Block 0xaf, offset 0x395 + {value: 0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" 
+ // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" 
+ // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + 
"\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + 
"\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + "\x1e\xa0\x03\x02\x00\x00\x1e\xac" 
+ // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + "\x00U\x03#\x00\x00\x1e\xe4" + // 
0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 56KB (57068 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae..bf65457 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go index 423386b..e4250ae 100644 --- a/vendor/golang.org/x/text/unicode/norm/trie.go +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -29,7 +29,7 @@ var ( nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. +// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 0000000..2683e4b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 0000000..08eb1ba --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v3*. + +To install it, run: + + go get gopkg.in/yaml.v3 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) + +API stability +------------- + +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 0000000..ae7d049 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. 
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. 
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 0000000..0173b69 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,1000 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + 
ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 0000000..0f47c9c --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,2020 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. 
+ emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. 
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 0000000..de9e72a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 0000000..268558a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1258 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 0000000..b7de0a8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 0000000..64ae888 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
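The range check in the reader above admits only #x9, #xA, #xD, #x20-#x7E, #x85, #xA0-#xD7FF, #xE000-#xFFFD and #x10000-#x10FFFF, so a raw control byte in the input is expected to surface as a decode error. An illustrative usage sketch against the public yaml.v3 API, outside the vendored package:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var v interface{}

	// 0x01 falls outside the allowed character set, so the reader should
	// refuse it while filling its buffer.
	if err := yaml.Unmarshal([]byte("a: \x01"), &v); err != nil {
		fmt.Println("control byte:", err)
	}

	// Tabs, line breaks and printable ASCII are accepted as usual.
	if err := yaml.Unmarshal([]byte("a: ok\n"), &v); err == nil {
		fmt.Println("plain ASCII decoded:", v)
	}
}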
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
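The tables built above drive how untagged plain scalars acquire !!null, !!bool, !!int, !!float or !!str tags. One way to observe the resolved short tags without touching package internals is to decode into a yaml.Node; a usage sketch, not part of the patch itself:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := "nothing: ~\nflag: true\ncount: 42\nratio: 3.14\nword: hello\n"

	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}

	// doc is the document node; its first child is the top-level mapping,
	// whose Content slice alternates key and value nodes.
	mapping := doc.Content[0]
	for i := 0; i < len(mapping.Content); i += 2 {
		key, val := mapping.Content[i], mapping.Content[i+1]
		fmt.Printf("%-8s -> %-8s %q\n", key.Value, val.Tag, val.Value)
	}
}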
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go new file mode 100644 index 0000000..ca00701 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -0,0 +1,3038 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
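The introduction above describes streams that carry several documents separated by '---'. At the API level such streams are consumed one document at a time with yaml.Decoder rather than Unmarshal; a usage sketch, separate from the vendored code, reusing the introduction's own example:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	stream := "a scalar\n---\nanother scalar\n---\nyet another scalar\n"

	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc interface{}
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break // end of the stream
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("document: %v\n", doc)
	}
}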
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
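yaml_parser_set_scanner_error above records a context string, a problem string and the offending marks, and those details end up in the error returned by the public API. A sketch of how such an error surfaces (it only prints the message, since the exact wording belongs to the library):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var v interface{}

	// A leading tab cannot start any token in the block context, so the
	// scanner is expected to reject this input with a positioned error.
	err := yaml.Unmarshal([]byte("\tkey: value\n"), &v)
	fmt.Println(err)
}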
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
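The dispatcher above hands characters such as '[', '{', '&', '*' and '#' to dedicated fetchers, which is why a plain scalar cannot start with them; quoting turns the same text back into an ordinary string. An illustrative usage sketch, outside the vendored package:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := `
flow: [1, 2, 3]
quoted: '[1, 2, 3]'
anchor: &a hello
alias: *a
`
	var v map[string]interface{}
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	fmt.Printf("flow   %T %v\n", v["flow"], v["flow"])     // flow sequence -> slice
	fmt.Printf("quoted %T %v\n", v["quoted"], v["quoted"]) // quoted text -> string
	fmt.Printf("alias  %T %v\n", v["alias"], v["alias"])   // alias expands to the anchored value
}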
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
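The max_flow_level and max_indents constants above cap nesting at 10000 entries so that hostile input cannot grow the simple-key and indent stacks without bound. A sketch of the observable effect, not part of the patch (the exact error wording is the library's):

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	var v interface{}

	// 10001 opening brackets push the flow level past max_flow_level, so
	// decoding is expected to fail with a depth-related scanner error.
	deep := strings.Repeat("[", 10001)
	err := yaml.Unmarshal([]byte(deep), &v)
	fmt.Println("deep nesting:", err)

	// Modest nesting decodes normally.
	err = yaml.Unmarshal([]byte("[[[[1]]]]"), &v)
	fmt.Println("shallow nesting:", err, v)
}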
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
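yaml_parser_roll_indent and yaml_parser_unroll_indent above turn indentation changes into BLOCK-MAPPING-START, BLOCK-SEQUENCE-START and BLOCK-END tokens, which is how plain indentation expresses nesting. A usage sketch of the end result, separate from the vendored code:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := `
a mapping:
  key 1: value 1
  key 2: value 2
a sequence:
  - item 1
  - item 2
`
	var v map[string]interface{}
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	// The indented block becomes a nested map, the '-' entries a slice.
	fmt.Printf("%T %v\n", v["a mapping"], v["a mapping"])
	fmt.Printf("%T %v\n", v["a sequence"], v["a sequence"])
}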
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
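yaml_parser_fetch_key and yaml_parser_fetch_value above cover both the explicit '?'/':' form and implicit simple keys, and both spell the same mapping. An illustrative sketch, outside the vendored package:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := `
? a complex key
: another value
a simple key: a value
`
	var v map[string]string
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	// Both entry styles land in the same map.
	fmt.Println(v["a complex key"], "/", v["a simple key"])
}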
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
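yaml_parser_fetch_block_scalar above is called with literal=true for '|' and false for '>'. The practical difference is that literal scalars keep their line breaks while folded scalars join lines with spaces. A usage sketch, not part of the vendored file:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := `
literal: |
  line one
  line two
folded: >
  line one
  line two
`
	var v map[string]string
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	fmt.Printf("literal: %q\n", v["literal"]) // "line one\nline two\n"
	fmt.Printf("folded:  %q\n", v["folded"])  // "line one line two\n"
}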
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
+ leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. 
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 0000000..9210ece --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 0000000..b8a116b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 0000000..8cec6da --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,698 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. 
+func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. 
+// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. 
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 0000000..7c6d007 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,807 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT // A STREAM-START event.
+	yaml_STREAM_END_EVENT   // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. 
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. 
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 0000000..e88f9c5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 82975fd..35681bd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,20 +1,30 @@ +# filippo.io/edwards25519 v1.1.0 +## explicit; go 1.20 +filippo.io/edwards25519 +filippo.io/edwards25519/field # github.com/BurntSushi/toml v0.3.1 ## explicit -# github.com/ClickHouse/clickhouse-go/v2 v2.2.0 -## explicit; go 1.16 +# github.com/ClickHouse/ch-go v0.61.5 +## explicit; go 1.21 +github.com/ClickHouse/ch-go/compress +github.com/ClickHouse/ch-go/proto +# github.com/ClickHouse/clickhouse-go/v2 v2.27.1 +## explicit; go 1.21 github.com/ClickHouse/clickhouse-go/v2 github.com/ClickHouse/clickhouse-go/v2/contributors github.com/ClickHouse/clickhouse-go/v2/ext github.com/ClickHouse/clickhouse-go/v2/lib/binary -github.com/ClickHouse/clickhouse-go/v2/lib/cityhash102 github.com/ClickHouse/clickhouse-go/v2/lib/column -github.com/ClickHouse/clickhouse-go/v2/lib/compress github.com/ClickHouse/clickhouse-go/v2/lib/driver -github.com/ClickHouse/clickhouse-go/v2/lib/io github.com/ClickHouse/clickhouse-go/v2/lib/proto github.com/ClickHouse/clickhouse-go/v2/lib/timezone -# github.com/elastic/go-sysinfo v1.9.0 -## explicit; go 1.17 +github.com/ClickHouse/clickhouse-go/v2/resources +# github.com/andybalholm/brotli v1.1.0 +## explicit; go 1.13 +github.com/andybalholm/brotli +github.com/andybalholm/brotli/matchfinder +# github.com/elastic/go-sysinfo v1.11.2 +## explicit; go 1.18 github.com/elastic/go-sysinfo github.com/elastic/go-sysinfo/internal/registry github.com/elastic/go-sysinfo/providers/aix @@ -29,6 +39,12 @@ github.com/elastic/go-windows # github.com/fsnotify/fsnotify v1.4.9 ## explicit; go 1.13 github.com/fsnotify/fsnotify +# github.com/go-faster/city v1.0.1 +## explicit; go 1.17 +github.com/go-faster/city +# github.com/go-faster/errors v0.7.1 +## explicit; go 1.20 +github.com/go-faster/errors # github.com/go-pg/pg v8.0.3+incompatible ## explicit github.com/go-pg/pg @@ -40,8 +56,8 @@ github.com/go-pg/pg/internal/struct_filter github.com/go-pg/pg/internal/tag github.com/go-pg/pg/orm github.com/go-pg/pg/types -# github.com/go-sql-driver/mysql v1.7.0 -## explicit; go 1.13 +# github.com/go-sql-driver/mysql v1.8.1 +## explicit; go 1.18 github.com/go-sql-driver/mysql # github.com/golang-migrate/migrate/v4 v4.2.5 ## explicit @@ -51,7 +67,7 @@ github.com/golang-migrate/migrate/v4/database/mysql github.com/golang-migrate/migrate/v4/database/postgres github.com/golang-migrate/migrate/v4/source github.com/golang-migrate/migrate/v4/source/file -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 @@ -90,30 +106,40 @@ github.com/joeshaw/multierror # github.com/jtolds/gls v4.20.0+incompatible ## explicit 
github.com/jtolds/gls +# github.com/klauspost/compress v1.17.7 +## explicit; go 1.20 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/lib/pq v1.10.6 ## explicit; go 1.13 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram -# github.com/magiconair/properties v1.8.0 -## explicit +# github.com/magiconair/properties v1.8.7 +## explicit; go 1.19 github.com/magiconair/properties +# github.com/mfridman/interpolate v0.0.2 +## explicit; go 1.19 +github.com/mfridman/interpolate # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/morikuni/aec v1.0.0 -## explicit # github.com/onsi/ginkgo v1.15.0 ## explicit; go 1.13 # github.com/onsi/gomega v1.10.5 ## explicit; go 1.14 -# github.com/paulmach/orb v0.7.1 +# github.com/paulmach/orb v0.11.1 ## explicit; go 1.15 github.com/paulmach/orb # github.com/pelletier/go-toml v1.2.0 ## explicit github.com/pelletier/go-toml -# github.com/pierrec/lz4/v4 v4.1.15 +# github.com/pierrec/lz4/v4 v4.1.21 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -123,17 +149,33 @@ github.com/pierrec/lz4/v4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pressly/goose/v3 v3.9.0 => github.com/chapsuk/goose/v3 v3.0.0-20230127172535-d8dae35e216b -## explicit; go 1.17 +# github.com/pressly/goose/v3 v3.21.1 => github.com/chapsuk/goose/v3 v3.0.0-20240820170337-b77972a35712 +## explicit; go 1.21 github.com/pressly/goose/v3 +github.com/pressly/goose/v3/database +github.com/pressly/goose/v3/internal/dialect +github.com/pressly/goose/v3/internal/dialect/dialectquery +github.com/pressly/goose/v3/internal/gooseutil github.com/pressly/goose/v3/internal/sqlparser -# github.com/prometheus/procfs v0.9.0 -## explicit; go 1.18 +github.com/pressly/goose/v3/lock +# github.com/prometheus/procfs v0.12.0 +## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/shopspring/decimal v1.3.1 -## explicit; go 1.13 +# github.com/segmentio/asm v1.2.0 +## explicit; go 1.18 +github.com/segmentio/asm/bswap +github.com/segmentio/asm/cpu +github.com/segmentio/asm/cpu/arm +github.com/segmentio/asm/cpu/arm64 +github.com/segmentio/asm/cpu/cpuid +github.com/segmentio/asm/cpu/x86 +# github.com/sethvargo/go-retry v0.3.0 +## explicit; go 1.21 +github.com/sethvargo/go-retry +# github.com/shopspring/decimal v1.4.0 +## explicit; go 1.10 github.com/shopspring/decimal # github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d ## explicit @@ -164,7 +206,7 @@ github.com/spf13/pflag # github.com/spf13/viper v1.3.2 ## explicit github.com/spf13/viper -# github.com/vertica/vertica-sql-go v1.3.1 +# github.com/vertica/vertica-sql-go v1.3.3 ## explicit; go 1.13 github.com/vertica/vertica-sql-go github.com/vertica/vertica-sql-go/common @@ -172,44 +214,52 @@ github.com/vertica/vertica-sql-go/logger github.com/vertica/vertica-sql-go/msgs github.com/vertica/vertica-sql-go/parse github.com/vertica/vertica-sql-go/rowcache -# go.opentelemetry.io/otel v1.9.0 -## explicit; go 1.17 +# go.opentelemetry.io/otel v1.26.0 +## explicit; go 1.21 go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal -# 
go.opentelemetry.io/otel/trace v1.9.0 -## explicit; go 1.17 +go.opentelemetry.io/otel/internal/attribute +# go.opentelemetry.io/otel/trace v1.26.0 +## explicit; go 1.21 go.opentelemetry.io/otel/trace -# go.uber.org/atomic v1.3.2 -## explicit -go.uber.org/atomic -# go.uber.org/multierr v1.1.0 -## explicit +go.opentelemetry.io/otel/trace/embedded +# go.uber.org/multierr v1.11.0 +## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.9.1 -## explicit +# go.uber.org/zap v1.27.0 +## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer +go.uber.org/zap/internal go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.5.0 -## explicit; go 1.17 +# golang.org/x/crypto v0.25.0 +## explicit; go 1.20 golang.org/x/crypto/pbkdf2 -# golang.org/x/sys v0.4.0 -## explicit; go 1.17 -golang.org/x/sys/internal/unsafeheader +# golang.org/x/sync v0.8.0 +## explicit; go 1.18 +golang.org/x/sync/errgroup +# golang.org/x/sys v0.22.0 +## explicit; go 1.18 +golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.6.0 -## explicit; go 1.17 +# golang.org/x/text v0.16.0 +## explicit; go 1.18 golang.org/x/text/transform golang.org/x/text/unicode/norm # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.1 +## explicit +gopkg.in/yaml.v3 # howett.net/plist v1.0.0 ## explicit; go 1.12 howett.net/plist
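
Reviewer note on the vendored gopkg.in/yaml.v3 API above: the Node and Encoder doc comments describe an intermediate representation that preserves kinds, tags, and comments. A minimal sketch of decoding into a Node and walking its Content follows; it is not part of the vendored code, and the YAML input and variable names are purely illustrative.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    func main() {
    	data := []byte("name: example\nvalue: 42\n")

    	// Unmarshal into a Node instead of a struct to keep access to
    	// kinds, tags, and positions (see the Node doc comment above).
    	var doc yaml.Node
    	if err := yaml.Unmarshal(data, &doc); err != nil {
    		panic(err)
    	}

    	// The document node wraps the top-level mapping in Content[0];
    	// mapping keys and values alternate in Content.
    	root := doc.Content[0]
    	for i := 0; i < len(root.Content); i += 2 {
    		key, val := root.Content[i], root.Content[i+1]
    		fmt.Printf("%s: %s (tag %s)\n", key.Value, val.Value, val.ShortTag())
    	}
    }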
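
The getStructInfo and isZero helpers in the vendored yaml.go handle the yaml struct-tag flags (omitempty, flow, inline). A small hypothetical example of those flags in use; the Config and Extra types and field names are made up for illustration.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    type Extra struct {
    	Owner string `yaml:"owner"`
    }

    type Config struct {
    	Name   string            `yaml:"name"`
    	Labels map[string]string `yaml:"labels,flow"`     // emitted in flow style
    	Notes  string            `yaml:"notes,omitempty"` // dropped when zero (see isZero)
    	Extra  Extra             `yaml:",inline"`         // fields merged into the parent mapping
    }

    func main() {
    	out, err := yaml.Marshal(Config{
    		Name:   "demo",
    		Labels: map[string]string{"env": "dev"},
    		Extra:  Extra{Owner: "team-a"},
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    }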