From d18728420aa10ef8828c8a9548f309658e4474f0 Mon Sep 17 00:00:00 2001
From: Mike Zak
Date: Tue, 17 Dec 2019 13:49:53 +0200
Subject: [PATCH] [NOD-495] Deleted everything except dnsseeder

---
 CHANGES | 955 --- Jenkinsfile | 10 - LICENSE | 4 +- README.md | 75 +- addrmgr/addrmanager.go | 1404 ----- addrmgr/addrmanager_test.go | 685 -- addrmgr/cov_report.sh | 17 - addrmgr/doc.go | 38 - addrmgr/internal_test.go | 25 - addrmgr/knownaddress.go | 105 - addrmgr/knownaddress_test.go | 114 - addrmgr/log.go | 13 - addrmgr/network.go | 287 - addrmgr/network_test.go | 225 - addrmgr/test_coverage.txt | 62 - blockdag/README.md | 41 - blockdag/accept.go | 130 - blockdag/accept_test.go | 143 - blockdag/blockheap.go | 88 - blockdag/blockheap_test.go | 120 - blockdag/blockidhash.go | 136 - blockdag/blockindex.go | 141 - blockdag/blockindex_test.go | 58 - blockdag/blocklocator.go | 143 - blockdag/blocknode.go | 230 - blockdag/blocknode_test.go | 86 - blockdag/blockset.go | 151 - blockdag/blockset_test.go | 296 - blockdag/blockwindow.go | 75 - blockdag/blockwindow_test.go | 138 - blockdag/coinbase.go | 278 - blockdag/coinbase_test.go | 60 - blockdag/common_test.go | 211 - blockdag/compress.go | 584 -- blockdag/compress_test.go | 436 -- blockdag/dag.go | 1967 ------ blockdag/dag_test.go | 1406 ----- blockdag/dagio.go | 850 --- blockdag/dagio_test.go | 274 - blockdag/difficulty.go | 52 - blockdag/difficulty_test.go | 198 - blockdag/doc.go | 64 - blockdag/error.go | 295 - blockdag/error_test.go | 143 - blockdag/external_dag_test.go | 558 -- blockdag/fullblocks_test.go | 317 - blockdag/fullblocktests/README.md | 29 - blockdag/fullblocktests/doc.go | 20 - blockdag/fullblocktests/generate.go | 2102 ------- blockdag/fullblocktests/params.go | 140 - blockdag/indexers/README.md | 24 - blockdag/indexers/acceptanceindex.go | 234 - blockdag/indexers/acceptanceindex_test.go | 340 - blockdag/indexers/addrindex.go | 902 --- blockdag/indexers/addrindex_test.go | 277 - blockdag/indexers/common.go | 112 - blockdag/indexers/log.go | 13 - blockdag/indexers/manager.go | 389 -- blockdag/indexers/txindex.go | 431 -- blockdag/indexers/txindex_test.go | 144 - blockdag/log.go | 13 - blockdag/mediantime.go | 218 - blockdag/mediantime_test.go | 104 - blockdag/merkle.go | 134 - blockdag/merkle_test.go | 36 - blockdag/notifications.go | 89 - blockdag/notifications_test.go | 61 - blockdag/phantom.go | 105 - blockdag/phantom_test.go | 892 --- blockdag/process.go | 215 - blockdag/process_test.go | 131 - blockdag/scriptval.go | 241 - blockdag/scriptval_test.go | 55 - blockdag/subnetworks.go | 194 - blockdag/subnetworks_test.go | 26 - blockdag/test_utils.go | 253 - blockdag/test_utils_test.go | 56 - blockdag/testdata/277647.dat | Bin 149173 -> 0 bytes blockdag/testdata/277647.utxostore | Bin 48852 -> 0 bytes blockdag/testdata/blk_0_to_4.dat | Bin 2055 -> 0 bytes blockdag/testdata/blk_3A.dat | Bin 470 -> 0 bytes blockdag/testdata/blk_3B.dat | Bin 357 -> 0 bytes blockdag/testdata/blk_3C.dat | Bin 382 -> 0 bytes blockdag/testdata/blk_3D.dat | Bin 508 -> 0 bytes blockdag/testdata/reorgtest.hex | 180 - blockdag/thresholdstate.go | 356 -- blockdag/thresholdstate_test.go | 134 - blockdag/timesorter.go | 27 - blockdag/timesorter_test.go | 47 - blockdag/utxo_ecmh.go | 32 - blockdag/utxodiffstore.go | 191 - blockdag/utxodiffstore_test.go | 86 - blockdag/utxoio.go | 314 - blockdag/utxoset.go | 906 --- blockdag/utxoset_test.go | 1187 ---- blockdag/validate.go | 978 --- blockdag/validate_test.go | 1343 ---- blockdag/versionbits.go | 294 -
blockdag/virtualblock.go | 162 - blockdag/virtualblock_test.go | 234 - cmd/addblock/addblock.go | 130 - cmd/addblock/config.go | 126 - cmd/addblock/import.go | 341 - cmd/addsubnetwork/addsubnetwork.go | 87 - cmd/addsubnetwork/config.go | 71 - cmd/addsubnetwork/connect.go | 37 - cmd/addsubnetwork/keys.go | 19 - cmd/addsubnetwork/log.go | 10 - cmd/addsubnetwork/registrytx.go | 29 - cmd/addsubnetwork/utxo.go | 112 - cmd/genaddr/genaddr.go | 35 - cmd/gencerts/gencerts.go | 102 - cmd/kaspactl/config.go | 321 - cmd/kaspactl/httpclient.go | 128 - cmd/kaspactl/kaspactl.go | 167 - cmd/kaspactl/version.go | 75 - cmd/txgen/client.go | 45 - cmd/txgen/config.go | 79 - cmd/txgen/connect.go | 39 - cmd/txgen/docker/Dockerfile | 28 - cmd/txgen/docker/README | 9 - cmd/txgen/log.go | 27 - cmd/txgen/main.go | 70 - cmd/txgen/txloop.go | 528 -- cmd/txsigner/config.go | 36 - cmd/txsigner/txsigner.go | 88 - dnsseeder/config.go => config.go | 0 config/config.go | 978 --- config/config_test.go | 90 - config/log.go | 11 - config/network.go | 68 - connmgr/README.md | 27 - connmgr/connmanager.go | 650 -- connmgr/connmanager_test.go | 688 --- connmgr/doc.go | 14 - connmgr/dynamicbanscore.go | 144 - connmgr/dynamicbanscore_test.go | 68 - connmgr/log.go | 13 - connmgr/seed.go | 99 - dagconfig/README.md | 48 - dagconfig/doc.go | 61 - dagconfig/genesis.go | 146 - dagconfig/genesis_test.go | 154 - dagconfig/params.go | 528 -- dagconfig/params_test.go | 59 - dagconfig/register_test.go | 209 - database/README.md | 32 - database/cmd/dbtool/fetchblock.go | 62 - database/cmd/dbtool/fetchblockregion.go | 90 - database/cmd/dbtool/globalconfig.go | 111 - database/cmd/dbtool/insecureimport.go | 391 -- database/cmd/dbtool/loadheaders.go | 93 - database/cmd/dbtool/main.go | 119 - database/cmd/dbtool/signal.go | 82 - database/doc.go | 89 - database/driver.go | 84 - database/driver_test.go | 136 - database/error.go | 207 - database/error_test.go | 118 - database/example_test.go | 180 - database/export_test.go | 17 - database/ffldb/README.md | 34 - database/ffldb/bench_test.go | 97 - database/ffldb/blockio.go | 780 --- database/ffldb/blockio_test.go | 182 - database/ffldb/common_test.go | 43 - database/ffldb/db.go | 2084 ------- database/ffldb/db_test.go | 726 --- database/ffldb/dbcache.go | 660 -- database/ffldb/dbcache_test.go | 319 - database/ffldb/doc.go | 29 - database/ffldb/driver.go | 60 - database/ffldb/driver_test.go | 290 - database/ffldb/export_test.go | 29 - database/ffldb/init.go | 23 - database/ffldb/init_test.go | 26 - database/ffldb/interface_test.go | 2303 ------- database/ffldb/ldbtreapiter.go | 58 - database/ffldb/log.go | 11 - database/ffldb/mockfile_test.go | 163 - database/ffldb/reconcile.go | 117 - database/ffldb/reconcile_test.go | 180 - database/ffldb/whitebox_test.go | 707 --- database/interface.go | 469 -- database/internal/treap/README.md | 27 - database/internal/treap/common.go | 136 - database/internal/treap/common_test.go | 121 - database/internal/treap/doc.go | 27 - database/internal/treap/immutable.go | 360 -- database/internal/treap/immutable_test.go | 497 -- database/internal/treap/mutable.go | 278 - database/internal/treap/mutable_test.go | 465 -- database/internal/treap/treapiter.go | 354 -- database/internal/treap/treapiter_test.go | 719 --- database/log.go | 11 - database/testdata/blocks1-256.bz2 | Bin 9938 -> 0 bytes database/testdata/generator.go | 152 - deploy.sh | 164 - dnsseeder/dns.go => dns.go | 0 dnsseeder/dnsseed.go => dnsseed.go | 0 dnsseeder/LICENSE | 15 - dnsseeder/README.md | 58 - 
dnsseeder/docker/Dockerfile | 28 - dnsseeder/log.go | 27 - doc.go | 121 - docker/Dockerfile | 28 +- {dnsseeder/docker => docker}/README | 0 docs/README.md | 299 - docs/code_contribution_guidelines.md | 355 -- ...configure_peer_server_listen_interfaces.md | 35 - .../configure_rpc_server_listen_interfaces.md | 47 - docs/configuring_tor.md | 207 - docs/default_ports.md | 15 - docs/json_rpc_api.md | 1363 ---- docs/using_bootstrap_dat.md | 79 - ecc/README.md | 24 - ecc/bench_test.go | 123 - ecc/doc.go | 21 - ecc/ecc.go | 1083 ---- ecc/ecc_test.go | 887 --- ecc/ecmh.go | 152 - ecc/ecmh_test.go | 213 - ecc/example_test.go | 87 - ecc/field.go | 1223 ---- ecc/field_test.go | 822 --- ecc/genprecomps.go | 63 - ecc/gensecp256k1.go | 203 - ecc/precompute.go | 67 - ecc/privkey.go | 72 - ecc/privkey_test.go | 55 - ecc/pubkey.go | 189 - ecc/pubkey_test.go | 296 - ecc/secp256k1.go | 10 - ecc/signature.go | 267 - ecc/signature_test.go | 269 - faucet/config/config.go | 121 - faucet/database/database.go | 151 - faucet/database/log.go | 9 - faucet/docker/Dockerfile | 28 - faucet/faucet.go | 332 - faucet/ip_usage.go | 66 - faucet/log.go | 11 - faucet/main.go | 88 - .../000001_create_ip_uses_table.down.sql | 1 - .../000001_create_ip_uses_table.up.sql | 6 - faucet/server.go | 81 - go.mod | 28 - go.sum | 320 - goclean.sh | 38 - httpserverutils/context.go | 79 - httpserverutils/error.go | 109 - httpserverutils/log.go | 9 - httpserverutils/middlewares.go | 63 - httpserverutils/request.go | 58 - integration/README.md | 10 - integration/bip0009_test.go | 401 -- integration/csv_test.go | 574 -- integration/main.go | 8 - integration/rpcserver_test.go | 166 - integration/rpctest/README.md | 13 - integration/rpctest/blockgen.go | 207 - integration/rpctest/btcd.go | 73 - integration/rpctest/doc.go | 12 - integration/rpctest/memwallet.go | 526 -- integration/rpctest/node.go | 291 - integration/rpctest/rpc_harness.go | 491 -- integration/rpctest/rpc_harness_test.go | 630 -- integration/rpctest/utils.go | 170 - kaspad.go | 350 -- kasparov/config/config.go | 69 - kasparov/database/database.go | 141 - kasparov/database/log.go | 9 - .../000001_create_blocks_table.down.sql | 1 - .../000001_create_blocks_table.up.sql | 23 - ...000002_create_parent_blocks_table.down.sql | 1 - .../000002_create_parent_blocks_table.up.sql | 12 - .../000003_create_raw_blocks_table.down.sql | 1 - .../000003_create_raw_blocks_table.up.sql | 9 - .../000004_create_subnetworks_table.down.sql | 1 - .../000004_create_subnetworks_table.up.sql | 8 - .../000005_create_transactions_table.down.sql | 1 - .../000005_create_transactions_table.up.sql | 19 - ...eate_transactions_to_blocks_table.down.sql | 1 - ...create_transactions_to_blocks_table.up.sql | 14 - .../000007_create_addresses_table.down.sql | 1 - .../000007_create_addresses_table.up.sql | 7 - ..._create_transaction_outputs_table.down.sql | 1 - ...08_create_transaction_outputs_table.up.sql | 18 - ...9_create_transaction_inputs_table.down.sql | 1 - ...009_create_transaction_inputs_table.up.sql | 18 - kasparov/dbmodels/dbmodels.go | 111 - kasparov/jsonrpc/client.go | 124 - kasparov/jsonrpc/log.go | 16 - kasparov/kasparovd/apimodels/request_types.go | 6 - .../kasparovd/apimodels/response_types.go | 64 - kasparov/kasparovd/config/config.go | 49 - kasparov/kasparovd/controllers/block.go | 110 - kasparov/kasparovd/controllers/common.go | 66 - kasparov/kasparovd/controllers/feeestimate.go | 13 - kasparov/kasparovd/controllers/transaction.go | 314 - kasparov/kasparovd/docker/Dockerfile | 29 - 
kasparov/kasparovd/log.go | 11 - kasparov/kasparovd/main.go | 55 - kasparov/kasparovd/server/log.go | 9 - kasparov/kasparovd/server/routes.go | 178 - kasparov/kasparovd/server/server.go | 40 - kasparov/kasparovsyncd/config/config.go | 55 - kasparov/kasparovsyncd/docker/Dockerfile | 29 - kasparov/kasparovsyncd/log.go | 11 - kasparov/kasparovsyncd/main.go | 76 - kasparov/kasparovsyncd/mqtt/log.go | 9 - kasparov/kasparovsyncd/mqtt/mqtt.go | 75 - kasparov/kasparovsyncd/mqtt/selected_tip.go | 17 - kasparov/kasparovsyncd/mqtt/transactions.go | 117 - kasparov/kasparovsyncd/sync.go | 1162 ---- kasparov/logger/logger.go | 47 - limits/limits_plan9.go | 10 - limits/limits_unix.go | 52 - limits/limits_windows.go | 10 - log.go | 30 +- logger/logger.go | 256 - logs/LICENSE | 15 - logs/doc.go | 27 - logs/interface.go | 64 - logs/logs.go | 519 -- dnsseeder/manager.go => manager.go | 0 mempool/README.md | 72 - mempool/doc.go | 81 - mempool/error.go | 129 - mempool/log.go | 11 - mempool/mempool.go | 1390 ----- mempool/mempool_test.go | 1831 ------ mempool/policy.go | 267 - mempool/policy_test.go | 337 - mining/cpuminer/cpuminer.go | 639 -- mining/cpuminer/log.go | 13 - mining/log.go | 11 - mining/mining.go | 405 -- mining/mining_test.go | 239 - mining/policy.go | 14 - mining/simulator/addresslist.go | 65 - mining/simulator/client.go | 38 - mining/simulator/config.go | 66 - mining/simulator/conn_manager.go | 149 - mining/simulator/docker/Dockerfile | 28 - mining/simulator/docker/README | 10 - mining/simulator/log.go | 34 - mining/simulator/main.go | 46 - mining/simulator/mineloop.go | 207 - mining/test_utils.go | 130 - mining/txselection.go | 370 -- mining/txselection_test.go | 539 -- netsync/README.md | 15 - netsync/blocklogger.go | 80 - netsync/doc.go | 13 - netsync/interface.go | 33 - netsync/log.go | 13 - netsync/manager.go | 1102 ---- peer/README.md | 49 - peer/doc.go | 145 - peer/example_test.go | 110 - peer/export_test.go | 18 - peer/log.go | 192 - peer/mruinvmap.go | 127 - peer/mruinvmap_test.go | 170 - peer/mrunoncemap.go | 125 - peer/mrunoncemap_test.go | 152 - peer/peer.go | 2025 ------ peer/peer_test.go | 749 --- release/GIT-GPG-KEY-conformal.txt | 74 - release/notes.sample | 6 - release/prep_release.sh | 205 - rpcclient/CONTRIBUTORS | 13 - rpcclient/README.md | 40 - rpcclient/dag.go | 747 --- rpcclient/doc.go | 153 - rpcclient/examples/httppost/README.md | 24 - rpcclient/examples/httppost/main.go | 36 - rpcclient/examples/websockets/README.md | 24 - rpcclient/examples/websockets/main.go | 74 - rpcclient/infrastructure.go | 1353 ---- rpcclient/log.go | 51 - rpcclient/mining.go | 290 - rpcclient/net.go | 585 -- rpcclient/notify.go | 628 -- rpcclient/rawrequest.go | 75 - rpcclient/rawtransactions.go | 422 -- rpcmodel/CONTRIBUTORS | 16 - rpcmodel/README.md | 34 - rpcmodel/command_info.go | 249 - rpcmodel/command_info_test.go | 425 -- rpcmodel/command_parse.go | 550 -- rpcmodel/command_parse_test.go | 519 -- rpcmodel/doc.go | 141 - rpcmodel/error.go | 111 - rpcmodel/error_test.go | 80 - rpcmodel/example_test.go | 151 - rpcmodel/export_test.go | 48 - rpcmodel/help.go | 562 -- rpcmodel/help_test.go | 737 --- rpcmodel/helpers.go | 77 - rpcmodel/helpers_test.go | 115 - rpcmodel/jsonrpc.go | 150 - rpcmodel/jsonrpc_errors.go | 74 - rpcmodel/jsonrpc_test.go | 161 - rpcmodel/register.go | 287 - rpcmodel/register_test.go | 262 - rpcmodel/rpc_commands.go | 814 --- rpcmodel/rpc_commands_test.go | 1209 ---- rpcmodel/rpc_results.go | 523 -- rpcmodel/rpc_results_test.go | 92 - rpcmodel/rpc_websocket_commands.go | 
152 - rpcmodel/rpc_websocket_commands_test.go | 240 - rpcmodel/rpc_websocket_notifications.go | 138 - rpcmodel/rpc_websocket_notifications_test.go | 215 - rpcmodel/rpc_websocket_results.go | 18 - rpcmodel/rpc_websocket_results_test.go | 50 - sample-kaspad.conf | 347 -- server/log.go | 15 - server/p2p/log.go | 20 - server/p2p/on_addr.go | 63 - server/p2p/on_block.go | 33 - server/p2p/on_block_locator.go | 44 - server/p2p/on_fee_filter.go | 24 - server/p2p/on_filter_add.go | 27 - server/p2p/on_filter_clear.go | 27 - server/p2p/on_filter_load.go | 23 - server/p2p/on_get_addr.go | 43 - server/p2p/on_get_block_invs.go | 43 - server/p2p/on_get_block_locator.go | 25 - server/p2p/on_get_data.go | 83 - server/p2p/on_inv.go | 41 - server/p2p/on_tx.go | 36 - server/p2p/on_version.go | 64 - server/p2p/p2p.go | 2046 ------ server/rpc/common.go | 382 -- server/rpc/handle_add_manual_node.go | 31 - server/rpc/handle_create_raw_transaction.go | 115 - server/rpc/handle_debug_level.go | 28 - server/rpc/handle_decode_raw_transaction.go | 41 - server/rpc/handle_decode_script.go | 55 - server/rpc/handle_generate.go | 68 - .../rpc/handle_get_all_manual_nodes_info.go | 9 - server/rpc/handle_get_block.go | 113 - server/rpc/handle_get_block_count.go | 6 - server/rpc/handle_get_block_dag_info.go | 100 - server/rpc/handle_get_block_header.go | 84 - server/rpc/handle_get_block_template.go | 868 --- server/rpc/handle_get_blocks.go | 118 - server/rpc/handle_get_chain_from_block.go | 90 - server/rpc/handle_get_connection_count.go | 6 - server/rpc/handle_get_current_net.go | 6 - server/rpc/handle_get_difficulty.go | 6 - server/rpc/handle_get_generate.go | 6 - server/rpc/handle_get_hashes_per_sec.go | 18 - server/rpc/handle_get_headers.go | 49 - server/rpc/handle_get_info.go | 26 - server/rpc/handle_get_manual_node_info.go | 113 - server/rpc/handle_get_mempool_info.go | 20 - server/rpc/handle_get_mining_info.go | 40 - server/rpc/handle_get_net_totals.go | 17 - server/rpc/handle_get_peer_info.go | 44 - server/rpc/handle_get_raw_mempool.go | 23 - server/rpc/handle_get_raw_transaction.go | 140 - server/rpc/handle_get_selected_tip.go | 46 - server/rpc/handle_get_selected_tip_hash.go | 6 - server/rpc/handle_get_subnetwork.go | 34 - server/rpc/handle_get_top_headers.go | 41 - server/rpc/handle_get_tx_out.go | 124 - server/rpc/handle_help.go | 42 - server/rpc/handle_load_tx_filter.go | 52 - server/rpc/handle_node.go | 114 - server/rpc/handle_notify_blocks.go | 8 - server/rpc/handle_notify_chain_changes.go | 19 - server/rpc/handle_notify_new_transactions.go | 63 - server/rpc/handle_ping.go | 19 - server/rpc/handle_remove_manual_node.go | 24 - server/rpc/handle_rescan_block_filter.go | 71 - server/rpc/handle_rescan_blocks.go | 74 - server/rpc/handle_search_raw_transactions.go | 472 -- server/rpc/handle_send_raw_transaction.go | 92 - server/rpc/handle_session.go | 9 - server/rpc/handle_set_generate.go | 49 - server/rpc/handle_stop.go | 10 - server/rpc/handle_stop_notify_blocks.go | 8 - .../rpc/handle_stop_notify_chain_changes.go | 8 - .../handle_stop_notify_new_transactions.go | 8 - server/rpc/handle_submit_block.go | 45 - server/rpc/handle_uptime.go | 8 - server/rpc/handle_validate_address.go | 23 - server/rpc/handle_version.go | 24 - server/rpc/handle_websocket_help.go | 50 - server/rpc/log.go | 15 - server/rpc/rpcadapters.go | 280 - server/rpc/rpcserver.go | 947 --- server/rpc/rpcserverhelp.go | 795 --- server/rpc/rpcserverhelp_test.go | 67 - server/rpc/rpcwebsocket.go | 1366 ---- server/server.go | 144 - 
server/serverutils/log.go | 11 - server/serverutils/upnp.go | 404 -- server/serverutils/utils.go | 86 - service_windows.go | 308 - signal/log.go | 11 - signal/signal.go | 83 - signal/signalsigterm.go | 16 - telegram.sh | 35 - test.sh | 35 - testutil/testutil.go | 13 - txscript/README.md | 24 - txscript/consensus.go | 13 - txscript/data/LICENSE | 8 - txscript/data/script_tests.json | 5497 ----------------- txscript/data/sighash.json | 3505 ----------- txscript/data/tx_invalid.json | 714 --- txscript/data/tx_valid.json | 938 --- txscript/doc.go | 38 - txscript/engine.go | 514 -- txscript/engine_test.go | 333 - txscript/error.go | 316 - txscript/error_test.go | 107 - txscript/example_test.go | 75 - txscript/log.go | 24 - txscript/main_test.go | 15 - txscript/opcode.go | 2312 ------- txscript/opcode_test.go | 194 - txscript/reference_test.go | 378 -- txscript/script.go | 483 -- txscript/script_test.go | 3973 ------------ txscript/scriptbuilder.go | 274 - txscript/scriptbuilder_test.go | 411 -- txscript/scriptnum.go | 220 - txscript/scriptnum_test.go | 256 - txscript/sigcache.go | 99 - txscript/sigcache_test.go | 140 - txscript/sign.go | 224 - txscript/sign_test.go | 953 --- txscript/stack.go | 360 -- txscript/stack_test.go | 864 --- txscript/standard.go | 400 -- txscript/standard_test.go | 504 -- upgrade.go | 11 - util/README.md | 7 - util/address.go | 291 - util/address_test.go | 390 -- util/amount.go | 121 - util/amount_test.go | 261 - util/appdata.go | 105 - util/appdata_test.go | 133 - util/base58/README.md | 23 - util/base58/alphabet.go | 49 - util/base58/base58.go | 75 - util/base58/base58_test.go | 97 - util/base58/base58bench_test.go | 34 - util/base58/base58check.go | 52 - util/base58/base58check_test.go | 65 - util/base58/cov_report.sh | 17 - util/base58/doc.go | 29 - util/base58/example_test.go | 70 - util/base58/genalphabet.go | 79 - util/bech32/README.md | 15 - util/bech32/bech32.go | 295 - util/bech32/bech32_test.go | 60 - util/bech32/doc.go | 12 - util/bech32/example_test.go | 42 - util/bech32/internal_test.go | 76 - util/binaryserializer/binaryserializer.go | 146 - .../binaryserializer/binaryserializer_test.go | 58 - util/block.go | 262 - util/block_test.go | 577 -- util/bloom/README.md | 15 - util/bloom/cov_report.sh | 17 - util/bloom/example_test.go | 45 - util/bloom/filter.go | 347 -- util/bloom/filter_test.go | 543 -- util/bloom/merkleblock.go | 125 - util/bloom/merkleblock_test.go | 107 - util/bloom/murmurhash3.go | 72 - util/bloom/murmurhash3_test.go | 44 - util/bloom/test_coverage.txt | 28 - util/btcmath.go | 146 - util/btcmath_test.go | 35 - util/btcstrings.go | 69 - util/btcstrings_test.go | 61 - util/btcutil.go | 18 - util/certgen.go | 143 - util/certgen_test.go | 123 - util/const.go | 13 - util/cov_report.sh | 17 - util/daghash/README.md | 10 - util/daghash/doc.go | 5 - util/daghash/hash.go | 258 - util/daghash/hash_test.go | 427 -- util/daghash/hashfuncs.go | 40 - util/daghash/hashfuncs_test.go | 136 - util/doc.go | 46 - util/example_test.go | 110 - util/fs/fs.go | 13 - util/hash160.go | 23 - util/hdkeychain/README.md | 43 - util/hdkeychain/bench_test.go | 83 - util/hdkeychain/cov_report.sh | 17 - util/hdkeychain/doc.go | 84 - util/hdkeychain/example_test.go | 182 - util/hdkeychain/extendedkey.go | 644 -- util/hdkeychain/extendedkey_test.go | 1070 ---- util/hdkeychain/test_coverage.txt | 20 - util/internal_test.go | 54 - util/locks/log.go | 15 - util/locks/prioritymutex.go | 79 - util/locks/prioritymutex_test.go | 80 - util/locks/waitgroup.go | 108 - 
util/locks/waitgroup_test.go | 247 - util/net.go | 18 - util/net_noop.go | 19 - util/network/network.go | 39 - util/network/tor.go | 129 - util/panics/panics.go | 46 - util/random/random.go | 25 - util/random/random_test.go | 72 - util/subnetworkid/subnetworkid.go | 210 - util/testtools/testtools.go | 96 - util/tx.go | 118 - util/tx_test.go | 111 - util/txsort/README.md | 18 - util/txsort/doc.go | 20 - util/txsort/testdata/bip69-1.hex | 1 - util/txsort/testdata/bip69-2.hex | 1 - util/txsort/testdata/bip69-3.hex | 1 - util/txsort/testdata/bip69-4.hex | 1 - util/txsort/testdata/bip69-5.hex | 1 - util/txsort/txsort.go | 95 - util/txsort/txsort_test.go | 124 - util/wif.go | 164 - util/wif_test.go | 71 - version/version.go | 72 - wire/README.md | 72 - wire/bench_test.go | 627 -- wire/blockheader.go | 185 - wire/blockheader_test.go | 346 -- wire/common.go | 531 -- wire/common_test.go | 710 --- wire/doc.go | 144 - wire/error.go | 34 - wire/fakemessage_test.go | 56 - wire/fixedIO_test.go | 77 - wire/invvect.go | 96 - wire/invvect_test.go | 173 - wire/message.go | 372 -- wire/message_test.go | 450 -- wire/msgaddr.go | 180 - wire/msgaddr_test.go | 313 - wire/msgblock.go | 249 - wire/msgblock_test.go | 610 -- wire/msgblocklocator.go | 112 - wire/msgblocklocator_test.go | 263 - wire/msgfeefilter.go | 51 - wire/msgfeefilter_test.go | 178 - wire/msgfilteradd.go | 69 - wire/msgfilteradd_test.go | 157 - wire/msgfilterclear.go | 46 - wire/msgfilterclear_test.go | 101 - wire/msgfilterload.go | 118 - wire/msgfilterload_test.go | 204 - wire/msggetaddr.go | 104 - wire/msggetaddr_test.go | 110 - wire/msggetblockinvs.go | 66 - wire/msggetblockinvs_test.go | 238 - wire/msggetblocklocator.go | 71 - wire/msggetblocklocator_test.go | 221 - wire/msggetdata.go | 133 - wire/msggetdata_test.go | 269 - wire/msggetheaders.go | 78 - wire/msggetheaders_test.go | 226 - wire/msgheaders.go | 136 - wire/msgheaders_test.go | 328 - wire/msginv.go | 141 - wire/msginv_test.go | 270 - wire/msgmerkleblock.go | 148 - wire/msgmerkleblock_test.go | 387 -- wire/msgnotfound.go | 110 - wire/msgnotfound_test.go | 259 - wire/msgping.go | 69 - wire/msgping_test.go | 164 - wire/msgpong.go | 53 - wire/msgpong_test.go | 201 - wire/msgreject.go | 171 - wire/msgreject_test.go | 296 - wire/msgsendheaders.go | 46 - wire/msgsendheaders_test.go | 118 - wire/msgtx.go | 995 --- wire/msgtx_test.go | 941 --- wire/msgverack.go | 46 - wire/msgverack_test.go | 87 - wire/msgversion.go | 285 - wire/msgversion_test.go | 406 -- wire/netaddress.go | 134 - wire/netaddress_test.go | 217 - wire/protocol.go | 133 - wire/protocol_test.go | 58 - wire/testdata/megatx.bin.bz2 | Bin 542816 -> 0 bytes
 701 files changed, 55 insertions(+), 153940 deletions(-)

delete mode 100644 CHANGES delete mode 100644 Jenkinsfile delete mode 100644 addrmgr/addrmanager.go delete mode 100644 addrmgr/addrmanager_test.go delete mode 100644 addrmgr/cov_report.sh delete mode 100644 addrmgr/doc.go delete mode 100644 addrmgr/internal_test.go delete mode 100644 addrmgr/knownaddress.go delete mode 100644 addrmgr/knownaddress_test.go delete mode 100644 addrmgr/log.go delete mode 100644 addrmgr/network.go delete mode 100644 addrmgr/network_test.go delete mode 100644 addrmgr/test_coverage.txt delete mode 100644 blockdag/README.md delete mode 100644 blockdag/accept.go delete mode 100644 blockdag/accept_test.go delete mode 100644 blockdag/blockheap.go delete mode 100644 blockdag/blockheap_test.go delete mode 100644 blockdag/blockidhash.go delete mode 100644 blockdag/blockindex.go delete mode 100644
blockdag/blockindex_test.go delete mode 100644 blockdag/blocklocator.go delete mode 100644 blockdag/blocknode.go delete mode 100644 blockdag/blocknode_test.go delete mode 100644 blockdag/blockset.go delete mode 100644 blockdag/blockset_test.go delete mode 100644 blockdag/blockwindow.go delete mode 100644 blockdag/blockwindow_test.go delete mode 100644 blockdag/coinbase.go delete mode 100644 blockdag/coinbase_test.go delete mode 100644 blockdag/common_test.go delete mode 100644 blockdag/compress.go delete mode 100644 blockdag/compress_test.go delete mode 100644 blockdag/dag.go delete mode 100644 blockdag/dag_test.go delete mode 100644 blockdag/dagio.go delete mode 100644 blockdag/dagio_test.go delete mode 100644 blockdag/difficulty.go delete mode 100644 blockdag/difficulty_test.go delete mode 100644 blockdag/doc.go delete mode 100644 blockdag/error.go delete mode 100644 blockdag/error_test.go delete mode 100644 blockdag/external_dag_test.go delete mode 100644 blockdag/fullblocks_test.go delete mode 100644 blockdag/fullblocktests/README.md delete mode 100644 blockdag/fullblocktests/doc.go delete mode 100644 blockdag/fullblocktests/generate.go delete mode 100644 blockdag/fullblocktests/params.go delete mode 100644 blockdag/indexers/README.md delete mode 100644 blockdag/indexers/acceptanceindex.go delete mode 100644 blockdag/indexers/acceptanceindex_test.go delete mode 100644 blockdag/indexers/addrindex.go delete mode 100644 blockdag/indexers/addrindex_test.go delete mode 100644 blockdag/indexers/common.go delete mode 100644 blockdag/indexers/log.go delete mode 100644 blockdag/indexers/manager.go delete mode 100644 blockdag/indexers/txindex.go delete mode 100644 blockdag/indexers/txindex_test.go delete mode 100644 blockdag/log.go delete mode 100644 blockdag/mediantime.go delete mode 100644 blockdag/mediantime_test.go delete mode 100644 blockdag/merkle.go delete mode 100644 blockdag/merkle_test.go delete mode 100644 blockdag/notifications.go delete mode 100644 blockdag/notifications_test.go delete mode 100644 blockdag/phantom.go delete mode 100644 blockdag/phantom_test.go delete mode 100644 blockdag/process.go delete mode 100644 blockdag/process_test.go delete mode 100644 blockdag/scriptval.go delete mode 100644 blockdag/scriptval_test.go delete mode 100644 blockdag/subnetworks.go delete mode 100644 blockdag/subnetworks_test.go delete mode 100644 blockdag/test_utils.go delete mode 100644 blockdag/test_utils_test.go delete mode 100644 blockdag/testdata/277647.dat delete mode 100644 blockdag/testdata/277647.utxostore delete mode 100644 blockdag/testdata/blk_0_to_4.dat delete mode 100644 blockdag/testdata/blk_3A.dat delete mode 100644 blockdag/testdata/blk_3B.dat delete mode 100644 blockdag/testdata/blk_3C.dat delete mode 100644 blockdag/testdata/blk_3D.dat delete mode 100644 blockdag/testdata/reorgtest.hex delete mode 100644 blockdag/thresholdstate.go delete mode 100644 blockdag/thresholdstate_test.go delete mode 100644 blockdag/timesorter.go delete mode 100644 blockdag/timesorter_test.go delete mode 100644 blockdag/utxo_ecmh.go delete mode 100644 blockdag/utxodiffstore.go delete mode 100644 blockdag/utxodiffstore_test.go delete mode 100644 blockdag/utxoio.go delete mode 100644 blockdag/utxoset.go delete mode 100644 blockdag/utxoset_test.go delete mode 100644 blockdag/validate.go delete mode 100644 blockdag/validate_test.go delete mode 100644 blockdag/versionbits.go delete mode 100644 blockdag/virtualblock.go delete mode 100644 blockdag/virtualblock_test.go delete mode 100644 
cmd/addblock/addblock.go delete mode 100644 cmd/addblock/config.go delete mode 100644 cmd/addblock/import.go delete mode 100644 cmd/addsubnetwork/addsubnetwork.go delete mode 100644 cmd/addsubnetwork/config.go delete mode 100644 cmd/addsubnetwork/connect.go delete mode 100644 cmd/addsubnetwork/keys.go delete mode 100644 cmd/addsubnetwork/log.go delete mode 100644 cmd/addsubnetwork/registrytx.go delete mode 100644 cmd/addsubnetwork/utxo.go delete mode 100644 cmd/genaddr/genaddr.go delete mode 100644 cmd/gencerts/gencerts.go delete mode 100644 cmd/kaspactl/config.go delete mode 100644 cmd/kaspactl/httpclient.go delete mode 100644 cmd/kaspactl/kaspactl.go delete mode 100644 cmd/kaspactl/version.go delete mode 100644 cmd/txgen/client.go delete mode 100644 cmd/txgen/config.go delete mode 100644 cmd/txgen/connect.go delete mode 100644 cmd/txgen/docker/Dockerfile delete mode 100644 cmd/txgen/docker/README delete mode 100644 cmd/txgen/log.go delete mode 100644 cmd/txgen/main.go delete mode 100644 cmd/txgen/txloop.go delete mode 100644 cmd/txsigner/config.go delete mode 100644 cmd/txsigner/txsigner.go rename dnsseeder/config.go => config.go (100%) delete mode 100644 config/config.go delete mode 100644 config/config_test.go delete mode 100644 config/log.go delete mode 100644 config/network.go delete mode 100644 connmgr/README.md delete mode 100644 connmgr/connmanager.go delete mode 100644 connmgr/connmanager_test.go delete mode 100644 connmgr/doc.go delete mode 100644 connmgr/dynamicbanscore.go delete mode 100644 connmgr/dynamicbanscore_test.go delete mode 100644 connmgr/log.go delete mode 100644 connmgr/seed.go delete mode 100644 dagconfig/README.md delete mode 100644 dagconfig/doc.go delete mode 100644 dagconfig/genesis.go delete mode 100644 dagconfig/genesis_test.go delete mode 100644 dagconfig/params.go delete mode 100644 dagconfig/params_test.go delete mode 100644 dagconfig/register_test.go delete mode 100644 database/README.md delete mode 100644 database/cmd/dbtool/fetchblock.go delete mode 100644 database/cmd/dbtool/fetchblockregion.go delete mode 100644 database/cmd/dbtool/globalconfig.go delete mode 100644 database/cmd/dbtool/insecureimport.go delete mode 100644 database/cmd/dbtool/loadheaders.go delete mode 100644 database/cmd/dbtool/main.go delete mode 100644 database/cmd/dbtool/signal.go delete mode 100644 database/doc.go delete mode 100644 database/driver.go delete mode 100644 database/driver_test.go delete mode 100644 database/error.go delete mode 100644 database/error_test.go delete mode 100644 database/example_test.go delete mode 100644 database/export_test.go delete mode 100644 database/ffldb/README.md delete mode 100644 database/ffldb/bench_test.go delete mode 100644 database/ffldb/blockio.go delete mode 100644 database/ffldb/blockio_test.go delete mode 100644 database/ffldb/common_test.go delete mode 100644 database/ffldb/db.go delete mode 100644 database/ffldb/db_test.go delete mode 100644 database/ffldb/dbcache.go delete mode 100644 database/ffldb/dbcache_test.go delete mode 100644 database/ffldb/doc.go delete mode 100644 database/ffldb/driver.go delete mode 100644 database/ffldb/driver_test.go delete mode 100644 database/ffldb/export_test.go delete mode 100644 database/ffldb/init.go delete mode 100644 database/ffldb/init_test.go delete mode 100644 database/ffldb/interface_test.go delete mode 100644 database/ffldb/ldbtreapiter.go delete mode 100644 database/ffldb/log.go delete mode 100644 database/ffldb/mockfile_test.go delete mode 100644 database/ffldb/reconcile.go delete mode 
100644 database/ffldb/reconcile_test.go delete mode 100644 database/ffldb/whitebox_test.go delete mode 100644 database/interface.go delete mode 100644 database/internal/treap/README.md delete mode 100644 database/internal/treap/common.go delete mode 100644 database/internal/treap/common_test.go delete mode 100644 database/internal/treap/doc.go delete mode 100644 database/internal/treap/immutable.go delete mode 100644 database/internal/treap/immutable_test.go delete mode 100644 database/internal/treap/mutable.go delete mode 100644 database/internal/treap/mutable_test.go delete mode 100644 database/internal/treap/treapiter.go delete mode 100644 database/internal/treap/treapiter_test.go delete mode 100644 database/log.go delete mode 100644 database/testdata/blocks1-256.bz2 delete mode 100644 database/testdata/generator.go delete mode 100755 deploy.sh rename dnsseeder/dns.go => dns.go (100%) rename dnsseeder/dnsseed.go => dnsseed.go (100%) delete mode 100644 dnsseeder/LICENSE delete mode 100644 dnsseeder/README.md delete mode 100644 dnsseeder/docker/Dockerfile delete mode 100644 dnsseeder/log.go delete mode 100644 doc.go rename {dnsseeder/docker => docker}/README (100%) delete mode 100644 docs/README.md delete mode 100644 docs/code_contribution_guidelines.md delete mode 100644 docs/configure_peer_server_listen_interfaces.md delete mode 100644 docs/configure_rpc_server_listen_interfaces.md delete mode 100644 docs/configuring_tor.md delete mode 100644 docs/default_ports.md delete mode 100644 docs/json_rpc_api.md delete mode 100644 docs/using_bootstrap_dat.md delete mode 100644 ecc/README.md delete mode 100644 ecc/bench_test.go delete mode 100644 ecc/doc.go delete mode 100644 ecc/ecc.go delete mode 100644 ecc/ecc_test.go delete mode 100644 ecc/ecmh.go delete mode 100644 ecc/ecmh_test.go delete mode 100644 ecc/example_test.go delete mode 100644 ecc/field.go delete mode 100644 ecc/field_test.go delete mode 100644 ecc/genprecomps.go delete mode 100644 ecc/gensecp256k1.go delete mode 100644 ecc/precompute.go delete mode 100644 ecc/privkey.go delete mode 100644 ecc/privkey_test.go delete mode 100644 ecc/pubkey.go delete mode 100644 ecc/pubkey_test.go delete mode 100644 ecc/secp256k1.go delete mode 100644 ecc/signature.go delete mode 100644 ecc/signature_test.go delete mode 100644 faucet/config/config.go delete mode 100644 faucet/database/database.go delete mode 100644 faucet/database/log.go delete mode 100644 faucet/docker/Dockerfile delete mode 100644 faucet/faucet.go delete mode 100644 faucet/ip_usage.go delete mode 100644 faucet/log.go delete mode 100644 faucet/main.go delete mode 100644 faucet/migrations/000001_create_ip_uses_table.down.sql delete mode 100644 faucet/migrations/000001_create_ip_uses_table.up.sql delete mode 100644 faucet/server.go delete mode 100644 go.mod delete mode 100644 go.sum delete mode 100755 goclean.sh delete mode 100644 httpserverutils/context.go delete mode 100644 httpserverutils/error.go delete mode 100644 httpserverutils/log.go delete mode 100644 httpserverutils/middlewares.go delete mode 100644 httpserverutils/request.go delete mode 100644 integration/README.md delete mode 100644 integration/bip0009_test.go delete mode 100644 integration/csv_test.go delete mode 100644 integration/main.go delete mode 100644 integration/rpcserver_test.go delete mode 100644 integration/rpctest/README.md delete mode 100644 integration/rpctest/blockgen.go delete mode 100644 integration/rpctest/btcd.go delete mode 100644 integration/rpctest/doc.go delete mode 100644 
integration/rpctest/memwallet.go delete mode 100644 integration/rpctest/node.go delete mode 100644 integration/rpctest/rpc_harness.go delete mode 100644 integration/rpctest/rpc_harness_test.go delete mode 100644 integration/rpctest/utils.go delete mode 100644 kaspad.go delete mode 100644 kasparov/config/config.go delete mode 100644 kasparov/database/database.go delete mode 100644 kasparov/database/log.go delete mode 100644 kasparov/database/migrations/000001_create_blocks_table.down.sql delete mode 100644 kasparov/database/migrations/000001_create_blocks_table.up.sql delete mode 100644 kasparov/database/migrations/000002_create_parent_blocks_table.down.sql delete mode 100644 kasparov/database/migrations/000002_create_parent_blocks_table.up.sql delete mode 100644 kasparov/database/migrations/000003_create_raw_blocks_table.down.sql delete mode 100644 kasparov/database/migrations/000003_create_raw_blocks_table.up.sql delete mode 100644 kasparov/database/migrations/000004_create_subnetworks_table.down.sql delete mode 100644 kasparov/database/migrations/000004_create_subnetworks_table.up.sql delete mode 100644 kasparov/database/migrations/000005_create_transactions_table.down.sql delete mode 100644 kasparov/database/migrations/000005_create_transactions_table.up.sql delete mode 100644 kasparov/database/migrations/000006_create_transactions_to_blocks_table.down.sql delete mode 100644 kasparov/database/migrations/000006_create_transactions_to_blocks_table.up.sql delete mode 100644 kasparov/database/migrations/000007_create_addresses_table.down.sql delete mode 100644 kasparov/database/migrations/000007_create_addresses_table.up.sql delete mode 100644 kasparov/database/migrations/000008_create_transaction_outputs_table.down.sql delete mode 100644 kasparov/database/migrations/000008_create_transaction_outputs_table.up.sql delete mode 100644 kasparov/database/migrations/000009_create_transaction_inputs_table.down.sql delete mode 100644 kasparov/database/migrations/000009_create_transaction_inputs_table.up.sql delete mode 100644 kasparov/dbmodels/dbmodels.go delete mode 100644 kasparov/jsonrpc/client.go delete mode 100644 kasparov/jsonrpc/log.go delete mode 100644 kasparov/kasparovd/apimodels/request_types.go delete mode 100644 kasparov/kasparovd/apimodels/response_types.go delete mode 100644 kasparov/kasparovd/config/config.go delete mode 100644 kasparov/kasparovd/controllers/block.go delete mode 100644 kasparov/kasparovd/controllers/common.go delete mode 100644 kasparov/kasparovd/controllers/feeestimate.go delete mode 100644 kasparov/kasparovd/controllers/transaction.go delete mode 100644 kasparov/kasparovd/docker/Dockerfile delete mode 100644 kasparov/kasparovd/log.go delete mode 100644 kasparov/kasparovd/main.go delete mode 100644 kasparov/kasparovd/server/log.go delete mode 100644 kasparov/kasparovd/server/routes.go delete mode 100644 kasparov/kasparovd/server/server.go delete mode 100644 kasparov/kasparovsyncd/config/config.go delete mode 100644 kasparov/kasparovsyncd/docker/Dockerfile delete mode 100644 kasparov/kasparovsyncd/log.go delete mode 100644 kasparov/kasparovsyncd/main.go delete mode 100644 kasparov/kasparovsyncd/mqtt/log.go delete mode 100644 kasparov/kasparovsyncd/mqtt/mqtt.go delete mode 100644 kasparov/kasparovsyncd/mqtt/selected_tip.go delete mode 100644 kasparov/kasparovsyncd/mqtt/transactions.go delete mode 100644 kasparov/kasparovsyncd/sync.go delete mode 100644 kasparov/logger/logger.go delete mode 100644 limits/limits_plan9.go delete mode 100644 limits/limits_unix.go delete 
mode 100644 limits/limits_windows.go delete mode 100644 logger/logger.go delete mode 100644 logs/LICENSE delete mode 100644 logs/doc.go delete mode 100644 logs/interface.go delete mode 100644 logs/logs.go rename dnsseeder/manager.go => manager.go (100%) delete mode 100644 mempool/README.md delete mode 100644 mempool/doc.go delete mode 100644 mempool/error.go delete mode 100644 mempool/log.go delete mode 100644 mempool/mempool.go delete mode 100644 mempool/mempool_test.go delete mode 100644 mempool/policy.go delete mode 100644 mempool/policy_test.go delete mode 100644 mining/cpuminer/cpuminer.go delete mode 100644 mining/cpuminer/log.go delete mode 100644 mining/log.go delete mode 100644 mining/mining.go delete mode 100644 mining/mining_test.go delete mode 100644 mining/policy.go delete mode 100644 mining/simulator/addresslist.go delete mode 100644 mining/simulator/client.go delete mode 100644 mining/simulator/config.go delete mode 100644 mining/simulator/conn_manager.go delete mode 100644 mining/simulator/docker/Dockerfile delete mode 100644 mining/simulator/docker/README delete mode 100644 mining/simulator/log.go delete mode 100644 mining/simulator/main.go delete mode 100644 mining/simulator/mineloop.go delete mode 100644 mining/test_utils.go delete mode 100644 mining/txselection.go delete mode 100644 mining/txselection_test.go delete mode 100644 netsync/README.md delete mode 100644 netsync/blocklogger.go delete mode 100644 netsync/doc.go delete mode 100644 netsync/interface.go delete mode 100644 netsync/log.go delete mode 100644 netsync/manager.go delete mode 100644 peer/README.md delete mode 100644 peer/doc.go delete mode 100644 peer/example_test.go delete mode 100644 peer/export_test.go delete mode 100644 peer/log.go delete mode 100644 peer/mruinvmap.go delete mode 100644 peer/mruinvmap_test.go delete mode 100644 peer/mrunoncemap.go delete mode 100644 peer/mrunoncemap_test.go delete mode 100644 peer/peer.go delete mode 100644 peer/peer_test.go delete mode 100644 release/GIT-GPG-KEY-conformal.txt delete mode 100644 release/notes.sample delete mode 100644 release/prep_release.sh delete mode 100644 rpcclient/CONTRIBUTORS delete mode 100644 rpcclient/README.md delete mode 100644 rpcclient/dag.go delete mode 100644 rpcclient/doc.go delete mode 100644 rpcclient/examples/httppost/README.md delete mode 100644 rpcclient/examples/httppost/main.go delete mode 100644 rpcclient/examples/websockets/README.md delete mode 100644 rpcclient/examples/websockets/main.go delete mode 100644 rpcclient/infrastructure.go delete mode 100644 rpcclient/log.go delete mode 100644 rpcclient/mining.go delete mode 100644 rpcclient/net.go delete mode 100644 rpcclient/notify.go delete mode 100644 rpcclient/rawrequest.go delete mode 100644 rpcclient/rawtransactions.go delete mode 100644 rpcmodel/CONTRIBUTORS delete mode 100644 rpcmodel/README.md delete mode 100644 rpcmodel/command_info.go delete mode 100644 rpcmodel/command_info_test.go delete mode 100644 rpcmodel/command_parse.go delete mode 100644 rpcmodel/command_parse_test.go delete mode 100644 rpcmodel/doc.go delete mode 100644 rpcmodel/error.go delete mode 100644 rpcmodel/error_test.go delete mode 100644 rpcmodel/example_test.go delete mode 100644 rpcmodel/export_test.go delete mode 100644 rpcmodel/help.go delete mode 100644 rpcmodel/help_test.go delete mode 100644 rpcmodel/helpers.go delete mode 100644 rpcmodel/helpers_test.go delete mode 100644 rpcmodel/jsonrpc.go delete mode 100644 rpcmodel/jsonrpc_errors.go delete mode 100644 rpcmodel/jsonrpc_test.go delete 
mode 100644 rpcmodel/register.go delete mode 100644 rpcmodel/register_test.go delete mode 100644 rpcmodel/rpc_commands.go delete mode 100644 rpcmodel/rpc_commands_test.go delete mode 100644 rpcmodel/rpc_results.go delete mode 100644 rpcmodel/rpc_results_test.go delete mode 100644 rpcmodel/rpc_websocket_commands.go delete mode 100644 rpcmodel/rpc_websocket_commands_test.go delete mode 100644 rpcmodel/rpc_websocket_notifications.go delete mode 100644 rpcmodel/rpc_websocket_notifications_test.go delete mode 100644 rpcmodel/rpc_websocket_results.go delete mode 100644 rpcmodel/rpc_websocket_results_test.go delete mode 100644 sample-kaspad.conf delete mode 100644 server/log.go delete mode 100644 server/p2p/log.go delete mode 100644 server/p2p/on_addr.go delete mode 100644 server/p2p/on_block.go delete mode 100644 server/p2p/on_block_locator.go delete mode 100644 server/p2p/on_fee_filter.go delete mode 100644 server/p2p/on_filter_add.go delete mode 100644 server/p2p/on_filter_clear.go delete mode 100644 server/p2p/on_filter_load.go delete mode 100644 server/p2p/on_get_addr.go delete mode 100644 server/p2p/on_get_block_invs.go delete mode 100644 server/p2p/on_get_block_locator.go delete mode 100644 server/p2p/on_get_data.go delete mode 100644 server/p2p/on_inv.go delete mode 100644 server/p2p/on_tx.go delete mode 100644 server/p2p/on_version.go delete mode 100644 server/p2p/p2p.go delete mode 100644 server/rpc/common.go delete mode 100644 server/rpc/handle_add_manual_node.go delete mode 100644 server/rpc/handle_create_raw_transaction.go delete mode 100644 server/rpc/handle_debug_level.go delete mode 100644 server/rpc/handle_decode_raw_transaction.go delete mode 100644 server/rpc/handle_decode_script.go delete mode 100644 server/rpc/handle_generate.go delete mode 100644 server/rpc/handle_get_all_manual_nodes_info.go delete mode 100644 server/rpc/handle_get_block.go delete mode 100644 server/rpc/handle_get_block_count.go delete mode 100644 server/rpc/handle_get_block_dag_info.go delete mode 100644 server/rpc/handle_get_block_header.go delete mode 100644 server/rpc/handle_get_block_template.go delete mode 100644 server/rpc/handle_get_blocks.go delete mode 100644 server/rpc/handle_get_chain_from_block.go delete mode 100644 server/rpc/handle_get_connection_count.go delete mode 100644 server/rpc/handle_get_current_net.go delete mode 100644 server/rpc/handle_get_difficulty.go delete mode 100644 server/rpc/handle_get_generate.go delete mode 100644 server/rpc/handle_get_hashes_per_sec.go delete mode 100644 server/rpc/handle_get_headers.go delete mode 100644 server/rpc/handle_get_info.go delete mode 100644 server/rpc/handle_get_manual_node_info.go delete mode 100644 server/rpc/handle_get_mempool_info.go delete mode 100644 server/rpc/handle_get_mining_info.go delete mode 100644 server/rpc/handle_get_net_totals.go delete mode 100644 server/rpc/handle_get_peer_info.go delete mode 100644 server/rpc/handle_get_raw_mempool.go delete mode 100644 server/rpc/handle_get_raw_transaction.go delete mode 100644 server/rpc/handle_get_selected_tip.go delete mode 100644 server/rpc/handle_get_selected_tip_hash.go delete mode 100644 server/rpc/handle_get_subnetwork.go delete mode 100644 server/rpc/handle_get_top_headers.go delete mode 100644 server/rpc/handle_get_tx_out.go delete mode 100644 server/rpc/handle_help.go delete mode 100644 server/rpc/handle_load_tx_filter.go delete mode 100644 server/rpc/handle_node.go delete mode 100644 server/rpc/handle_notify_blocks.go delete mode 100644 
server/rpc/handle_notify_chain_changes.go delete mode 100644 server/rpc/handle_notify_new_transactions.go delete mode 100644 server/rpc/handle_ping.go delete mode 100644 server/rpc/handle_remove_manual_node.go delete mode 100644 server/rpc/handle_rescan_block_filter.go delete mode 100644 server/rpc/handle_rescan_blocks.go delete mode 100644 server/rpc/handle_search_raw_transactions.go delete mode 100644 server/rpc/handle_send_raw_transaction.go delete mode 100644 server/rpc/handle_session.go delete mode 100644 server/rpc/handle_set_generate.go delete mode 100644 server/rpc/handle_stop.go delete mode 100644 server/rpc/handle_stop_notify_blocks.go delete mode 100644 server/rpc/handle_stop_notify_chain_changes.go delete mode 100644 server/rpc/handle_stop_notify_new_transactions.go delete mode 100644 server/rpc/handle_submit_block.go delete mode 100644 server/rpc/handle_uptime.go delete mode 100644 server/rpc/handle_validate_address.go delete mode 100644 server/rpc/handle_version.go delete mode 100644 server/rpc/handle_websocket_help.go delete mode 100644 server/rpc/log.go delete mode 100644 server/rpc/rpcadapters.go delete mode 100644 server/rpc/rpcserver.go delete mode 100644 server/rpc/rpcserverhelp.go delete mode 100644 server/rpc/rpcserverhelp_test.go delete mode 100644 server/rpc/rpcwebsocket.go delete mode 100644 server/server.go delete mode 100644 server/serverutils/log.go delete mode 100644 server/serverutils/upnp.go delete mode 100644 server/serverutils/utils.go delete mode 100644 service_windows.go delete mode 100644 signal/log.go delete mode 100644 signal/signal.go delete mode 100644 signal/signalsigterm.go delete mode 100755 telegram.sh delete mode 100755 test.sh delete mode 100644 testutil/testutil.go delete mode 100644 txscript/README.md delete mode 100644 txscript/consensus.go delete mode 100644 txscript/data/LICENSE delete mode 100644 txscript/data/script_tests.json delete mode 100644 txscript/data/sighash.json delete mode 100644 txscript/data/tx_invalid.json delete mode 100644 txscript/data/tx_valid.json delete mode 100644 txscript/doc.go delete mode 100644 txscript/engine.go delete mode 100644 txscript/engine_test.go delete mode 100644 txscript/error.go delete mode 100644 txscript/error_test.go delete mode 100644 txscript/example_test.go delete mode 100644 txscript/log.go delete mode 100644 txscript/main_test.go delete mode 100644 txscript/opcode.go delete mode 100644 txscript/opcode_test.go delete mode 100644 txscript/reference_test.go delete mode 100644 txscript/script.go delete mode 100644 txscript/script_test.go delete mode 100644 txscript/scriptbuilder.go delete mode 100644 txscript/scriptbuilder_test.go delete mode 100644 txscript/scriptnum.go delete mode 100644 txscript/scriptnum_test.go delete mode 100644 txscript/sigcache.go delete mode 100644 txscript/sigcache_test.go delete mode 100644 txscript/sign.go delete mode 100644 txscript/sign_test.go delete mode 100644 txscript/stack.go delete mode 100644 txscript/stack_test.go delete mode 100644 txscript/standard.go delete mode 100644 txscript/standard_test.go delete mode 100644 upgrade.go delete mode 100644 util/README.md delete mode 100644 util/address.go delete mode 100644 util/address_test.go delete mode 100644 util/amount.go delete mode 100644 util/amount_test.go delete mode 100644 util/appdata.go delete mode 100644 util/appdata_test.go delete mode 100644 util/base58/README.md delete mode 100644 util/base58/alphabet.go delete mode 100644 util/base58/base58.go delete mode 100644 util/base58/base58_test.go delete mode 
100644 util/base58/base58bench_test.go delete mode 100644 util/base58/base58check.go delete mode 100644 util/base58/base58check_test.go delete mode 100644 util/base58/cov_report.sh delete mode 100644 util/base58/doc.go delete mode 100644 util/base58/example_test.go delete mode 100644 util/base58/genalphabet.go delete mode 100644 util/bech32/README.md delete mode 100644 util/bech32/bech32.go delete mode 100644 util/bech32/bech32_test.go delete mode 100644 util/bech32/doc.go delete mode 100644 util/bech32/example_test.go delete mode 100644 util/bech32/internal_test.go delete mode 100644 util/binaryserializer/binaryserializer.go delete mode 100644 util/binaryserializer/binaryserializer_test.go delete mode 100644 util/block.go delete mode 100644 util/block_test.go delete mode 100644 util/bloom/README.md delete mode 100644 util/bloom/cov_report.sh delete mode 100644 util/bloom/example_test.go delete mode 100644 util/bloom/filter.go delete mode 100644 util/bloom/filter_test.go delete mode 100644 util/bloom/merkleblock.go delete mode 100644 util/bloom/merkleblock_test.go delete mode 100644 util/bloom/murmurhash3.go delete mode 100644 util/bloom/murmurhash3_test.go delete mode 100644 util/bloom/test_coverage.txt delete mode 100644 util/btcmath.go delete mode 100644 util/btcmath_test.go delete mode 100644 util/btcstrings.go delete mode 100644 util/btcstrings_test.go delete mode 100644 util/btcutil.go delete mode 100644 util/certgen.go delete mode 100644 util/certgen_test.go delete mode 100644 util/const.go delete mode 100644 util/cov_report.sh delete mode 100644 util/daghash/README.md delete mode 100644 util/daghash/doc.go delete mode 100644 util/daghash/hash.go delete mode 100644 util/daghash/hash_test.go delete mode 100644 util/daghash/hashfuncs.go delete mode 100644 util/daghash/hashfuncs_test.go delete mode 100644 util/doc.go delete mode 100644 util/example_test.go delete mode 100644 util/fs/fs.go delete mode 100644 util/hash160.go delete mode 100644 util/hdkeychain/README.md delete mode 100644 util/hdkeychain/bench_test.go delete mode 100644 util/hdkeychain/cov_report.sh delete mode 100644 util/hdkeychain/doc.go delete mode 100644 util/hdkeychain/example_test.go delete mode 100644 util/hdkeychain/extendedkey.go delete mode 100644 util/hdkeychain/extendedkey_test.go delete mode 100644 util/hdkeychain/test_coverage.txt delete mode 100644 util/internal_test.go delete mode 100644 util/locks/log.go delete mode 100644 util/locks/prioritymutex.go delete mode 100644 util/locks/prioritymutex_test.go delete mode 100644 util/locks/waitgroup.go delete mode 100644 util/locks/waitgroup_test.go delete mode 100644 util/net.go delete mode 100644 util/net_noop.go delete mode 100644 util/network/network.go delete mode 100644 util/network/tor.go delete mode 100644 util/panics/panics.go delete mode 100644 util/random/random.go delete mode 100644 util/random/random_test.go delete mode 100644 util/subnetworkid/subnetworkid.go delete mode 100644 util/testtools/testtools.go delete mode 100644 util/tx.go delete mode 100644 util/tx_test.go delete mode 100644 util/txsort/README.md delete mode 100644 util/txsort/doc.go delete mode 100644 util/txsort/testdata/bip69-1.hex delete mode 100644 util/txsort/testdata/bip69-2.hex delete mode 100644 util/txsort/testdata/bip69-3.hex delete mode 100644 util/txsort/testdata/bip69-4.hex delete mode 100644 util/txsort/testdata/bip69-5.hex delete mode 100644 util/txsort/txsort.go delete mode 100644 util/txsort/txsort_test.go delete mode 100644 util/wif.go delete mode 100644 
util/wif_test.go delete mode 100644 version/version.go delete mode 100644 wire/README.md delete mode 100644 wire/bench_test.go delete mode 100644 wire/blockheader.go delete mode 100644 wire/blockheader_test.go delete mode 100644 wire/common.go delete mode 100644 wire/common_test.go delete mode 100644 wire/doc.go delete mode 100644 wire/error.go delete mode 100644 wire/fakemessage_test.go delete mode 100644 wire/fixedIO_test.go delete mode 100644 wire/invvect.go delete mode 100644 wire/invvect_test.go delete mode 100644 wire/message.go delete mode 100644 wire/message_test.go delete mode 100644 wire/msgaddr.go delete mode 100644 wire/msgaddr_test.go delete mode 100644 wire/msgblock.go delete mode 100644 wire/msgblock_test.go delete mode 100644 wire/msgblocklocator.go delete mode 100644 wire/msgblocklocator_test.go delete mode 100644 wire/msgfeefilter.go delete mode 100644 wire/msgfeefilter_test.go delete mode 100644 wire/msgfilteradd.go delete mode 100644 wire/msgfilteradd_test.go delete mode 100644 wire/msgfilterclear.go delete mode 100644 wire/msgfilterclear_test.go delete mode 100644 wire/msgfilterload.go delete mode 100644 wire/msgfilterload_test.go delete mode 100644 wire/msggetaddr.go delete mode 100644 wire/msggetaddr_test.go delete mode 100644 wire/msggetblockinvs.go delete mode 100644 wire/msggetblockinvs_test.go delete mode 100644 wire/msggetblocklocator.go delete mode 100644 wire/msggetblocklocator_test.go delete mode 100644 wire/msggetdata.go delete mode 100644 wire/msggetdata_test.go delete mode 100644 wire/msggetheaders.go delete mode 100644 wire/msggetheaders_test.go delete mode 100644 wire/msgheaders.go delete mode 100644 wire/msgheaders_test.go delete mode 100644 wire/msginv.go delete mode 100644 wire/msginv_test.go delete mode 100644 wire/msgmerkleblock.go delete mode 100644 wire/msgmerkleblock_test.go delete mode 100644 wire/msgnotfound.go delete mode 100644 wire/msgnotfound_test.go delete mode 100644 wire/msgping.go delete mode 100644 wire/msgping_test.go delete mode 100644 wire/msgpong.go delete mode 100644 wire/msgpong_test.go delete mode 100644 wire/msgreject.go delete mode 100644 wire/msgreject_test.go delete mode 100644 wire/msgsendheaders.go delete mode 100644 wire/msgsendheaders_test.go delete mode 100644 wire/msgtx.go delete mode 100644 wire/msgtx_test.go delete mode 100644 wire/msgverack.go delete mode 100644 wire/msgverack_test.go delete mode 100644 wire/msgversion.go delete mode 100644 wire/msgversion_test.go delete mode 100644 wire/netaddress.go delete mode 100644 wire/netaddress_test.go delete mode 100644 wire/protocol.go delete mode 100644 wire/protocol_test.go delete mode 100644 wire/testdata/megatx.bin.bz2

diff --git a/CHANGES b/CHANGES
deleted file mode 100644
index aff6f2c3..00000000
--- a/CHANGES
+++ /dev/null
@@ -1,955 +0,0 @@
-============================================================================ -User visible changes for btcd - A full-node bitcoin implementation written in Go -============================================================================ - -Changes in 0.12.0 (Fri Nov 20 2015) - - Protocol and network related changes: - - Add a new checkpoint at block height 382320 (#555) - - Implement BIP0065 which includes support for version 4 blocks, a new - consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction - lock times, and a double-threshold switchover mechanism (#535, #459, - #455) - - Implement BIP0111 which provides a new bloom filter service flag and - hence provides support for protocol version 70011 (#499) - - Add a
new parameter --nopeerbloomfilters to allow disabling bloom - filter support (#499) - - Reject non-canonically encoded variable length integers (#507) - - Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch) - (#496) - - Correct reconnect handling for persistent peers (#463, #464) - - Ignore requests for block headers if not fully synced (#444) - - Add CLI support for specifying the zone id on IPv6 addresses (#538) - - Fix a couple of issues where the initial block sync could stall (#518, - #229, #486) - - Fix an issue which prevented the --onion option from working as - intended (#446) - - Transaction relay (memory pool) changes: - - Require transactions to only include signatures encoded with the - canonical 'low-s' encoding (#512) - - Add a new parameter --minrelaytxfee to allow the minimum transaction - fee in BTC/kB to be overridden (#520) - - Retain memory pool transactions when they redeem another one that is - removed when a block is accepted (#539) - - Do not send reject messages for a transaction if it is valid but - causes an orphan transaction which depends on it to be determined - as invalid (#546) - - Refrain from attempting to add orphans to the memory pool multiple - times when the transaction they redeem is added (#551) - - Modify minimum transaction fee calculations to scale based on bytes - instead of full kilobyte boundaries (#521, #537) - - Implement signature cache: - - Provides a limited memory cache of validated signatures which is a - huge optimization when verifying blocks for transactions that are - already in the memory pool (#506) - - Add a new parameter '--sigcachemaxsize' which allows the size of the - new cache to be manually changed if desired (#506) - - Mining support changes: - - Notify getblocktemplate long polling clients when a block is pushed - via submitblock (#488) - - Speed up getblocktemplate by making use of the new signature cache - (#506) - - RPC changes: - - Implement getmempoolinfo command (#453) - - Implement getblockheader command (#461) - - Modify createrawtransaction command to accept a new optional parameter - 'locktime' (#529) - - Modify listunspent result to include the 'spendable' field (#440) - - Modify getinfo command to include 'errors' field (#511) - - Add timestamps to blockconnected and blockdisconnected notifications - (#450) - - Several modifications to searchrawtranscations command: - - Accept a new optional parameter 'vinextra' which causes the results - to include information about the outputs referenced by a transaction's - inputs (#485, #487) - - Skip entries in the mempool too (#495) - - Accept a new optional parameter 'reverse' to return the results in - reverse order (most recent to oldest) (#497) - - Accept a new optional parameter 'filteraddrs' which causes the - results to only include inputs and outputs which involve the - provided addresses (#516) - - Change the notification order to notify clients about mined - transactions (recvtx, redeemingtx) before the blockconnected - notification (#449) - - Update verifymessage RPC to use the standard algorithm so it is - compatible with other implementations (#515) - - Improve ping statistics by pinging on an interval (#517) - - Websocket changes: - - Implement session command which returns a per-session unique id (#500, - #503) - - btcctl utility changes: - - Add getmempoolinfo command (#453) - - Add getblockheader command (#461) - - Add getwalletinfo command (#471) - - Notable developer-related package changes: - - Introduce a new peer package which acts a 
common base for creating and - concurrently managing bitcoin network peers (#445) - - Various cleanup of the new peer package (#528, #531, #524, #534, - #549) - - Block heights now consistently use int32 everywhere (#481) - - The BlockHeader type in the wire package now provides the BtcDecode - and BtcEncode methods (#467) - - Update wire package to recognize BIP0064 (getutxo) service bit (#489) - - Export LockTimeThreshold constant from txscript package (#454) - - Export MaxDataCarrierSize constant from txscript package (#466) - - Provide new IsUnspendable function from the txscript package (#478) - - Export variable length string functions from the wire package (#514) - - Export DNS Seeds for each network from the chaincfg package (#544) - - Preliminary work towards separating the memory pool into a separate - package (#525, #548) - - Misc changes: - - Various documentation updates (#442, #462, #465, #460, #470, #473, - #505, #530, #545) - - Add installation instructions for gentoo (#542) - - Ensure an error is shown if OS limits can't be set at startup (#498) - - Tighten the standardness checks for multisig scripts (#526) - - Test coverage improvement (#468, #494, #527, #543, #550) - - Several optimizations (#457, #474, #475, #476, #508, #509) - - Minor code cleanup and refactoring (#472, #479, #482, #519, #540) - - Contributors (alphabetical order): - - Ben Echols - - Bruno Clermont - - danda - - Daniel Krawisz - - Dario Nieuwenhuis - - Dave Collins - - David Hill - - Javed Khan - - Jonathan Gillham - - Joseph Becher - - Josh Rickmar - - Justus Ranvier - - Mawuli Adzoe - - Olaoluwa Osuntokun - - Rune T. Aune - -Changes in 0.11.1 (Wed May 27 2015) - - Protocol and network related changes: - - Use correct sub-command in reject message for rejected transactions - (#436, #437) - - Add a new parameter --torisolation which forces new circuits for each - connection when using tor (#430) - - Transaction relay (memory pool) changes: - - Reduce the default maximum number of allowed orphan transactions - to 1000 (#419) - - Add a new parameter --maxorphantx which allows the maximum number of - orphan transactions stored in the mempool to be specified (#419) - - RPC changes: - - Modify listtransactions result to include the 'involveswatchonly' and - 'vout' fields (#427) - - Update getrawtransaction result to omit the 'confirmations' field - when it is 0 (#420, #422) - - Update signrawtransaction result to include errors (#423) - - btcctl utility changes: - - Add gettxoutproof command (#428) - - Add verifytxoutproof command (#428) - - Notable developer-related package changes: - - The btcec package now provides the ability to perform ECDH - encryption and decryption (#375) - - The block and header validation in the blockchain package has been - split to help pave the way toward concurrent downloads (#386) - - Misc changes: - - Minor peer optimization (#433) - - Contributors (alphabetical order): - - Dave Collins - - David Hill - - Federico Bond - - Ishbir Singh - - Josh Rickmar - -Changes in 0.11.0 (Wed May 06 2015) - - Protocol and network related changes: - - **IMPORTANT: Update is required due to the following point** - - Correct a few corner cases in script handling which could result in - forking from the network on non-standard transactions (#425) - - Add a new checkpoint at block height 352940 (#418) - - Optimized script execution (#395, #400, #404, #409) - - Fix a case that could lead to stalled syncs (#138, #296) - - Network address manager changes: - - Implement eclipse attack
countermeasures as proposed in - http://cs-people.bu.edu/heilman/eclipse (#370, #373) - - Optional address indexing changes: - - Fix an issue where a reorg could cause an orderly shutdown when the - address index is active (#340, #357) - - Transaction relay (memory pool) changes: - - Increase maximum allowed space for nulldata transactions to 80 bytes - (#331) - - Implement support for the following rules specified by BIP0062: - - The S value in ECDSA signature must be at most half the curve order - (rule 5) (#349) - - Script execution must result in a single non-zero value on the stack - (rule 6) (#347) - - NOTE: All 7 rules of BIP0062 are now implemented - - Use network adjusted time in finalized transaction checks to improve - consistency across nodes (#332) - - Process orphan transactions on acceptance of new transactions (#345) - - RPC changes: - - Add support for a limited RPC user which is not allowed admin level - operations on the server (#363) - - Implement node command for more unified control over connected peers - (#79, #341) - - Implement generate command for regtest/simnet to support - deterministically mining a specified number of blocks (#362, #407) - - Update searchrawtransactions to return the matching transactions in - order (#354) - - Correct an issue with searchrawtransactions where it could return - duplicates (#346, #354) - - Increase precision of 'difficulty' field in getblock result to 8 - (#414, #415) - - Omit 'nextblockhash' field from getblock result when it is empty - (#416, #417) - - Add 'id' and 'timeoffset' fields to getpeerinfo result (#335) - - Websocket changes: - - Implement new commands stopnotifyspent, stopnotifyreceived, - stopnotifyblocks, and stopnotifynewtransactions to allow clients to - cancel notification registrations (#122, #342) - - btcctl utility changes: - - A single dash can now be used as an argument to cause that argument to - be read from stdin (#348) - - Add generate command - - Notable developer-related package changes: - - The new version 2 btcjson package has now replaced the deprecated - version 1 package (#368) - - The btcec package now performs all signing using RFC6979 deterministic - signatures (#358, #360) - - The txscript package has been significantly cleaned up and had a few - API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396, - #400, #403, #404, #405, #406, #408, #409, #410, #412) - - A new PkScriptLocs function has been added to the wire package MsgTx - type which provides callers that deal with scripts optimization - opportunities (#343) - - Misc changes: - - Minor wire hashing optimizations (#366, #367) - - Other minor internal optimizations - - Contributors (alphabetical order): - - Alex Akselrod - - Arne Brutschy - - Chris Jepson - - Daniel Krawisz - - Dave Collins - - David Hill - - Jimmy Song - - Jonas Nick - - Josh Rickmar - - Olaoluwa Osuntokun - - Oleg Andreev - -Changes in 0.10.0 (Sun Mar 01 2015) - - Protocol and network related changes: - - Add a new checkpoint at block height 343185 - - Implement BIP066 which includes support for version 3 blocks, a new - consensus rule which prevents non-DER encoded signatures, and a - double-threshold switchover mechanism - - Rather than announcing all known addresses on getaddr requests which - can possibly result in multiple messages, randomize the results and - limit them to the max allowed by a single message (1000 addresses) - - Add more reserved IP spaces to the address manager - - Transaction relay (memory pool) changes: - - Make transactions which 
contain reserved opcodes nonstandard - - No longer accept or relay free and low-fee transactions that have - insufficient priority to be mined in the next block - - Implement support for the following rules specified by BIP0062: - - ECDSA signature must use strict DER encoding (rule 1) - - The signature script must only contain push operations (rule 2) - - All push operations must use the smallest possible encoding (rule 3) - - All stack values interpreted as a number must be encoded using the - shortest possible form (rule 4) - - NOTE: Rule 1 was already enforced, however the entire script now - evaluates to false rather than only the signature verification as - required by BIP0062 - - Allow transactions with nulldata transaction outputs to be treated as - standard - - Mining support changes: - - Modify the getblocktemplate RPC to generate and return block templates - for version 3 blocks which are compatible with BIP0066 - - Allow getblocktemplate to serve blocks when the current time is - less than the minimum allowed time for a generated block template - (https://github.com/btcsuite/btcd/issues/209) - - Crypto changes: - - Optimize scalar multiplication by the base point by using a - pre-computed table which results in approximately a 35% speedup - (https://github.com/btcsuite/btcec/issues/2) - - Optimize general scalar multiplication by using the secp256k1 - endomorphism which results in approximately a 17-20% speedup - (https://github.com/btcsuite/btcec/issues/1) - - Optimize general scalar multiplication by using non-adjacent form - which results in approximately an additional 8% speedup - (https://github.com/btcsuite/btcec/issues/3) - - Implement optional address indexing: - - Add a new parameter --addrindex which will enable the creation of an - address index which can be queried to determine all transactions which - involve a given address - (https://github.com/btcsuite/btcd/issues/190) - - Add a new logging subsystem for address index related operations - - Support new searchrawtransactions RPC - (https://github.com/btcsuite/btcd/issues/185) - - RPC changes: - - Require TLS version 1.2 as the minimum version for all TLS connections - - Provide support for disabling TLS when only listening on localhost - (https://github.com/btcsuite/btcd/pull/192) - - Modify help output for all commands to provide much more consistent - and detailed information - - Correct a case in getrawtransaction which would refuse to serve certain - transactions with invalid scripts - (https://github.com/btcsuite/btcd/issues/210) - - Correct error handling in the getrawtransaction RPC which could lead - to a crash in rare cases - (https://github.com/btcsuite/btcd/issues/196) - - Update getinfo RPC to include the appropriate 'timeoffset' calculated - from the median network time - - Modify listreceivedbyaddress result type to include txids field so it - is compatible - - Add 'iswatchonly' field to validateaddress result - - Add 'startingpriority' and 'currentpriority' fields to getrawmempool - (https://github.com/btcsuite/btcd/issues/178) - - Don't omit the 'confirmations' field from getrawtransaction when it is - zero - - Websocket changes: - - Modify the behavior of the rescan command to automatically register - for notifications about transactions paying to rescanned addresses - or spending outputs from the final rescan utxo set when the rescan - is through the best block in the chain - - btcctl utility changes: - - Make the list of commands available via the -l option rather than - dumping the entire list
on usage errors - - Alphabetize and categorize the list of commands by chain and wallet - - Make the help option only show the help options instead of also - dumping all of the commands - - Make the usage syntax much more consistent and correct a few cases of - misnamed fields - (https://github.com/btcsuite/btcd/issues/305) - - Improve usage errors to show the specific parameter number, reason, - and error code - - Only show the usage for a specific command when a valid command - is provided with invalid parameters - - Add support for a SOCKS5 proxy - - Modify output for integer fields (such as timestamps) to display - normally instead of in scientific notation - - Add invalidateblock command - - Add reconsiderblock command - - Add createnewaccount command - - Add renameaccount command - - Add searchrawtransactions command - - Add importaddress command - - Add importpubkey command - - showblock utility changes: - - Remove utility in favor of the RPC getblock method - - Notable developer-related package changes: - - Many of the core packages have been relocated into the btcd repository - (https://github.com/btcsuite/btcd/issues/214) - - A new version of the btcjson package that has been completely - redesigned from the ground up based upon how the project has - evolved and lessons learned while using it since it was first written - is now available in the btcjson/v2/btcjson directory - - This will ultimately replace the current version so anyone making - use of this package will need to update their code accordingly - - The btcec package now provides better facilities for working directly - with its public and private keys without having to mix elements from - the ecdsa package - - Update the script builder to ensure all rules specified by BIP0062 are - adhered to when creating scripts - - The blockchain package now provides a MedianTimeSource interface and - concrete implementation for providing time samples from remote peers - and using that data to calculate an offset against the local time - - Misc changes: - - Fix a slow memory leak due to tickers not being stopped - (https://github.com/btcsuite/btcd/issues/189) - - Fix an issue where a mix of orphans and SPV clients could trigger a - condition where peers would no longer be served - (https://github.com/btcsuite/btcd/issues/231) - - The RPC username and password can now contain symbols which previously - conflicted with special symbols used in URLs - - Improve handling of obtaining random nonces to prevent cases where it - could error when not enough entropy was available - - Improve handling of home directory creation errors such as in the case - of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193) - - Improve the error reporting for rejected transactions to include the - inputs which are missing and/or being double spent - - Update sample config file with new options and correct a comment - regarding the fact the RPC server only listens on localhost by default - (https://github.com/btcsuite/btcd/issues/218) - - Update the continuous integration builds to run several tools which - help keep code quality high - - Significant amount of internal code cleanup and improvements - - Other minor internal optimizations - - Code Contributors (alphabetical order): - - Beldur - - Ben Holden-Crowther - - Dave Collins - - David Evans - - David Hill - - Guilherme Salgado - - Javed Khan - - Jimmy Song - - John C.
Vernaleo - - Jonathan Gillham - - Josh Rickmar - - Michael Ford - - Michail Kargakis - - kac - - Olaoluwa Osuntokun - -Changes in 0.9.0 (Sat Sep 20 2014) - - Protocol and network related changes: - - Add a new checkpoint at block height 319400 - - Add support for BIP0037 bloom filters - (https://github.com/conformal/btcd/issues/132) - - Implement BIP0061 reject handling and hence support for protocol - version 70002 (https://github.com/conformal/btcd/issues/133) - - Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me - and testnet-seed.bitcoin.schildbach.de) - - Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org) - - Make multisig transactions with non-null dummy data nonstandard - (https://github.com/conformal/btcd/issues/131) - - Make transactions with an excessive number of signature operations - nonstandard - - Perform initial DNS lookups concurrently, which allows connections - to be established more quickly - - Improve the address manager to significantly reduce memory usage and - add tests - - Remove orphan transactions when they appear in a mined block - (https://github.com/conformal/btcd/issues/166) - - Apply incremental back off on connection retries for persistent peers - that give invalid replies to mirror the logic used for failed - connections (https://github.com/conformal/btcd/issues/103) - - Correct rate-limiting of free and low-fee transactions - - Mining support changes: - - Implement getblocktemplate RPC with the following support: - (https://github.com/conformal/btcd/issues/124) - - BIP0022 Non-Optional Sections - - BIP0022 Long Polling - - BIP0023 Basic Pool Extensions - - BIP0023 Mutation coinbase/append - - BIP0023 Mutations time, time/increment, and time/decrement - - BIP0023 Mutation transactions/add - - BIP0023 Mutations prevblock, coinbase, and generation - - BIP0023 Block Proposals - - Implement built-in concurrent CPU miner - (https://github.com/conformal/btcd/issues/137) - NOTE: CPU mining on mainnet is pointless.
This has been provided - for testing purposes such as for the new simulation test network - - Add --generate flag to enable CPU mining - - Deprecate the --getworkkey flag in favor of --miningaddr which - specifies which addresses generated blocks will choose from to pay - the subsidy to - - RPC changes: - - Implement gettxout command - (https://github.com/conformal/btcd/issues/141) - - Implement validateaddress command - - Implement verifymessage command - - Mark getunconfirmedbalance RPC as wallet-only - - Mark getwalletinfo RPC as wallet-only - - Update getgenerate, setgenerate, gethashespersec, and getmininginfo - to return the appropriate information about new CPU mining status - - Modify getpeerinfo pingtime and pingwait field types to float64 so - they are compatible - - Improve disconnect handling for normal HTTP clients - - Make error code returns for invalid hex more consistent - - Websocket changes: - - Switch to a new more efficient websocket package - (https://github.com/conformal/btcd/issues/134) - - Add rescanfinished notification - - Modify the rescanprogress notification to include block hash as well - as height (https://github.com/conformal/btcd/issues/151) - - btcctl utility changes: - - Accept --simnet flag which automatically selects the appropriate port - and TLS certificates needed to communicate with btcd and btcwallet on - the simulation test network - - Fix createrawtransaction command to send amounts denominated in BTC - - Add estimatefee command - - Add estimatepriority command - - Add getmininginfo command - - Add getnetworkinfo command - - Add gettxout command - - Add lockunspent command - - Add signrawtransaction command - - addblock utility changes: - - Accept --simnet flag which automatically selects the appropriate port - and TLS certificates needed to communicate with btcd and btcwallet on - the simulation test network - - Notable developer-related package changes: - - Provide a new bloom package in btcutil which allows creating and - working with BIP0037 bloom filters - - Provide a new hdkeychain package in btcutil which allows working with - BIP0032 hierarchical deterministic key chains - - Introduce a new btcnet package which houses network parameters - - Provide new simnet network (--simnet) which is useful for private - simulation testing - - Enforce low S values in serialized signatures as detailed in BIP0062 - - Return errors from all methods on the btcdb.Db interface - (https://github.com/conformal/btcdb/issues/5) - - Allow behavior flags to alter btcchain.ProcessBlock - (https://github.com/conformal/btcchain/issues/5) - - Provide a new SerializeSize API for blocks - (https://github.com/conformal/btcwire/issues/19) - - Several of the core packages now work with Google App Engine - - Misc changes: - - Correct an issue where the database could corrupt under certain - circumstances which would require a new chain download - - Slightly optimize deserialization - - Use the correct IP block for he.net - - Fix an issue where it was possible the block manager could hang on - shutdown - - Update sample config file so the comments are on a separate line - rather than the end of a line so they are not interpreted as settings - (https://github.com/conformal/btcd/issues/135) - - Correct an issue where getdata requests were not being properly - throttled which could lead to larger than necessary memory usage - - Always show help when given the help flag even when the config file - contains invalid entries - - General code cleanup and minor optimizations - 
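As a quick illustration of the bloom package introduced above in 0.9.0, here is a minimal sketch of building a BIP0037 filter and producing the filterload message an SPV client would send to a peer. This assumes the btcutil/bloom and btcd wire APIs of this era; exact import paths and signatures may differ between releases, and the pubkey hash is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil/bloom"
)

func main() {
	// Size the filter for 10 elements with a 0.01% false-positive rate.
	// The tweak seeds the hash functions; BloomUpdateAll asks the serving
	// node to add outpoints of matched transactions back into the filter.
	filter := bloom.NewFilter(10, 0, 0.0001, wire.BloomUpdateAll)

	// Watch for a script element, e.g. a 20-byte pubkey hash (placeholder).
	pkHash := make([]byte, 20)
	filter.Add(pkHash)

	// The wire message used to load the filter on a remote peer.
	msg := filter.MsgFilterLoad()
	fmt.Println(msg.Command(), len(msg.Filter))
}
```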
-Changes in 0.8.0-beta (Sun May 25 2014) - - Btcd is now Beta (https://github.com/conformal/btcd/issues/130) - - Add a new checkpoint at block height 300255 - - Protocol and network related changes: - - Lower the minimum transaction relay fee to 1000 satoshi to match - recent reference client changes - (https://github.com/conformal/btcd/issues/100) - - Raise the maximum signature script size to support standard 15-of-15 - multi-signature pay-to-script-hash transactions with compressed pubkeys - to remain compatible with the reference client - (https://github.com/conformal/btcd/issues/128) - - Reduce max bytes allowed for a standard nulldata transaction to 40 for - compatibility with the reference client - - Introduce a new btcnet package which houses all of the network params - for each network (mainnet, testnet, regtest) to ultimately enable - easier addition and tweaking of networks without needing to change - several packages - - Fix several script discrepancies found by reference client test data - - Add new DNS seed for peer discovery (seed.bitnodes.io) - - Reduce the max known inventory cache from 20000 items to 1000 items - - Fix an issue where unknown inventory types could lead to a hung peer - - Implement inventory rebroadcast handler for sendrawtransaction - (https://github.com/conformal/btcd/issues/99) - - Update user agent to fully support BIP0014 - (https://github.com/conformal/btcwire/issues/10) - - Implement initial mining support: - - Add a new logging subsystem for mining related operations - - Implement infrastructure for creating block templates - - Provide options to control block template creation settings - - Support the getwork RPC - - Allow address identifiers to apply to more than one network since both - testnet and the regression test network unfortunately use the same - identifier - - RPC changes: - - Set the content type for HTTP POST RPC connections to application/json - (https://github.com/conformal/btcd/issues/121) - - Modified the RPC server startup so it only requires at least one valid - listen interface - - Correct an error path where it was possible certain errors would not - be returned - - Implement getwork command - (https://github.com/conformal/btcd/issues/125) - - Update sendrawtransaction command to reject orphans - - Update sendrawtransaction command to include the reason a transaction - was rejected - - Update getinfo command to populate connection count field - - Update getinfo command to include relay fee field - (https://github.com/conformal/btcd/issues/107) - - Allow transactions submitted with sendrawtransaction to bypass the - rate limiter - - Allow the getcurrentnet and getbestblock extensions to be accessed via - HTTP POST in addition to Websockets - (https://github.com/conformal/btcd/issues/127) - - Websocket changes: - - Rework notifications to ensure they are delivered in the order they - occur - - Rename notifynewtxs command to notifyreceived (funds received) - - Rename notifyallnewtxs command to notifynewtransactions - - Rename alltx notification to txaccepted - - Rename allverbosetx notification to txacceptedverbose - (https://github.com/conformal/btcd/issues/98) - - Add rescan progress notification - - Add recvtx notification - - Add redeemingtx notification - - Modify notifyspent command to accept an array of outpoints - (https://github.com/conformal/btcd/issues/123) - - Significantly optimize the rescan command to yield up to a 60x speed - increase - - btcctl utility changes: - - Add createencryptedwallet command - - Add
getblockchaininfo command - - Add importwallet command - - Add addmultisigaddress command - - Add setgenerate command - - Accept --testnet and --wallet flags which automatically select - the appropriate port and TLS certificates needed to communicate - with btcd and btcwallet (https://github.com/conformal/btcd/issues/112) - - Allow path expansion from config file entries - (https://github.com/conformal/btcd/issues/113) - - Minor refactoring to simplify handling of options - - addblock utility changes: - - Improve logging by making it consistent with the logging provided by - btcd (https://github.com/conformal/btcd/issues/90) - - Improve several package APIs for developers: - - Add new amount type for consistently handling monetary values - - Add new coin selector API - - Add new WIF (Wallet Import Format) API - - Add new crypto types for private keys and signatures - - Add new API to sign transactions including script merging and hash - types - - Expose function to extract all pushed data from a script - (https://github.com/conformal/btcscript/issues/8) - - Misc changes: - - Optimize address manager shuffling to do 67% less work on average - - Resolve a couple of benign data races found by the race detector - (https://github.com/conformal/btcd/issues/101) - - Add IP address to all peer related errors to clarify which peer is the - cause (https://github.com/conformal/btcd/issues/102) - - Fix a UPnP case issue that prevented the --upnp option from working - with some UPnP servers - - Update documentation in the sample config file regarding debug levels - - Adjust some logging levels to improve debug messages - - Improve the throughput of query messages to the block manager - - Several minor optimizations to reduce GC churn and enhance speed - - Other minor refactoring - - General code cleanup - -Changes in 0.7.0 (Thu Feb 20 2014) - - Fix an issue when parsing scripts which contain a multi-signature script - which requires zero signatures such as testnet block - 000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632 - (https://github.com/conformal/btcscript/issues/7) - - Add check to ensure all transactions accepted to mempool only contain - canonical data pushes (https://github.com/conformal/btcscript/issues/6) - - Fix an issue causing excessive memory consumption - - Significantly rework and improve the websocket notification system: - - Each client is now independent so slow clients no longer limit the - speed of other connected clients - - Potentially long-running operations such as rescans are now run in - their own handler and rate-limited to one operation at a time without - preventing simultaneous requests from the same client for the faster - requests or notifications - - A couple of scenarios which could cause shutdown to hang have been - resolved - - Update notifynewtx notifications to support all address types instead - of only pay-to-pubkey-hash - - Provide a --rpcmaxwebsockets option to allow limiting the number of - concurrent websocket clients - - Add a new websocket command notifyallnewtxs to request notifications - (https://github.com/conformal/btcd/issues/86) (thanks @flammit) - - Improve btcctl utility in the following ways: - - Add getnetworkhashps command - - Add gettransaction command (wallet-specific) - - Add signmessage command (wallet-specific) - - Update getwork command to accept - - Continue cleanup and work on implementing the RPC API: - - Implement getnettotals command - (https://github.com/conformal/btcd/issues/84) - - Implement networkhashps command -
(https://github.com/conformal/btcd/issues/87) - - Update getpeerinfo to always include syncnode field even when false - - Remove help addenda for getpeerinfo now that it supports all fields - - Close standard RPC connections on auth failure - - Provide a --rpcmaxclients option to allow limiting the number of - concurrent RPC clients (https://github.com/conformal/btcd/issues/68) - - Include IP address in RPC auth failure log messages - - Resolve a rather harmless data race found by the race detector - (https://github.com/conformal/btcd/issues/94) - - Increase block priority size and max standard transaction size to 50k - and 100k, respectively (https://github.com/conformal/btcd/issues/71) - - Add rate limiting of free transactions to the memory pool to prevent - penny flooding (https://github.com/conformal/btcd/issues/40) - - Provide a --logdir option (https://github.com/conformal/btcd/issues/95) - - Change the default log file path to include the network - - Add a new ScriptBuilder interface to btcscript to support creation of - custom scripts (https://github.com/conformal/btcscript/issues/5) - - General code cleanup - -Changes in 0.6.0 (Tue Feb 04 2014) - - Fix an issue when parsing scripts which contain invalid signatures that - caused a chain fork on block - 0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244 - - Correct an issue which could lead to an error in removeBlockNode - (https://github.com/conformal/btcchain/issues/4) - - Improve addblock utility as follows: - - Check imported blocks against all chain rules and checkpoints - - Skip blocks which are already known so you can stop and restart the - import or start the import after you have already downloaded a portion - of the chain - - Correct an issue where the utility did not shut down cleanly after - processing all blocks - - Add error on attempt to import orphan blocks - - Improve error handling and reporting - - Display statistics after input file has been fully processed - - Rework, optimize, and improve headers-first mode: - - Resuming the chain sync from any point before the final checkpoint - will now use headers-first mode - (https://github.com/conformal/btcd/issues/69) - - Verify all checkpoints as opposed to only the final one - - Reduce and bound memory usage - - Rollback to the last known good point when a header does not match a - checkpoint - - Log information about what is happening with headers - - Improve btcctl utility in the following ways: - - Add getaddednodeinfo command - - Add getnettotals command - - Add getblocktemplate command (wallet-specific) - - Add getwork command (wallet-specific) - - Add getnewaddress command (wallet-specific) - - Add walletpassphrasechange command (wallet-specific) - - Add walletlock command (wallet-specific) - - Add sendfrom command (wallet-specific) - - Add sendmany command (wallet-specific) - - Add settxfee command (wallet-specific) - - Add listsinceblock command (wallet-specific) - - Add listaccounts command (wallet-specific) - - Add keypoolrefill command (wallet-specific) - - Add getreceivedbyaccount command (wallet-specific) - - Add getrawchangeaddress command (wallet-specific) - - Add gettxoutsetinfo command (wallet-specific) - - Add listaddressgroupings command (wallet-specific) - - Add listlockunspent command (wallet-specific) - - Add listlock command (wallet-specific) - - Add listreceivedbyaccount command (wallet-specific) - - Add validateaddress command (wallet-specific) - - Add verifymessage command (wallet-specific) - - Add sendtoaddress command
(wallet-specific) - - Continue cleanup and work on implementing the RPC API: - - Implement submitblock command - (https://github.com/conformal/btcd/issues/61) - - Implement help command - - Implement ping command - - Implement getaddednodeinfo command - (https://github.com/conformal/btcd/issues/78) - - Implement getinfo command - - Update getpeerinfo to support bytesrecv and bytessent - (https://github.com/conformal/btcd/issues/83) - - Improve and correct several RPC server and websocket areas: - - Change the connection endpoint for websockets from /wallet to /ws - (https://github.com/conformal/btcd/issues/80) - - Implement an alternative authentication for websockets so clients - such as javascript from browsers that don't support setting HTTP - headers can authenticate (https://github.com/conformal/btcd/issues/77) - - Add an authentication deadline for RPC connections - (https://github.com/conformal/btcd/issues/68) - - Use standard authentication failure responses for RPC connections - - Make automatically generated certificate more standard so it works - from clients such as node.js and Firefox - - Correct some minor issues which could prevent the RPC server from - shutting down in an orderly fashion - - Make all websocket notifications require registration - - Change the data sent over websockets to text since it is JSON-RPC - - Allow connections that do not have an Origin header set - - Expose and track the number of bytes read and written per peer - (https://github.com/conformal/btcwire/issues/6) - - Correct an issue with sendrawtransaction when invoked via websockets - which prevented a minedtx notification from being added - - Rescan operations issued from remote wallets are now stopped when - the wallet disconnects mid-operation - (https://github.com/conformal/btcd/issues/66) - - Several optimizations related to fetching block information from the - database - - General code cleanup - -Changes in 0.5.0 (Mon Jan 13 2014) - - Optimize initial block download by introducing a new mode which - downloads the block headers first (up to the final checkpoint) - - Improve peer handling to remove the potential for slow peers to cause - sluggishness amongst all peers - (https://github.com/conformal/btcd/issues/63) - - Fix an issue where the initial block sync could stall when the sync peer - disconnects (https://github.com/conformal/btcd/issues/62) - - Correct an issue where --externalip was doing a DNS lookup on the full - host:port instead of just the host portion - (https://github.com/conformal/btcd/issues/38) - - Fix an issue which could lead to a panic on chain switches - (https://github.com/conformal/btcd/issues/70) - - Improve btcctl utility in the following ways: - - Show getdifficulty output as floating point to 6 digits of precision - - Show all JSON object replies formatted as standard JSON - - Allow btcctl getblock to accept optional params - - Add getaccount command (wallet-specific) - - Add getaccountaddress command (wallet-specific) - - Add sendrawtransaction command - - Continue cleanup and work on implementing RPC API calls - - Update getrawmempool to support new optional verbose flag - - Update getrawtransaction to match the reference client - - Update getblock to support new optional verbose flag - - Update raw transactions to fully match the reference client including - support for all transaction types and address types - - Correct getrawmempool fee field to return BTC instead of Satoshi - - Correct getpeerinfo service flag to return 8 digit string so it - matches the
reference client - - Correct verifychain to return a boolean - - Implement decoderawtransaction command - - Implement createrawtransaction command - - Implement decodescript command - - Implement gethashespersec command - - Allow RPC handler overrides when invoked via a websocket versus - legacy connection - - Add new DNS seed for peer discovery - - Display user agent on new valid peer log message - (https://github.com/conformal/btcd/issues/64) - - Notify wallet when new transactions that pay to registered addresses - show up in the mempool before being mined into a block - - Support a tor-specific proxy in addition to a normal proxy - (https://github.com/conformal/btcd/issues/47) - - Remove deprecated sqlite3 imports from utilities - - Remove leftover profile write from addblock utility - - Quite a bit of code cleanup and refactoring to improve maintainability - -Changes in 0.4.0 (Thu Dec 12 2013) - - Allow listen interfaces to be specified via --listen instead of only the - port (https://github.com/conformal/btcd/issues/33) - - Allow listen interfaces for the RPC server to be specified via - --rpclisten instead of only the port - (https://github.com/conformal/btcd/issues/34) - - Only disable listening when --connect or --proxy are used when no - --listen interfaces are specified - (https://github.com/conformal/btcd/issues/10) - - Add several new standard transaction checks to transaction memory pool: - - Support nulldata scripts as standard - - Only allow a max of one nulldata output per transaction - - Enforce a maximum of 3 public keys in multi-signature transactions - - The number of signatures in multi-signature transactions must not - exceed the number of public keys - - The number of inputs to a signature script must match the expected - number of inputs for the script type - - The number of inputs pushed onto the stack by a redeeming signature - script must match the number of inputs consumed by the referenced - public key script - - When a block is connected, remove any transactions from the memory pool - which are now double spends as a result of the newly connected - transactions - - Don't relay transactions resurrected during a chain switch since - other peers will also be switching chains and therefore already know - about them - - Clean up a few cases where rejected transactions showed as an error - rather than as a rejected transaction - - Ignore the default configuration file when --regtest (regression test - mode) is specified - - Implement TLS support for RPC including automatic certificate generation - - Support HTTP authentication headers for web sockets - - Update address manager to recognize and properly work with Tor - addresses (https://github.com/conformal/btcd/issues/36) and - (https://github.com/conformal/btcd/issues/37) - - Improve btcctl utility in the following ways: - - Add the ability to specify a configuration file - - Add a default entry for the RPC cert to point to the location - it will likely be in the btcd home directory - - Implement --version flag - - Provide a --notls option to support non-TLS configurations - - Fix a couple of minor races found by the Go race detector - - Improve logging - - Allow logging level to be specified on a per subsystem basis - (https://github.com/conformal/btcd/issues/48) - - Allow logging levels to be dynamically changed via RPC - (https://github.com/conformal/btcd/issues/15) - - Implement a rolling log file with a max of 10MB per file and a - rotation size of 3 which results in a max logging size of 30 MB - - Correct a
minor issue with the rescanning websocket call - (https://github.com/conformal/btcd/issues/54) - - Fix a race with pushing address messages that could lead to a panic - (https://github.com/conformal/btcd/issues/58) - - Improve which external IP address is reported to peers based on which - interface they are connected through - (https://github.com/conformal/btcd/issues/35) - - Add --externalip option to allow an external IP address to be specified - for cases such as tor hidden services or advanced network configurations - (https://github.com/conformal/btcd/issues/38) - - Add --upnp option to support automatic port mapping via UPnP - (https://github.com/conformal/btcd/issues/51) - - Update Ctrl+C interrupt handler to properly sync address manager and - remove the UPnP port mapping (if needed) - - Continue cleanup and work on implementing RPC API calls - - Add importprivkey (import private key) command to btcctl - - Update getrawtransaction to provide addresses properly, support - new verbose param, and match the reference implementation with the - exception of MULTISIG (thanks @flammit) - - Update getblock with new verbose flag (thanks @flammit) - - Add listtransactions command to btcctl - - Add getbalance command to btcctl - - Add basic support for btcd to run as a native Windows service - (https://github.com/conformal/btcd/issues/42) - - Package addblock utility with Windows MSIs - - Add support for TravisCI (continuous build integration) - - Cleanup some documentation and usage - - Several other minor bug fixes and general code cleanup - -Changes in 0.3.3 (Wed Nov 13 2013) - - Significantly improve initial block chain download speed - (https://github.com/conformal/btcd/issues/20) - - Add a new checkpoint at block height 267300 - - Optimize most recently used inventory handling - (https://github.com/conformal/btcd/issues/21) - - Optimize duplicate transaction input check - (https://github.com/conformal/btcchain/issues/2) - - Optimize transaction hashing - (https://github.com/conformal/btcd/issues/25) - - Rework and optimize wallet listener notifications - (https://github.com/conformal/btcd/issues/22) - - Optimize serialization and deserialization - (https://github.com/conformal/btcd/issues/27) - - Add support for minimum transaction fee to memory pool acceptance - (https://github.com/conformal/btcd/issues/29) - - Improve leveldb database performance by removing explicit GC call - - Fix an issue where Ctrl+C was not always finishing orderly database - shutdown - - Fix an issue in the script handling for OP_CHECKSIG - - Impose max limits on all variable length protocol entries to prevent - abuse from malicious peers - - Enforce DER signatures for transactions allowed into the memory pool - - Separate the debug profile http server from the RPC server - - Rework of the RPC code to improve performance and make the code cleaner - - The getrawtransaction RPC call now properly checks the memory pool - before consulting the db (https://github.com/conformal/btcd/issues/26) - - Add support for the following RPC calls: getpeerinfo, getconnectedcount, - addnode, verifychain - (https://github.com/conformal/btcd/issues/13) - (https://github.com/conformal/btcd/issues/17) - - Implement rescan websocket extension to allow wallet rescans - - Use correct paths for application data storage for all supported - operating systems (https://github.com/conformal/btcd/issues/30) - - Add a default redirect to the http profiling page when accessing the - http profile server - - Add a new --cpuprofile option which can 
be used to generate CPU - profiling data on platforms that support it - - Several other minor performance optimizations - - Other minor bug fixes and general code cleanup - -Changes in 0.3.2 (Tue Oct 22 2013) - - Fix an issue that could cause the download of the block chain to stall - (https://github.com/conformal/btcd/issues/12) - - Remove deprecated sqlite as an available database backend - - Close sqlite compile issue as sqlite has now been removed - (https://github.com/conformal/btcd/issues/11) - - Change default RPC ports to 8334 (mainnet) and 18334 (testnet) - - Continue cleanup and work on implementing RPC API calls - - Add support for the following RPC calls: getrawmempool, - getbestblockhash, decoderawtransaction, getdifficulty, - getconnectioncount, getpeerinfo, and addnode - - Improve the btcctl utility that is used to issue JSON-RPC commands - - Fix an issue preventing btcd from cleanly shutting down with the RPC - stop command - - Add a number of database interface tests to ensure backends implement - the expected interface - - Expose some additional information from btcscript to be used for - identifying "standard" transactions - - Add support for plan9 - thanks @mischief - (https://github.com/conformal/btcd/pull/19) - - Other minor bug fixes and general code cleanup - -Changes in 0.3.1-alpha (Tue Oct 15 2013) - - Change default database to leveldb - NOTE: This does mean you will have to redownload the block chain. Since we - are still in alpha, we didn't feel writing a converter was worth the time as - it would take away from more important issues at this stage - - Add a warning if there are multiple block chain databases of different types - - Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18 - - Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1 - - Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1 - - Optimize transaction lookups - - Correct a few cases of list removal that could result in improper cleanup - of no longer needed orphans - - Add functionality to increase ulimits on non-Windows platforms - - Add support for mempool command which allows remote peers to query the - transaction memory pool via the bitcoin protocol - - Clean up logging a bit - - Add a flag to disable checkpoints for developers - - Add a lot of useful debug logging such as message summaries - - Other minor bug fixes and general code cleanup - -Initial Release 0.3.0-alpha (Sat Oct 05 2013): - - Initial release diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index fb5b8b45..00000000 --- a/Jenkinsfile +++ /dev/null @@ -1,10 +0,0 @@ -node { - stage 'Checkout' - checkout scm - - stage 'Version' - sh './deploy.sh version' - - stage 'Build' - sh "./deploy.sh build" -} diff --git a/LICENSE b/LICENSE index 7fe8161a..2942320d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,8 +1,6 @@ ISC License -Copyright (c) 2018-2019 The kaspanet developers -Copyright (c) 2013-2018 The btcsuite developers -Copyright (c) 2015-2016 The Decred developers +Copyright (c) 2018 The Decred developers Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/README.md b/README.md index d1cbfaf3..4e8875c7 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,19 @@ - -Kaspad -==== -Warning: This is pre-alpha software. There's no guarantee anything works.
-==== - -[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad) - -Kaspad is the reference full node Kaspa implementation written in Go (golang). - -This project is currently under active development and is in a pre-Alpha state. -Some things still don't work and APIs are far from finalized. The code is provided for reference only. +dnsseeder +========= ## Requirements -Latest version of [Go](http://golang.org) (currently 1.13). - -## Installation +Latest version of [Go](http://golang.org) (currently 1.13) -#### Build from Source +## Getting Started - Install Go according to the installation instructions here: http://golang.org/doc/install - Ensure Go was installed properly and is a supported version: +- Launch a kaspad node for the dnsseeder to connect to + ```bash $ go version $ go env GOROOT GOPATH @@ -35,45 +24,35 @@ recommended that `GOPATH` is set to a directory in your home directory such as `~/dev/go` to avoid write permission issues. It is also recommended to add `$GOPATH/bin` to your `PATH` at this point. -- Run the following commands to obtain and install kaspad including all dependencies: +- Run the following commands to obtain dnsseeder, all dependencies, and install it: ```bash -$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad -$ cd $GOPATH/src/github.com/kaspanet/kaspad -$ ./test.sh -$ go install . ./cmd/... +$ git clone https://github.com/kaspanet/dnsseeder $GOPATH/src/github.com/kaspanet/dnsseeder +$ cd $GOPATH/src/github.com/kaspanet/dnsseeder +$ go install . ``` -`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail. - -- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did - not already add the bin directory to your system path during Go installation, - you are encouraged to do so now. +- dnsseeder will now be installed in either ```$GOROOT/bin``` or + ```$GOPATH/bin``` depending on your configuration. If you did not already + add the bin directory to your system path during Go installation, we + recommend you do so now. -## Getting Started - -Kaspad has several configuration options available to tweak how it runs, but all -of the basic operations work with zero configuration. +To start dnsseeder listening on UDP 127.0.0.1:5354 with an initial connection to a working testnet node running on 127.0.0.1: -#### Linux/BSD/POSIX/Source - -```bash -$ ./kaspad +``` +$ ./dnsseeder -n nameserver.example.com -H network-seed.example.com -s 127.0.0.1 --testnet ``` -## Discord -Join our discord server using the following link: https://discord.gg/WmGhhzk - -## Issue Tracker - -The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues) is used for this project. - -## Documentation - -The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder. +You will then need to redirect DNS traffic on your public IP port 53 to 127.0.0.1:5354. +Note: to listen directly on port 53 on most Unix systems, one has to run dnsseeder as root, which is discouraged. -## License -Kaspad is licensed under the [copyfree](http://copyfree.org) ISC License.
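Before wiring up the public DNS records, it can help to query the seeder directly to confirm it answers. A hypothetical check against the flags used in the command above (the seed domain and port are the example values from that command, not fixed defaults):

```
$ dig @127.0.0.1 -p 5354 network-seed.example.com A
```

A healthy seeder should respond with A records for the peers it has crawled; an empty answer usually just means it has not verified any nodes yet.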
+To create a working set-up where dnsseeder can provide IPs to kaspad instances, set the following DNS records: +``` +NAME TYPE VALUE +---- ---- ----- +[your.domain.name] A [your ip address] +[ns-your.domain.name] NS [your.domain.name] +``` diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go deleted file mode 100644 index d5a9826d..00000000 --- a/addrmgr/addrmanager.go +++ /dev/null @@ -1,1404 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package addrmgr - -import ( - "container/list" - crand "crypto/rand" // for seeding - "encoding/base32" - "encoding/binary" - "encoding/json" - "github.com/pkg/errors" - "io" - "math/rand" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -type newBucket [newBucketCount]map[string]*KnownAddress -type triedBucket [triedBucketCount]*list.List - -// AddrManager provides a concurrency safe address manager for caching potential -// peers on the Kaspa network. -type AddrManager struct { - mtx sync.Mutex - peersFile string - lookupFunc func(string) ([]net.IP, error) - rand *rand.Rand - key [32]byte - addrIndex map[string]*KnownAddress // address key to ka for all addrs. - addrNew map[subnetworkid.SubnetworkID]*newBucket - addrNewFullNodes newBucket - addrTried map[subnetworkid.SubnetworkID]*triedBucket - addrTriedFullNodes triedBucket - addrTrying map[*KnownAddress]bool - started int32 - shutdown int32 - wg sync.WaitGroup - quit chan struct{} - nTried map[subnetworkid.SubnetworkID]int - nNew map[subnetworkid.SubnetworkID]int - nTriedFullNodes int - nNewFullNodes int - lamtx sync.Mutex - localAddresses map[string]*localAddress - localSubnetworkID *subnetworkid.SubnetworkID -} - -type serializedKnownAddress struct { - Addr string - Src string - SubnetworkID string - Attempts int - TimeStamp int64 - LastAttempt int64 - LastSuccess int64 - // no refcount or tried, that is available from context. -} - -type serializedNewBucket [newBucketCount][]string -type serializedTriedBucket [triedBucketCount][]string - -type serializedAddrManager struct { - Version int - Key [32]byte - Addresses []*serializedKnownAddress - NewBuckets map[string]*serializedNewBucket // string is Subnetwork ID - NewBucketFullNodes serializedNewBucket - TriedBuckets map[string]*serializedTriedBucket // string is Subnetwork ID - TriedBucketFullNodes serializedTriedBucket -} - -type localAddress struct { - na *wire.NetAddress - score AddressPriority -} - -// AddressPriority type is used to describe the hierarchy of local address -// discovery methods. -type AddressPriority int - -const ( - // InterfacePrio signifies the address is on a local interface - InterfacePrio AddressPriority = iota - - // BoundPrio signifies the address has been explicitly bounded to. - BoundPrio - - // UpnpPrio signifies the address was obtained from UPnP. - UpnpPrio - - // HTTPPrio signifies the address was obtained from an external HTTP service. - HTTPPrio - - // ManualPrio signifies the address was provided by --externalip. - ManualPrio -) - -const ( - // needAddressThreshold is the number of addresses under which the - // address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // dumpAddressInterval is the interval used to dump the address - // cache to disk for future use. 
- dumpAddressInterval = time.Minute * 10 - - // triedBucketSize is the maximum number of addresses in each - // tried address bucket. - triedBucketSize = 256 - - // triedBucketCount is the number of buckets we split tried - // addresses over. - triedBucketCount = 64 - - // newBucketSize is the maximum number of addresses in each new address - // bucket. - newBucketSize = 64 - - // newBucketCount is the number of buckets that we spread new addresses - // over. - newBucketCount = 1024 - - // triedBucketsPerGroup is the number of tried buckets over which an - // address group will be spread. - triedBucketsPerGroup = 8 - - // newBucketsPerGroup is the number of new buckets over which a - // source address group will be spread. - newBucketsPerGroup = 64 - - // newBucketsPerAddress is the number of buckets a frequently seen new - // address may end up in. - newBucketsPerAddress = 8 - - // numMissingDays is the number of days before which we assume an - // address has vanished if we have not seen it announced in that long. - numMissingDays = 30 - - // numRetries is the number of tries without a single success before - // we assume an address is bad. - numRetries = 3 - - // maxFailures is the maximum number of failures we will accept without - // a success before considering an address bad. - maxFailures = 10 - - // minBadDays is the number of days since the last success before we - // will consider evicting an address. - minBadDays = 7 - - // getAddrMin is the least addresses that we will send in response - // to a getAddr. If we have less than this amount, we send everything. - getAddrMin = 50 - - // getAddrMax is the most addresses that we will send in response - // to a getAddr (in practise the most addresses we will return from a - // call to AddressCache()). - getAddrMax = 2500 - - // getAddrPercent is the percentage of total addresses known that we - // will share with a call to AddressCache. - getAddrPercent = 23 - - // serialisationVersion is the current version of the on-disk format. - serialisationVersion = 1 -) - -// updateAddress is a helper function to either update an address already known -// to the address manager, or to add the address if not already known. -func (a *AddrManager) updateAddress(netAddr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - // Filter out non-routable addresses. Note that non-routable - // also includes invalid and local addresses. - if !IsRoutable(netAddr) { - return - } - - addr := NetAddressKey(netAddr) - ka := a.find(netAddr) - if ka != nil { - // TODO: only update addresses periodically. - // Update the last seen time and services. - // note that to prevent causing excess garbage on getaddr - // messages the netaddresses in addrmanager are *immutable*, - // if we need to change them then we replace the pointer with a - // new copy so that we don't have to copy every na for getaddr. - if netAddr.Timestamp.After(ka.na.Timestamp) || - (ka.na.Services&netAddr.Services) != - netAddr.Services { - - naCopy := *ka.na - naCopy.Timestamp = netAddr.Timestamp - naCopy.AddService(netAddr.Services) - ka.na = &naCopy - } - - // If already in tried, we have nothing to do here. - if ka.tried { - return - } - - // Already at our max? - if ka.refs == newBucketsPerAddress { - return - } - - // The more entries we have, the less likely we are to add more. - // The likelihood of adding is 1 in 2N.
- factor := int32(2 * ka.refs) - if a.rand.Int31n(factor) != 0 { - return - } - } else { - // Make a copy of the net address to avoid races since it is - // updated elsewhere in the addrmanager code and would otherwise - // change the actual netaddress on the peer. - netAddrCopy := *netAddr - ka = &KnownAddress{na: &netAddrCopy, srcAddr: srcAddr, subnetworkID: subnetworkID} - a.addrIndex[addr] = ka - if subnetworkID == nil { - a.nNewFullNodes++ - } else { - a.nNew[*subnetworkID]++ - } - // XXX time penalty? - } - - bucket := a.getNewBucket(netAddr, srcAddr) - - // Already exists? - if ka.subnetworkID == nil { - if _, ok := a.addrNewFullNodes[bucket][addr]; ok { - return - } - } else if a.addrNew[*ka.subnetworkID] != nil { - if _, ok := a.addrNew[*ka.subnetworkID][bucket][addr]; ok { - return - } - } - - // Enforce max addresses. - if ka.subnetworkID == nil { - if len(a.addrNewFullNodes[bucket]) > newBucketSize { - log.Tracef("new bucket of full nodes is full, expiring old") - a.expireNewFullNodes(bucket) - } - } else if a.addrNew[*ka.subnetworkID] != nil && len(a.addrNew[*ka.subnetworkID][bucket]) > newBucketSize { - log.Tracef("new bucket is full, expiring old") - a.expireNewBySubnetworkID(ka.subnetworkID, bucket) - } - - // Add to new bucket. - ka.refs++ - a.updateAddrNew(bucket, addr, ka) - - if ka.subnetworkID == nil { - log.Tracef("Added new full node address %s for a total of %d addresses", addr, - a.nTriedFullNodes+a.nNewFullNodes) - } else { - log.Tracef("Added new address %s for a total of %d addresses", addr, - a.nTried[*ka.subnetworkID]+a.nNew[*ka.subnetworkID]) - } -} - -func (a *AddrManager) updateAddrNew(bucket int, addr string, ka *KnownAddress) { - if ka.subnetworkID == nil { - a.addrNewFullNodes[bucket][addr] = ka - return - } - - if _, ok := a.addrNew[*ka.subnetworkID]; !ok { - a.addrNew[*ka.subnetworkID] = &newBucket{} - for i := range a.addrNew[*ka.subnetworkID] { - a.addrNew[*ka.subnetworkID][i] = make(map[string]*KnownAddress) - } - } - a.addrNew[*ka.subnetworkID][bucket][addr] = ka -} - -func (a *AddrManager) updateAddrTried(bucket int, ka *KnownAddress) { - if ka.subnetworkID == nil { - a.addrTriedFullNodes[bucket].PushBack(ka) - return - } - - if _, ok := a.addrTried[*ka.subnetworkID]; !ok { - a.addrTried[*ka.subnetworkID] = &triedBucket{} - for i := range a.addrTried[*ka.subnetworkID] { - a.addrTried[*ka.subnetworkID][i] = list.New() - } - } - a.addrTried[*ka.subnetworkID][bucket].PushBack(ka) -} - -// expireNew makes space in the new buckets by expiring the really bad entries. -// If no bad entries are available we look at a few and remove the oldest. -func (a *AddrManager) expireNew(bucket *newBucket, idx int, decrNewCounter func()) { - // First see if there are any entries that are so bad we can just throw - // them away. otherwise we throw away the oldest entry in the cache. - // We keep track of oldest in the initial traversal and use that - // information instead. 
-	var oldest *KnownAddress
-	for k, v := range bucket[idx] {
-		if v.isBad() {
-			log.Tracef("expiring bad address %s", k)
-			delete(bucket[idx], k)
-			v.refs--
-			if v.refs == 0 {
-				decrNewCounter()
-				delete(a.addrIndex, k)
-			}
-			continue
-		}
-		if oldest == nil {
-			oldest = v
-		} else if !v.na.Timestamp.After(oldest.na.Timestamp) {
-			oldest = v
-		}
-	}
-
-	if oldest != nil {
-		key := NetAddressKey(oldest.na)
-		log.Tracef("expiring oldest address %s", key)
-
-		delete(bucket[idx], key)
-		oldest.refs--
-		if oldest.refs == 0 {
-			decrNewCounter()
-			delete(a.addrIndex, key)
-		}
-	}
-}
-
-// expireNewBySubnetworkID makes space in the new buckets by expiring the really bad entries.
-// If no bad entries are available we look at a few and remove the oldest.
-func (a *AddrManager) expireNewBySubnetworkID(subnetworkID *subnetworkid.SubnetworkID, bucket int) {
-	a.expireNew(a.addrNew[*subnetworkID], bucket, func() { a.nNew[*subnetworkID]-- })
-}
-
-// expireNewFullNodes makes space in the new buckets by expiring the really bad entries.
-// If no bad entries are available we look at a few and remove the oldest.
-func (a *AddrManager) expireNewFullNodes(bucket int) {
-	a.expireNew(&a.addrNewFullNodes, bucket, func() { a.nNewFullNodes-- })
-}
-
-// pickTried selects an address from the tried bucket to be evicted.
-// We just choose the eldest.
-func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket int) *list.Element {
-	var oldest *KnownAddress
-	var oldestElem *list.Element
-	var lst *list.List
-	if subnetworkID == nil {
-		lst = a.addrTriedFullNodes[bucket]
-	} else {
-		lst = a.addrTried[*subnetworkID][bucket]
-	}
-	for e := lst.Front(); e != nil; e = e.Next() {
-		ka := e.Value.(*KnownAddress)
-		if oldest == nil || oldest.na.Timestamp.After(ka.na.Timestamp) {
-			oldestElem = e
-			oldest = ka
-		}
-	}
-	return oldestElem
-}
-
-func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
-	// doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup)) % bucket_per_source_group) % num_new_buckets
-
-	data1 := []byte{}
-	data1 = append(data1, a.key[:]...)
-	data1 = append(data1, []byte(GroupKey(netAddr))...)
-	data1 = append(data1, []byte(GroupKey(srcAddr))...)
-	hash1 := daghash.DoubleHashB(data1)
-	hash64 := binary.LittleEndian.Uint64(hash1)
-	hash64 %= newBucketsPerGroup
-	var hashbuf [8]byte
-	binary.LittleEndian.PutUint64(hashbuf[:], hash64)
-	data2 := []byte{}
-	data2 = append(data2, a.key[:]...)
-	data2 = append(data2, GroupKey(srcAddr)...)
-	data2 = append(data2, hashbuf[:]...)
-
-	hash2 := daghash.DoubleHashB(data2)
-	return int(binary.LittleEndian.Uint64(hash2) % newBucketCount)
-}
-
-func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
-	// doublesha256(key + group + truncate_to_64bits(doublesha256(key + addr)) % buckets_per_group) % num_buckets
-	data1 := []byte{}
-	data1 = append(data1, a.key[:]...)
-	data1 = append(data1, []byte(NetAddressKey(netAddr))...)
-	hash1 := daghash.DoubleHashB(data1)
-	hash64 := binary.LittleEndian.Uint64(hash1)
-	hash64 %= triedBucketsPerGroup
-	var hashbuf [8]byte
-	binary.LittleEndian.PutUint64(hashbuf[:], hash64)
-	data2 := []byte{}
-	data2 = append(data2, a.key[:]...)
-	data2 = append(data2, GroupKey(netAddr)...)
-	data2 = append(data2, hashbuf[:]...)
-
-	hash2 := daghash.DoubleHashB(data2)
-	return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount)
-}
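-// Editorial sketch, not part of the original change: the two functions above
-// implement the keyed bucket derivation. In pseudo-Go, with || denoting byte
-// concatenation, the new-bucket case is:
-//
-//	hash1  := daghash.DoubleHashB(key || GroupKey(na) || GroupKey(src))
-//	offset := binary.LittleEndian.Uint64(hash1) % newBucketsPerGroup
-//	hash2  := daghash.DoubleHashB(key || GroupKey(src) || offset)
-//	bucket := binary.LittleEndian.Uint64(hash2) % newBucketCount
-//
-// Because the random key is secret, a peer cannot predict which of the 1024
-// new buckets (or 64 tried buckets) an address lands in, and each source
-// group is confined to a small slice of them.
-
-// addressHandler is the main handler for the address manager. It must be run
-// as a goroutine.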
-func (a *AddrManager) addressHandler() {
-	dumpAddressTicker := time.NewTicker(dumpAddressInterval)
-	defer dumpAddressTicker.Stop()
-out:
-	for {
-		select {
-		case <-dumpAddressTicker.C:
-			a.savePeers()
-
-		case <-a.quit:
-			break out
-		}
-	}
-	a.savePeers()
-	a.wg.Done()
-	log.Trace("Address handler done")
-}
-
-// savePeers saves all the known addresses to a file so they can be read back
-// in at next run.
-func (a *AddrManager) savePeers() {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	// First we make a serialisable data structure so we can encode it to
-	// json.
-	sam := new(serializedAddrManager)
-	sam.Version = serialisationVersion
-	copy(sam.Key[:], a.key[:])
-
-	sam.Addresses = make([]*serializedKnownAddress, len(a.addrIndex))
-	i := 0
-	for k, v := range a.addrIndex {
-		ska := new(serializedKnownAddress)
-		ska.Addr = k
-		if v.subnetworkID == nil {
-			ska.SubnetworkID = ""
-		} else {
-			ska.SubnetworkID = v.subnetworkID.String()
-		}
-		ska.TimeStamp = v.na.Timestamp.Unix()
-		ska.Src = NetAddressKey(v.srcAddr)
-		ska.Attempts = v.attempts
-		ska.LastAttempt = v.lastattempt.Unix()
-		ska.LastSuccess = v.lastsuccess.Unix()
-		// Tried and refs are implicit in the rest of the structure
-		// and will be worked out from context on deserialisation.
-		sam.Addresses[i] = ska
-		i++
-	}
-
-	sam.NewBuckets = make(map[string]*serializedNewBucket)
-	for subnetworkID := range a.addrNew {
-		subnetworkIDStr := subnetworkID.String()
-		sam.NewBuckets[subnetworkIDStr] = &serializedNewBucket{}
-
-		for i := range a.addrNew[subnetworkID] {
-			sam.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i]))
-			j := 0
-			for k := range a.addrNew[subnetworkID][i] {
-				sam.NewBuckets[subnetworkIDStr][i][j] = k
-				j++
-			}
-		}
-	}
-
-	for i := range a.addrNewFullNodes {
-		sam.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i]))
-		j := 0
-		for k := range a.addrNewFullNodes[i] {
-			sam.NewBucketFullNodes[i][j] = k
-			j++
-		}
-	}
-
-	sam.TriedBuckets = make(map[string]*serializedTriedBucket)
-	for subnetworkID := range a.addrTried {
-		subnetworkIDStr := subnetworkID.String()
-		sam.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{}
-
-		for i := range a.addrTried[subnetworkID] {
-			sam.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len())
-			j := 0
-			for e := a.addrTried[subnetworkID][i].Front(); e != nil; e = e.Next() {
-				ka := e.Value.(*KnownAddress)
-				sam.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na)
-				j++
-			}
-		}
-	}
-
-	for i := range a.addrTriedFullNodes {
-		sam.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len())
-		j := 0
-		for e := a.addrTriedFullNodes[i].Front(); e != nil; e = e.Next() {
-			ka := e.Value.(*KnownAddress)
-			sam.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na)
-			j++
-		}
-	}
-
-	w, err := os.Create(a.peersFile)
-	if err != nil {
-		log.Errorf("Error opening file %s: %s", a.peersFile, err)
-		return
-	}
-	enc := json.NewEncoder(w)
-	defer w.Close()
-	if err := enc.Encode(&sam); err != nil {
-		log.Errorf("Failed to encode file %s: %s", a.peersFile, err)
-		return
-	}
-}
-
-// loadPeers loads the known addresses from the saved file. If the file is
-// empty, missing, or malformed, nothing is loaded and the address manager
-// starts fresh.
-func (a *AddrManager) loadPeers() {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	err := a.deserializePeers(a.peersFile)
-	if err != nil {
-		log.Errorf("Failed to parse file %s: %s", a.peersFile, err)
-		// If it is invalid we nuke the old one unconditionally.
- err = os.Remove(a.peersFile) - if err != nil { - log.Warnf("Failed to remove corrupt peers file %s: %s", - a.peersFile, err) - } - a.reset() - return - } - log.Infof("Loaded %d addresses from file '%s'", a.totalNumAddresses(), a.peersFile) -} - -func (a *AddrManager) deserializePeers(filePath string) error { - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return nil - } - r, err := os.Open(filePath) - if err != nil { - return errors.Errorf("%s error opening file: %s", filePath, err) - } - defer r.Close() - - var sam serializedAddrManager - dec := json.NewDecoder(r) - err = dec.Decode(&sam) - if err != nil { - return errors.Errorf("error reading %s: %s", filePath, err) - } - - if sam.Version != serialisationVersion { - return errors.Errorf("unknown version %d in serialized "+ - "addrmanager", sam.Version) - } - copy(a.key[:], sam.Key[:]) - - for _, v := range sam.Addresses { - ka := new(KnownAddress) - ka.na, err = a.DeserializeNetAddress(v.Addr) - if err != nil { - return errors.Errorf("failed to deserialize netaddress "+ - "%s: %s", v.Addr, err) - } - ka.srcAddr, err = a.DeserializeNetAddress(v.Src) - if err != nil { - return errors.Errorf("failed to deserialize netaddress "+ - "%s: %s", v.Src, err) - } - if v.SubnetworkID != "" { - ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID) - if err != nil { - return errors.Errorf("failed to deserialize subnetwork id "+ - "%s: %s", v.SubnetworkID, err) - } - } - ka.attempts = v.Attempts - ka.lastattempt = time.Unix(v.LastAttempt, 0) - ka.lastsuccess = time.Unix(v.LastSuccess, 0) - a.addrIndex[NetAddressKey(ka.na)] = ka - } - - for subnetworkIDStr := range sam.NewBuckets { - subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) - if err != nil { - return err - } - for i, subnetworkNewBucket := range sam.NewBuckets[subnetworkIDStr] { - for _, val := range subnetworkNewBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("newbucket contains %s but "+ - "none in address list", val) - } - - if ka.refs == 0 { - a.nNew[*subnetworkID]++ - } - ka.refs++ - a.updateAddrNew(i, val, ka) - } - } - } - - for i, newBucket := range sam.NewBucketFullNodes { - for _, val := range newBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("full nodes newbucket contains %s but "+ - "none in address list", val) - } - - if ka.refs == 0 { - a.nNewFullNodes++ - } - ka.refs++ - a.updateAddrNew(i, val, ka) - } - } - - for subnetworkIDStr := range sam.TriedBuckets { - subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) - if err != nil { - return err - } - for i, subnetworkTriedBucket := range sam.TriedBuckets[subnetworkIDStr] { - for _, val := range subnetworkTriedBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("Tried bucket contains %s but "+ - "none in address list", val) - } - - ka.tried = true - a.nTried[*subnetworkID]++ - a.addrTried[*subnetworkID][i].PushBack(ka) - } - } - } - - for i, triedBucket := range sam.TriedBucketFullNodes { - for _, val := range triedBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("Full nodes tried bucket contains %s but "+ - "none in address list", val) - } - - ka.tried = true - a.nTriedFullNodes++ - a.addrTriedFullNodes[i].PushBack(ka) - } - } - - // Sanity checking. 
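-	// Editorial note, not part of the original change: after a successful
-	// load every address must be in exactly one state -
-	//
-	//	refs > 0 and !tried: referenced by one or more new buckets
-	//	refs == 0 and tried: present in exactly one tried bucket
-	//
-	// anything else means the buckets and addrIndex disagree, the file is
-	// rejected, and loadPeers resets the manager.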
-	for k, v := range a.addrIndex {
-		if v.refs == 0 && !v.tried {
-			return errors.Errorf("address %s after serialisation "+
-				"with no references", k)
-		}
-
-		if v.refs > 0 && v.tried {
-			return errors.Errorf("address %s after serialisation "+
-				"which is both new and tried!", k)
-		}
-	}
-
-	return nil
-}
-
-// DeserializeNetAddress converts a given address string to a *wire.NetAddress
-func (a *AddrManager) DeserializeNetAddress(addr string) (*wire.NetAddress, error) {
-	host, portStr, err := net.SplitHostPort(addr)
-	if err != nil {
-		return nil, err
-	}
-	port, err := strconv.ParseUint(portStr, 10, 16)
-	if err != nil {
-		return nil, err
-	}
-
-	return a.HostToNetAddress(host, uint16(port), wire.SFNodeNetwork)
-}
-
-// Start begins the core address handler which manages a pool of known
-// addresses, timeouts, and interval based writes.
-func (a *AddrManager) Start() {
-	// Already started?
-	if atomic.AddInt32(&a.started, 1) != 1 {
-		return
-	}
-
-	log.Trace("Starting address manager")
-
-	// Load peers we already know about from file.
-	a.loadPeers()
-
-	// Start the address ticker to save addresses periodically.
-	a.wg.Add(1)
-	spawn(a.addressHandler, a.handlePanic)
-}
-
-func (a *AddrManager) handlePanic() {
-	atomic.AddInt32(&a.shutdown, 1)
-}
-
-// Stop gracefully shuts down the address manager by stopping the main handler.
-func (a *AddrManager) Stop() error {
-	if atomic.AddInt32(&a.shutdown, 1) != 1 {
-		log.Warnf("Address manager is already in the process of " +
-			"shutting down")
-		return nil
-	}
-
-	log.Infof("Address manager shutting down")
-	close(a.quit)
-	a.wg.Wait()
-	return nil
-}
-
-// AddAddresses adds new addresses to the address manager. It enforces a max
-// number of addresses and silently ignores duplicate addresses. It is
-// safe for concurrent access.
-func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	for _, na := range addrs {
-		a.updateAddress(na, srcAddr, subnetworkID)
-	}
-}
-
-// AddAddress adds a new address to the address manager. It enforces a max
-// number of addresses and silently ignores duplicate addresses. It is
-// safe for concurrent access.
-func (a *AddrManager) AddAddress(addr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	a.updateAddress(addr, srcAddr, subnetworkID)
-}
-
-// AddAddressByIP adds an address where we are given an ip:port and not a
-// wire.NetAddress.
-func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.SubnetworkID) error {
-	// Split IP and port
-	addr, portStr, err := net.SplitHostPort(addrIP)
-	if err != nil {
-		return err
-	}
-	// Put it in a wire.NetAddress
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return errors.Errorf("invalid ip address %s", addr)
-	}
-	port, err := strconv.ParseUint(portStr, 10, 0)
-	if err != nil {
-		return errors.Errorf("invalid port %s: %s", portStr, err)
-	}
-	na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
-	a.AddAddress(na, na, subnetworkID) // XXX use correct src address
-	return nil
-}
-
-// numAddresses returns the number of addresses known to the address manager
-// that belong to the given subnetwork ID.
-func (a *AddrManager) numAddresses(subnetworkID *subnetworkid.SubnetworkID) int { - if subnetworkID == nil { - return a.nNewFullNodes + a.nTriedFullNodes - } - return a.nTried[*subnetworkID] + a.nNew[*subnetworkID] -} - -// totalNumAddresses returns the number of addresses known to the address manager. -func (a *AddrManager) totalNumAddresses() int { - total := a.nNewFullNodes + a.nTriedFullNodes - for _, numAddresses := range a.nTried { - total += numAddresses - } - for _, numAddresses := range a.nNew { - total += numAddresses - } - return total -} - -// TotalNumAddresses returns the number of addresses known to the address manager. -func (a *AddrManager) TotalNumAddresses() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.totalNumAddresses() -} - -// NeedMoreAddresses returns whether or not the address manager needs more -// addresses. -func (a *AddrManager) NeedMoreAddresses() bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - allAddrs := a.numAddresses(a.localSubnetworkID) - if a.localSubnetworkID != nil { - allAddrs += a.numAddresses(nil) - } - return allAddrs < needAddressThreshold -} - -// AddressCache returns the current address cache. It must be treated as -// read-only (but since it is a copy now, this is not as dangerous). -func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - if len(a.addrIndex) == 0 { - return nil - } - - allAddr := []*wire.NetAddress{} - // Iteration order is undefined here, but we randomise it anyway. - for _, v := range a.addrIndex { - if includeAllSubnetworks || v.SubnetworkID().IsEqual(subnetworkID) { - allAddr = append(allAddr, v.na) - } - } - - numAddresses := len(allAddr) * getAddrPercent / 100 - if numAddresses > getAddrMax { - numAddresses = getAddrMax - } - if len(allAddr) < getAddrMin { - numAddresses = len(allAddr) - } - if len(allAddr) > getAddrMin && numAddresses < getAddrMin { - numAddresses = getAddrMin - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - j := rand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[0:numAddresses] -} - -// reset resets the address manager by reinitialising the random source -// and allocating fresh empty bucket storage. -func (a *AddrManager) reset() { - - a.addrIndex = make(map[string]*KnownAddress) - - // fill key with bytes from a good random source. - io.ReadFull(crand.Reader, a.key[:]) - a.addrNew = make(map[subnetworkid.SubnetworkID]*newBucket) - a.addrTried = make(map[subnetworkid.SubnetworkID]*triedBucket) - - a.nNew = make(map[subnetworkid.SubnetworkID]int) - a.nTried = make(map[subnetworkid.SubnetworkID]int) - - for i := range a.addrNewFullNodes { - a.addrNewFullNodes[i] = make(map[string]*KnownAddress) - } - for i := range a.addrTriedFullNodes { - a.addrTriedFullNodes[i] = list.New() - } - a.nNewFullNodes = 0 - a.nTriedFullNodes = 0 - - a.addrTrying = make(map[*KnownAddress]bool) -} - -// HostToNetAddress returns a netaddress given a host address. If the address -// is a Tor .onion address this will be taken care of. Else if the host is -// not an IP address it will be resolved (via Tor if required). 
-func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) {
-	// Tor address is 16 char base32 + ".onion"
-	var ip net.IP
-	if len(host) == 22 && host[16:] == ".onion" {
-		// Go's base32 encoding uses capitals (as does the RFC),
-		// but Tor tends to use lowercase, so we switch
-		// case here.
-		data, err := base32.StdEncoding.DecodeString(
-			strings.ToUpper(host[:16]))
-		if err != nil {
-			return nil, err
-		}
-		prefix := []byte{0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43}
-		ip = net.IP(append(prefix, data...))
-	} else if ip = net.ParseIP(host); ip == nil {
-		ips, err := a.lookupFunc(host)
-		if err != nil {
-			return nil, err
-		}
-		if len(ips) == 0 {
-			return nil, errors.Errorf("no addresses found for %s", host)
-		}
-		ip = ips[0]
-	}
-
-	return wire.NewNetAddressIPPort(ip, port, services), nil
-}
-
-// ipString returns a string for the ip from the provided NetAddress. If the
-// ip is in the range used for Tor addresses then it will be transformed into
-// the relevant .onion address.
-func ipString(na *wire.NetAddress) string {
-	if IsOnionCatTor(na) {
-		// We know now that na.IP is long enough.
-		base32 := base32.StdEncoding.EncodeToString(na.IP[6:])
-		return strings.ToLower(base32) + ".onion"
-	}
-
-	return na.IP.String()
-}
-
-// NetAddressKey returns a string key in the form of ip:port for IPv4 addresses
-// or [ip]:port for IPv6 addresses.
-func NetAddressKey(na *wire.NetAddress) string {
-	port := strconv.FormatUint(uint64(na.Port), 10)
-
-	return net.JoinHostPort(ipString(na), port)
-}
-
-// GetAddress returns a single address that should be routable. It picks a
-// random one from the possible addresses with preference given to ones that
-// have not been used recently and should not pick 'close' addresses
-// consecutively.
-func (a *AddrManager) GetAddress() *KnownAddress {
-	// Protect concurrent access.
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	var knownAddress *KnownAddress
-	if a.localSubnetworkID == nil {
-		knownAddress = a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
-			&a.addrNewFullNodes, a.nNewFullNodes)
-	} else {
-		subnetworkID := *a.localSubnetworkID
-		knownAddress = a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
-			a.addrNew[subnetworkID], a.nNew[subnetworkID])
-	}
-
-	if knownAddress != nil {
-		if a.addrTrying[knownAddress] {
-			return nil
-		}
-		a.addrTrying[knownAddress] = true
-	}
-
-	return knownAddress
-}
-
-// see GetAddress for details
-func (a *AddrManager) getAddress(addrTried *triedBucket, nTried int, addrNew *newBucket, nNew int) *KnownAddress {
-	// Use a 50% chance for choosing between tried and new table entries.
-	if nTried > 0 && (nNew == 0 || a.rand.Intn(2) == 0) {
-		// Tried entry.
-		large := 1 << 30
-		factor := 1.0
-		for {
-			// Pick a random bucket.
-			bucket := a.rand.Intn(len(addrTried))
-			if addrTried[bucket].Len() == 0 {
-				continue
-			}
-
-			// Pick a random entry in the list
-			e := addrTried[bucket].Front()
-			for i :=
-				a.rand.Int63n(int64(addrTried[bucket].Len())); i > 0; i-- {
-				e = e.Next()
-			}
-			ka := e.Value.(*KnownAddress)
-			randval := a.rand.Intn(large)
-			if float64(randval) < (factor * ka.chance() * float64(large)) {
-				log.Tracef("Selected %s from tried bucket",
-					NetAddressKey(ka.na))
-				return ka
-			}
-			factor *= 1.2
-		}
-	} else if nNew > 0 {
-		// New node.
-		// XXX use a closure/function to avoid repeating this.
-		large := 1 << 30
-		factor := 1.0
-		for {
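-			// Editorial note, not part of the original change:
-			// this is the same weighted rejection sampling as the
-			// tried branch above. A candidate is accepted when
-			//
-			//	rand.Intn(1<<30) < factor * chance() * (1<<30)
-			//
-			// and factor grows by 1.2x per rejection, so even
-			// heavily penalised addresses are eventually chosen.
-
-			// Pick a random bucket.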
-			bucket := a.rand.Intn(len(addrNew))
-			if len(addrNew[bucket]) == 0 {
-				continue
-			}
-			// Then, a random entry in it.
-			var ka *KnownAddress
-			nth := a.rand.Intn(len(addrNew[bucket]))
-			for _, value := range addrNew[bucket] {
-				if nth == 0 {
-					ka = value
-				}
-				nth--
-			}
-			randval := a.rand.Intn(large)
-			if float64(randval) < (factor * ka.chance() * float64(large)) {
-				log.Tracef("Selected %s from new bucket",
-					NetAddressKey(ka.na))
-				return ka
-			}
-			factor *= 1.2
-		}
-	}
-	return nil
-}
-
-func (a *AddrManager) find(addr *wire.NetAddress) *KnownAddress {
-	return a.addrIndex[NetAddressKey(addr)]
-}
-
-// Attempt increases the given address' attempt counter and updates
-// the last attempt time.
-func (a *AddrManager) Attempt(addr *wire.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	// Find address.
-	// Surely address will be in tried by now?
-	ka := a.find(addr)
-	if ka == nil {
-		return
-	}
-	// Set last tried time to now.
-	ka.attempts++
-	ka.lastattempt = time.Now()
-
-	delete(a.addrTrying, ka)
-}
-
-// Connected marks the given address as currently connected and working at the
-// current time. The address must already be known to AddrManager else it will
-// be ignored.
-func (a *AddrManager) Connected(addr *wire.NetAddress) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	ka := a.find(addr)
-	if ka == nil {
-		return
-	}
-
-	// Update the time as long as it has been 20 minutes since last we did
-	// so.
-	now := time.Now()
-	if now.After(ka.na.Timestamp.Add(time.Minute * 20)) {
-		// ka.na is immutable, so replace it.
-		naCopy := *ka.na
-		naCopy.Timestamp = time.Now()
-		ka.na = &naCopy
-	}
-}
-
-// Good marks the given address as good. To be called after a successful
-// connection and version exchange. If the address is unknown to the address
-// manager it will be ignored.
-func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	ka := a.find(addr)
-	if ka == nil {
-		return
-	}
-	oldSubnetworkID := ka.subnetworkID
-
-	// ka.Timestamp is not updated here to avoid leaking information
-	// about currently connected peers.
-	now := time.Now()
-	ka.lastsuccess = now
-	ka.lastattempt = now
-	ka.attempts = 0
-	ka.subnetworkID = subnetworkID
-
-	addrKey := NetAddressKey(addr)
-	triedBucketIndex := a.getTriedBucket(ka.na)
-
-	if ka.tried {
-		// If this address was already tried, and subnetworkID didn't change - don't do anything
-		if subnetworkID.IsEqual(oldSubnetworkID) {
-			return
-		}
-
-		// If this address was already tried, but subnetworkID was changed -
-		// update subnetworkID, then continue as though this is a new address
-		bucketList := a.addrTried[*oldSubnetworkID][triedBucketIndex]
-		for e := bucketList.Front(); e != nil; e = e.Next() {
-			if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
-				bucketList.Remove(e)
-				break
-			}
-		}
-	}
-
-	// Ok, need to move it to tried.
-
-	// Remove from all new buckets.
-	// Record one of the buckets in question and call it the `first'
-	oldBucket := -1
-	if !ka.tried {
-		if oldSubnetworkID == nil {
-			for i := range a.addrNewFullNodes {
-				// we check for existence so we can record the first one
-				if _, ok := a.addrNewFullNodes[i][addrKey]; ok {
-					delete(a.addrNewFullNodes[i], addrKey)
-					ka.refs--
-					if oldBucket == -1 {
-						oldBucket = i
-					}
-				}
-			}
-			a.nNewFullNodes--
-		} else {
-			for i := range a.addrNew[*oldSubnetworkID] {
-				// we check for existence so we can record the first one
-				if _, ok := a.addrNew[*oldSubnetworkID][i][addrKey]; ok {
-					delete(a.addrNew[*oldSubnetworkID][i], addrKey)
-					ka.refs--
-					if oldBucket == -1 {
-						oldBucket = i
-					}
-				}
-			}
-			a.nNew[*oldSubnetworkID]--
-		}
-
-		if oldBucket == -1 {
-			// What? wasn't in a bucket after all.... Panic?
-			return
-		}
-	}
-
-	// Room in this tried bucket?
-	if ka.subnetworkID == nil {
-		if a.nTriedFullNodes == 0 || a.addrTriedFullNodes[triedBucketIndex].Len() < triedBucketSize {
-			ka.tried = true
-			a.updateAddrTried(triedBucketIndex, ka)
-			a.nTriedFullNodes++
-			return
-		}
-	} else if a.nTried[*ka.subnetworkID] == 0 || a.addrTried[*ka.subnetworkID][triedBucketIndex].Len() < triedBucketSize {
-		ka.tried = true
-		a.updateAddrTried(triedBucketIndex, ka)
-		a.nTried[*ka.subnetworkID]++
-		return
-	}
-
-	// No room, we have to evict something else.
-	entry := a.pickTried(ka.subnetworkID, triedBucketIndex)
-	rmka := entry.Value.(*KnownAddress)
-
-	// First bucket it would have been put in.
-	newBucket := a.getNewBucket(rmka.na, rmka.srcAddr)
-
-	// If no room in the original bucket, we put it in a bucket we just
-	// freed up a space in.
-	if ka.subnetworkID == nil {
-		if len(a.addrNewFullNodes[newBucket]) >= newBucketSize {
-			if oldBucket == -1 {
-				// If addr was a tried bucket with updated subnetworkID - oldBucket will be equal to -1.
-				// In that case - find some non-full bucket.
-				// If no such bucket exists - throw rmka away
-				for i := range a.addrNewFullNodes {
-					if len(a.addrNewFullNodes[i]) < newBucketSize {
-						newBucket = i
-						break
-					}
-				}
-			} else {
-				newBucket = oldBucket
-			}
-		}
-	} else if len(a.addrNew[*ka.subnetworkID][newBucket]) >= newBucketSize {
-		if oldBucket == -1 {
-			// If addr was a tried bucket with updated subnetworkID - oldBucket will be equal to -1.
-			// In that case - find some non-full bucket.
-			// If no such bucket exists - throw rmka away
-			for i := range a.addrNew[*ka.subnetworkID] {
-				if len(a.addrNew[*ka.subnetworkID][i]) < newBucketSize {
-					newBucket = i
-					break
-				}
-			}
-		} else {
-			newBucket = oldBucket
-		}
-	}
-
-	// Replace with ka in list.
-	ka.tried = true
-	entry.Value = ka
-
-	rmka.tried = false
-	rmka.refs++
-
-	// We don't touch a.nTried here since the number of tried stays the same
-	// but we decremented new above, raise it again since we're putting
-	// something back.
-	if ka.subnetworkID == nil {
-		a.nNewFullNodes++
-	} else {
-		a.nNew[*ka.subnetworkID]++
-	}
-
-	rmkey := NetAddressKey(rmka.na)
-	log.Tracef("Replacing %s with %s in tried", rmkey, addrKey)
-
-	// We made sure there is space here just above.
-	if ka.subnetworkID == nil {
-		a.addrNewFullNodes[newBucket][rmkey] = rmka
-	} else {
-		a.addrNew[*ka.subnetworkID][newBucket][rmkey] = rmka
-	}
-}
-
-// AddLocalAddress adds na to the list of known local addresses to advertise
-// with the given priority.
-func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
-	if !IsRoutable(na) {
-		return errors.Errorf("address %s is not routable", na.IP)
-	}
-
-	a.lamtx.Lock()
-	defer a.lamtx.Unlock()
-
-	key := NetAddressKey(na)
-	la, ok := a.localAddresses[key]
-	if !ok || la.score < priority {
-		if ok {
-			la.score = priority + 1
-		} else {
-			a.localAddresses[key] = &localAddress{
-				na:    na,
-				score: priority,
-			}
-		}
-	}
-	return nil
-}
-
-// getReachabilityFrom returns the relative reachability of the provided local
-// address to the provided remote address.
-func getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int {
-	const (
-		Unreachable = 0
-		Default     = iota
-		Teredo
-		Ipv6Weak
-		Ipv4
-		Ipv6Strong
-		Private
-	)
-
-	if !IsRoutable(remoteAddr) {
-		return Unreachable
-	}
-
-	if IsOnionCatTor(remoteAddr) {
-		if IsOnionCatTor(localAddr) {
-			return Private
-		}
-
-		if IsRoutable(localAddr) && IsIPv4(localAddr) {
-			return Ipv4
-		}
-
-		return Default
-	}
-
-	if IsRFC4380(remoteAddr) {
-		if !IsRoutable(localAddr) {
-			return Default
-		}
-
-		if IsRFC4380(localAddr) {
-			return Teredo
-		}
-
-		if IsIPv4(localAddr) {
-			return Ipv4
-		}
-
-		return Ipv6Weak
-	}
-
-	if IsIPv4(remoteAddr) {
-		if IsRoutable(localAddr) && IsIPv4(localAddr) {
-			return Ipv4
-		}
-		return Unreachable
-	}
-
-	/* ipv6 */
-	var tunnelled bool
-	// Is our v6 tunnelled?
-	if IsRFC3964(localAddr) || IsRFC6052(localAddr) || IsRFC6145(localAddr) {
-		tunnelled = true
-	}
-
-	if !IsRoutable(localAddr) {
-		return Default
-	}
-
-	if IsRFC4380(localAddr) {
-		return Teredo
-	}
-
-	if IsIPv4(localAddr) {
-		return Ipv4
-	}
-
-	if tunnelled {
-		// only prioritise ipv6 if we aren't tunnelling it.
-		return Ipv6Weak
-	}
-
-	return Ipv6Strong
-}
-
-// GetBestLocalAddress returns the most appropriate local address to use
-// for the given remote address.
-func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.NetAddress {
-	a.lamtx.Lock()
-	defer a.lamtx.Unlock()
-
-	bestreach := 0
-	var bestscore AddressPriority
-	var bestAddress *wire.NetAddress
-	for _, la := range a.localAddresses {
-		reach := getReachabilityFrom(la.na, remoteAddr)
-		if reach > bestreach ||
-			(reach == bestreach && la.score > bestscore) {
-			bestreach = reach
-			bestscore = la.score
-			bestAddress = la.na
-		}
-	}
-	if bestAddress != nil {
-		log.Debugf("Suggesting address %s:%d for %s:%d", bestAddress.IP,
-			bestAddress.Port, remoteAddr.IP, remoteAddr.Port)
-	} else {
-		log.Debugf("No worthy address for %s:%d", remoteAddr.IP,
-			remoteAddr.Port)
-
-		// Send something unroutable if nothing suitable.
-		var ip net.IP
-		if !IsIPv4(remoteAddr) && !IsOnionCatTor(remoteAddr) {
-			ip = net.IPv6zero
-		} else {
-			ip = net.IPv4zero
-		}
-		services := wire.SFNodeNetwork | wire.SFNodeBloom
-		bestAddress = wire.NewNetAddressIPPort(ip, 0, services)
-	}
-
-	return bestAddress
-}
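-// Editorial sketch, not part of the original change: a typical lifecycle of
-// the manager, assuming a data directory and a DNS lookup function (the path
-// and port below are illustrative only):
-//
-//	amgr := New("/var/lib/kaspad", net.LookupIP, nil)
-//	amgr.Start()                          // loads peers.json, starts the saver
-//	_ = amgr.AddAddressByIP("1.2.3.4:16111", nil)
-//	if ka := amgr.GetAddress(); ka != nil {
-//		amgr.Attempt(ka.NetAddress()) // then dial; on success:
-//		amgr.Good(ka.NetAddress(), nil)
-//	}
-//	_ = amgr.Stop()                       // final savePeers flush
-
-// New returns a new Kaspa address manager.
-// Use Start to begin processing asynchronous address updates.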
-func New(dataDir string, lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager { - am := AddrManager{ - peersFile: filepath.Join(dataDir, "peers.json"), - lookupFunc: lookupFunc, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - quit: make(chan struct{}), - localAddresses: make(map[string]*localAddress), - localSubnetworkID: subnetworkID, - } - am.reset() - return &am -} diff --git a/addrmgr/addrmanager_test.go b/addrmgr/addrmanager_test.go deleted file mode 100644 index 1747bd46..00000000 --- a/addrmgr/addrmanager_test.go +++ /dev/null @@ -1,685 +0,0 @@ -// Copyright (c) 2013-2014 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package addrmgr - -import ( - "bou.ke/monkey" - "fmt" - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/dagconfig" - "github.com/pkg/errors" - "net" - "reflect" - "testing" - "time" - - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/kaspanet/kaspad/wire" -) - -// naTest is used to describe a test to be performed against the NetAddressKey -// method. -type naTest struct { - in wire.NetAddress - want string -} - -// naTests houses all of the tests to be performed against the NetAddressKey -// method. -var naTests = make([]naTest, 0) - -// Put some IP in here for convenience. Points to google. -var someIP = "173.194.115.66" - -// addNaTests -func addNaTests() { - // IPv4 - // Localhost - addNaTest("127.0.0.1", 8333, "127.0.0.1:8333") - addNaTest("127.0.0.1", 8334, "127.0.0.1:8334") - - // Class A - addNaTest("1.0.0.1", 8333, "1.0.0.1:8333") - addNaTest("2.2.2.2", 8334, "2.2.2.2:8334") - addNaTest("27.253.252.251", 8335, "27.253.252.251:8335") - addNaTest("123.3.2.1", 8336, "123.3.2.1:8336") - - // Private Class A - addNaTest("10.0.0.1", 8333, "10.0.0.1:8333") - addNaTest("10.1.1.1", 8334, "10.1.1.1:8334") - addNaTest("10.2.2.2", 8335, "10.2.2.2:8335") - addNaTest("10.10.10.10", 8336, "10.10.10.10:8336") - - // Class B - addNaTest("128.0.0.1", 8333, "128.0.0.1:8333") - addNaTest("129.1.1.1", 8334, "129.1.1.1:8334") - addNaTest("180.2.2.2", 8335, "180.2.2.2:8335") - addNaTest("191.10.10.10", 8336, "191.10.10.10:8336") - - // Private Class B - addNaTest("172.16.0.1", 8333, "172.16.0.1:8333") - addNaTest("172.16.1.1", 8334, "172.16.1.1:8334") - addNaTest("172.16.2.2", 8335, "172.16.2.2:8335") - addNaTest("172.16.172.172", 8336, "172.16.172.172:8336") - - // Class C - addNaTest("193.0.0.1", 8333, "193.0.0.1:8333") - addNaTest("200.1.1.1", 8334, "200.1.1.1:8334") - addNaTest("205.2.2.2", 8335, "205.2.2.2:8335") - addNaTest("223.10.10.10", 8336, "223.10.10.10:8336") - - // Private Class C - addNaTest("192.168.0.1", 8333, "192.168.0.1:8333") - addNaTest("192.168.1.1", 8334, "192.168.1.1:8334") - addNaTest("192.168.2.2", 8335, "192.168.2.2:8335") - addNaTest("192.168.192.192", 8336, "192.168.192.192:8336") - - // IPv6 - // Localhost - addNaTest("::1", 8333, "[::1]:8333") - addNaTest("fe80::1", 8334, "[fe80::1]:8334") - - // Link-local - addNaTest("fe80::1:1", 8333, "[fe80::1:1]:8333") - addNaTest("fe91::2:2", 8334, "[fe91::2:2]:8334") - addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335") - addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336") - - // Site-local - addNaTest("fec0::1:1", 8333, "[fec0::1:1]:8333") - addNaTest("fed1::2:2", 8334, "[fed1::2:2]:8334") - addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335") - addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336") -} - -func addNaTest(ip string, port uint16, want string) { - 
nip := net.ParseIP(ip) - na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork) - test := naTest{na, want} - naTests = append(naTests, test) -} - -func lookupFunc(host string) ([]net.IP, error) { - return nil, errors.New("not implemented") -} - -func TestStartStop(t *testing.T) { - n := New("teststartstop", lookupFunc, nil) - n.Start() - err := n.Stop() - if err != nil { - t.Fatalf("Address Manager failed to stop: %v", err) - } -} - -func TestAddAddressByIP(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - fmtErr := errors.Errorf("") - addrErr := &net.AddrError{} - var tests = []struct { - addrIP string - err error - }{ - { - someIP + ":8333", - nil, - }, - { - someIP, - addrErr, - }, - { - someIP[:12] + ":8333", - fmtErr, - }, - { - someIP + ":abcd", - fmtErr, - }, - } - - amgr := New("testaddressbyip", nil, nil) - for i, test := range tests { - err := amgr.AddAddressByIP(test.addrIP, nil) - if test.err != nil && err == nil { - t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i) - continue - } - if test.err == nil && err != nil { - t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i) - continue - } - if reflect.TypeOf(err) != reflect.TypeOf(test.err) { - t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i, - reflect.TypeOf(err), reflect.TypeOf(test.err)) - continue - } - } -} - -func TestAddLocalAddress(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - var tests = []struct { - address wire.NetAddress - priority AddressPriority - valid bool - }{ - { - wire.NetAddress{IP: net.ParseIP("192.168.0.100")}, - InterfacePrio, - false, - }, - { - wire.NetAddress{IP: net.ParseIP("204.124.1.1")}, - InterfacePrio, - true, - }, - { - wire.NetAddress{IP: net.ParseIP("204.124.1.1")}, - BoundPrio, - true, - }, - { - wire.NetAddress{IP: net.ParseIP("::1")}, - InterfacePrio, - false, - }, - { - wire.NetAddress{IP: net.ParseIP("fe80::1")}, - InterfacePrio, - false, - }, - { - wire.NetAddress{IP: net.ParseIP("2620:100::1")}, - InterfacePrio, - true, - }, - } - amgr := New("testaddlocaladdress", nil, nil) - for x, test := range tests { - result := amgr.AddLocalAddress(&test.address, test.priority) - if result == nil && !test.valid { - t.Errorf("TestAddLocalAddress test #%d failed: %s should have "+ - "been accepted", x, test.address.IP) - continue - } - if result != nil && test.valid { - t.Errorf("TestAddLocalAddress test #%d failed: %s should not have "+ - "been accepted", x, test.address.IP) - continue - } - } -} - -func TestAttempt(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - n := New("testattempt", lookupFunc, nil) - - // Add a new address and get it - err := n.AddAddressByIP(someIP+":8333", nil) - if err != nil { - t.Fatalf("Adding address failed: %v", err) - } - ka := n.GetAddress() - - if !ka.LastAttempt().IsZero() { - t.Errorf("Address should 
not have attempts, but does") - } - - na := ka.NetAddress() - n.Attempt(na) - - if ka.LastAttempt().IsZero() { - t.Errorf("Address should have an attempt, but does not") - } -} - -func TestConnected(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - n := New("testconnected", lookupFunc, nil) - - // Add a new address and get it - err := n.AddAddressByIP(someIP+":8333", nil) - if err != nil { - t.Fatalf("Adding address failed: %v", err) - } - ka := n.GetAddress() - na := ka.NetAddress() - // make it an hour ago - na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0) - - n.Connected(na) - - if !ka.NetAddress().Timestamp.After(na.Timestamp) { - t.Errorf("Address should have a new timestamp, but does not") - } -} - -func TestNeedMoreAddresses(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - n := New("testneedmoreaddresses", lookupFunc, nil) - addrsToAdd := 1500 - b := n.NeedMoreAddresses() - if !b { - t.Errorf("Expected that we need more addresses") - } - addrs := make([]*wire.NetAddress, addrsToAdd) - - var err error - for i := 0; i < addrsToAdd; i++ { - s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60) - addrs[i], err = n.DeserializeNetAddress(s) - if err != nil { - t.Errorf("Failed to turn %s into an address: %v", s, err) - } - } - - srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) - - n.AddAddresses(addrs, srcAddr, nil) - numAddrs := n.TotalNumAddresses() - if numAddrs > addrsToAdd { - t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd) - } - - b = n.NeedMoreAddresses() - if b { - t.Errorf("Expected that we don't need more addresses") - } -} - -func TestGood(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - n := New("testgood", lookupFunc, nil) - addrsToAdd := 64 * 64 - addrs := make([]*wire.NetAddress, addrsToAdd) - subnetworkCount := 32 - subnetworkIDs := make([]*subnetworkid.SubnetworkID, subnetworkCount) - - var err error - for i := 0; i < addrsToAdd; i++ { - s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60) - addrs[i], err = n.DeserializeNetAddress(s) - if err != nil { - t.Errorf("Failed to turn %s into an address: %v", s, err) - } - } - - for i := 0; i < subnetworkCount; i++ { - subnetworkIDs[i] = &subnetworkid.SubnetworkID{0xff - byte(i)} - } - - srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0) - - n.AddAddresses(addrs, srcAddr, nil) - for i, addr := range addrs { - n.Good(addr, subnetworkIDs[i%subnetworkCount]) - } - - numAddrs := n.TotalNumAddresses() - if numAddrs >= addrsToAdd { - t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd) - } - - numCache := len(n.AddressCache(true, nil)) - if numCache == 0 || numCache >= numAddrs/4 { - t.Errorf("Number of addresses in cache: got %d, want positive and less than %d", - numCache, numAddrs/4) - } - - for i := 0; i < subnetworkCount; 
i++ {
-		numCache = len(n.AddressCache(false, subnetworkIDs[i]))
-		if numCache == 0 || numCache >= numAddrs/subnetworkCount {
-			t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
-				numCache, numAddrs/subnetworkCount)
-		}
-	}
-}
-
-func TestGoodChangeSubnetworkID(t *testing.T) {
-	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
-		return &config.Config{
-			Flags: &config.Flags{
-				NetworkFlags: config.NetworkFlags{
-					ActiveNetParams: &dagconfig.SimNetParams},
-			},
-		}
-	})
-	defer activeConfigPatch.Unpatch()
-
-	n := New("test_good_change_subnetwork_id", lookupFunc, nil)
-	addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
-	addrKey := NetAddressKey(addr)
-	srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
-
-	oldSubnetwork := subnetworkid.SubnetworkIDNative
-	n.AddAddress(addr, srcAddr, oldSubnetwork)
-	n.Good(addr, oldSubnetwork)
-
-	// make sure address was saved to addrIndex under oldSubnetwork
-	ka := n.find(addr)
-	if ka == nil {
-		t.Fatalf("Address was not found after first time .Good called")
-	}
-	if !ka.SubnetworkID().IsEqual(oldSubnetwork) {
-		t.Fatalf("Address index did not point to oldSubnetwork")
-	}
-
-	// make sure address was added to correct bucket under oldSubnetwork
-	bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
-	wasFound := false
-	for e := bucket.Front(); e != nil; e = e.Next() {
-		if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
-			wasFound = true
-		}
-	}
-	if !wasFound {
-		t.Fatalf("Address was not found in the correct bucket in oldSubnetwork")
-	}
-
-	// now call .Good again with a different subnetwork
-	newSubnetwork := subnetworkid.SubnetworkIDRegistry
-	n.Good(addr, newSubnetwork)
-
-	// make sure address was updated in addrIndex under newSubnetwork
-	ka = n.find(addr)
-	if ka == nil {
-		t.Fatalf("Address was not found after second time .Good called")
-	}
-	if !ka.SubnetworkID().IsEqual(newSubnetwork) {
-		t.Fatalf("Address index did not point to newSubnetwork")
-	}
-
-	// make sure address was removed from bucket under oldSubnetwork
-	bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
-	wasFound = false
-	for e := bucket.Front(); e != nil; e = e.Next() {
-		if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
-			wasFound = true
-		}
-	}
-	if wasFound {
-		t.Fatalf("Address was not removed from bucket in oldSubnetwork")
-	}
-
-	// make sure address was added to correct bucket under newSubnetwork
-	bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
-	wasFound = false
-	for e := bucket.Front(); e != nil; e = e.Next() {
-		if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
-			wasFound = true
-		}
-	}
-	if !wasFound {
-		t.Fatalf("Address was not found in the correct bucket in newSubnetwork")
-	}
-}
-
-func TestGetAddress(t *testing.T) {
-	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
-		return &config.Config{
-			Flags: &config.Flags{
-				NetworkFlags: config.NetworkFlags{
-					ActiveNetParams: &dagconfig.SimNetParams},
-			},
-		}
-	})
-	defer activeConfigPatch.Unpatch()
-
-	localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
-	n := New("testgetaddress", lookupFunc, localSubnetworkID)
-
-	// Get an address from an empty set (should return nil)
-	if rv := n.GetAddress(); rv != nil {
-		t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
-	}
-
-	// Add a new address and get it
-	err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
-	if err != nil {
-		t.Fatalf("Adding address failed: %v", err)
-	}
-	ka := n.GetAddress()
-	if ka == nil {
-		t.Fatalf("Did not get an address where there is one in the pool")
-	}
-	n.Attempt(ka.NetAddress())
-
-	// Check that we don't get the address if it turns out to have a
-	// different subnetwork ID than expected.
-	actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
-	n.Good(ka.NetAddress(), actualSubnetworkID)
-	ka = n.GetAddress()
-	if ka != nil {
-		t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
-	}
-
-	// Check that the total number of addresses was incremented even though
-	// the new address is neither a full node nor a partial node of the
-	// same subnetwork as the local node.
-	numAddrs := n.TotalNumAddresses()
-	if numAddrs != 1 {
-		t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
-	}
-
-	// Now we repeat the same process, but now the address has the expected subnetwork ID.
-
-	// Add a new address and get it
-	err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
-	if err != nil {
-		t.Fatalf("Adding address failed: %v", err)
-	}
-	ka = n.GetAddress()
-	if ka == nil {
-		t.Fatalf("Did not get an address where there is one in the pool")
-	}
-	if ka.NetAddress().IP.String() != someIP {
-		t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
-	}
-	if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
-		t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
-	}
-	n.Attempt(ka.NetAddress())
-
-	// Mark this as a good address and get it
-	n.Good(ka.NetAddress(), localSubnetworkID)
-	ka = n.GetAddress()
-	if ka == nil {
-		t.Fatalf("Did not get an address where there is one in the pool")
-	}
-	if ka.NetAddress().IP.String() != someIP {
-		t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
-	}
-	if *ka.SubnetworkID() != *localSubnetworkID {
-		t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
-	}
-
-	numAddrs = n.TotalNumAddresses()
-	if numAddrs != 2 {
-		t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 2)
-	}
-}
-
-func TestGetBestLocalAddress(t *testing.T) {
-	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
-		return &config.Config{
-			Flags: &config.Flags{
-				NetworkFlags: config.NetworkFlags{
-					ActiveNetParams: &dagconfig.SimNetParams},
-			},
-		}
-	})
-	defer activeConfigPatch.Unpatch()
-
-	localAddrs := []wire.NetAddress{
-		{IP: net.ParseIP("192.168.0.100")},
-		{IP: net.ParseIP("::1")},
-		{IP: net.ParseIP("fe80::1")},
-		{IP: net.ParseIP("2001:470::1")},
-	}
-
-	var tests = []struct {
-		remoteAddr wire.NetAddress
-		want0      wire.NetAddress
-		want1      wire.NetAddress
-		want2      wire.NetAddress
-		want3      wire.NetAddress
-	}{
-		{
-			// Remote connection from public IPv4
-			wire.NetAddress{IP: net.ParseIP("204.124.8.1")},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
-			wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
-		},
-		{
-			// Remote connection from private IPv4
-			wire.NetAddress{IP: net.ParseIP("172.16.0.254")},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.IPv4zero},
-		},
-		{
-			// Remote connection from public IPv6
-			wire.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
-			wire.NetAddress{IP: net.IPv6zero},
-			wire.NetAddress{IP: net.ParseIP("2001:470::1")},
-			wire.NetAddress{IP: net.ParseIP("2001:470::1")},
-			wire.NetAddress{IP: net.ParseIP("2001:470::1")},
-		},
-		/* XXX
-		{
-			// Remote connection from Tor
-			wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
-			wire.NetAddress{IP: net.IPv4zero},
-			wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
-			wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
-		},
-		*/
-	}
-
-	amgr := New("testgetbestlocaladdress", nil, nil)
-
-	// Test against default when there's no address
-	for x, test := range tests {
-		got := amgr.GetBestLocalAddress(&test.remoteAddr)
-		if !test.want0.IP.Equal(got.IP) {
-			t.Errorf("TestGetBestLocalAddress test0 #%d failed for remote address %s: want %s got %s",
-				x, test.remoteAddr.IP, test.want0.IP, got.IP)
-			continue
-		}
-	}
-
-	for _, localAddr := range localAddrs {
-		amgr.AddLocalAddress(&localAddr, InterfacePrio)
-	}
-
-	// Test against want1
-	for x, test := range tests {
-		got := amgr.GetBestLocalAddress(&test.remoteAddr)
-		if !test.want1.IP.Equal(got.IP) {
-			t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
-				x, test.remoteAddr.IP, test.want1.IP, got.IP)
-			continue
-		}
-	}
-
-	// Add a public IP to the list of local addresses.
-	localAddr := wire.NetAddress{IP: net.ParseIP("204.124.8.100")}
-	amgr.AddLocalAddress(&localAddr, InterfacePrio)
-
-	// Test against want2
-	for x, test := range tests {
-		got := amgr.GetBestLocalAddress(&test.remoteAddr)
-		if !test.want2.IP.Equal(got.IP) {
-			t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s",
-				x, test.remoteAddr.IP, test.want2.IP, got.IP)
-			continue
-		}
-	}
-	/*
-		// Add a Tor generated IP address
-		localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
-		amgr.AddLocalAddress(&localAddr, ManualPrio)
-
-		// Test against want3
-		for x, test := range tests {
-			got := amgr.GetBestLocalAddress(&test.remoteAddr)
-			if !test.want3.IP.Equal(got.IP) {
-				t.Errorf("TestGetBestLocalAddress test3 #%d failed for remote address %s: want %s got %s",
-					x, test.remoteAddr.IP, test.want3.IP, got.IP)
-				continue
-			}
-		}
-	*/
-}
-
-func TestNetAddressKey(t *testing.T) {
-	addNaTests()
-
-	t.Logf("Running %d tests", len(naTests))
-	for i, test := range naTests {
-		key := NetAddressKey(&test.in)
-		if key != test.want {
-			t.Errorf("NetAddressKey #%d\n got: %s want: %s", i, key, test.want)
-			continue
-		}
-	}
-}
diff --git a/addrmgr/cov_report.sh b/addrmgr/cov_report.sh
deleted file mode 100644
index 307f05b7..00000000
--- a/addrmgr/cov_report.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# This script uses gocov to generate a test coverage report.
-# The gocov tool may be obtained with the following command:
-# go get github.com/axw/gocov/gocov
-#
-# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
-
-# Check for gocov.
-type gocov >/dev/null 2>&1
-if [ $? -ne 0 ]; then
-	echo >&2 "This script requires the gocov tool."
-	echo >&2 "You may obtain it with the following command:"
-	echo >&2 "go get github.com/axw/gocov/gocov"
-	exit 1
-fi
-gocov test | gocov report
diff --git a/addrmgr/doc.go b/addrmgr/doc.go
deleted file mode 100644
index 86a0ad92..00000000
--- a/addrmgr/doc.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-/*
-Package addrmgr implements a concurrency safe Kaspa address manager.
-
-Address Manager Overview
-
-In order to maintain the peer-to-peer Kaspa network, there needs to be a source
-of addresses to connect to as nodes come and go. The Kaspa protocol provides
-the getaddr and addr messages to allow peers to communicate known addresses with
-each other. However, there needs to be a mechanism to store those results and
-select peers from them. It is also important to note that remote peers can't
-be trusted to send valid addresses, and may maliciously attempt to provide you
-with only peers they control.
-
-With that in mind, this package provides a concurrency safe address manager for
-caching and selecting peers in a non-deterministic manner. The general idea is
-the caller adds addresses to the address manager and notifies it when addresses
-are connected, known good, and attempted. The caller also requests addresses as
-it needs them.
-
-The address manager internally segregates the addresses into groups and
-non-deterministically selects groups in a cryptographically random manner. This
-reduces the chances that multiple addresses from the same nets are selected, which
-generally helps provide greater peer diversity, and perhaps more importantly,
-drastically reduces the chances an attacker is able to coerce your peer into
-only connecting to nodes they control.
-
-The address manager also understands routability and Tor addresses and tries
-hard to only return routable addresses. In addition, it uses the information
-provided by the caller about connected, known good, and attempted addresses to
-periodically purge peers which no longer appear to be good peers as well as
-bias the selection toward known good peers. The general idea is to make a best
-effort at only providing usable addresses.
-*/
-package addrmgr
diff --git a/addrmgr/internal_test.go b/addrmgr/internal_test.go
deleted file mode 100644
index b8a69f89..00000000
--- a/addrmgr/internal_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2013-2015 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package addrmgr
-
-import (
-	"time"
-
-	"github.com/kaspanet/kaspad/wire"
-)
-
-func TstKnownAddressIsBad(ka *KnownAddress) bool {
-	return ka.isBad()
-}
-
-func TstKnownAddressChance(ka *KnownAddress) float64 {
-	return ka.chance()
-}
-
-func TstNewKnownAddress(na *wire.NetAddress, attempts int,
-	lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
-	return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
-		lastsuccess: lastsuccess, tried: tried, refs: refs}
-}
diff --git a/addrmgr/knownaddress.go b/addrmgr/knownaddress.go
deleted file mode 100644
index 1137deca..00000000
--- a/addrmgr/knownaddress.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2013-2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package addrmgr
-
-import (
-	"time"
-
-	"github.com/kaspanet/kaspad/util/subnetworkid"
-
-	"github.com/kaspanet/kaspad/wire"
-)
-
-// KnownAddress tracks information about a known network address that is used
-// to determine how viable an address is.
-type KnownAddress struct {
-	na           *wire.NetAddress
-	srcAddr      *wire.NetAddress
-	attempts     int
-	lastattempt  time.Time
-	lastsuccess  time.Time
-	tried        bool
-	refs         int // reference count of new buckets
-	subnetworkID *subnetworkid.SubnetworkID
-}
-
-// NetAddress returns the underlying wire.NetAddress associated with the
-// known address.
-func (ka *KnownAddress) NetAddress() *wire.NetAddress { - return ka.na -} - -// SubnetworkID returns the subnetwork ID of the known address. -func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID { - return ka.subnetworkID -} - -// LastAttempt returns the last time the known address was attempted. -func (ka *KnownAddress) LastAttempt() time.Time { - return ka.lastattempt -} - -// chance returns the selection probability for a known address. The priority -// depends upon how recently the address has been seen, how recently it was last -// attempted and how often attempts to connect to it have failed. -func (ka *KnownAddress) chance() float64 { - now := time.Now() - lastAttempt := now.Sub(ka.lastattempt) - - if lastAttempt < 0 { - lastAttempt = 0 - } - - c := 1.0 - - // Very recent attempts are less likely to be retried. - if lastAttempt < 10*time.Minute { - c *= 0.01 - } - - // Failed attempts deprioritise. - for i := ka.attempts; i > 0; i-- { - c /= 1.5 - } - - return c -} - -// isBad returns true if the address in question has not been tried in the last -// minute and meets one of the following criteria: -// 1) It claims to be from the future -// 2) It hasn't been seen in over a month -// 3) It has failed at least three times and never succeeded -// 4) It has failed ten times in the last week -// All addresses that meet these criteria are assumed to be worthless and not -// worth keeping hold of. -func (ka *KnownAddress) isBad() bool { - if ka.lastattempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // From the future? - if ka.na.Timestamp.After(time.Now().Add(10 * time.Minute)) { - return true - } - - // Over a month old? - if ka.na.Timestamp.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.lastsuccess.IsZero() && ka.attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if !ka.lastsuccess.After(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.attempts >= maxFailures { - return true - } - - return false -} diff --git a/addrmgr/knownaddress_test.go b/addrmgr/knownaddress_test.go deleted file mode 100644 index 0ac2550f..00000000 --- a/addrmgr/knownaddress_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2013-2015 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package addrmgr_test - -import ( - "math" - "testing" - "time" - - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/wire" -) - -func TestChance(t *testing.T) { - now := time.Unix(time.Now().Unix(), 0) - var tests = []struct { - addr *addrmgr.KnownAddress - expected float64 - }{ - { - //Test normal case - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, - 0, time.Now().Add(-30*time.Minute), time.Now(), false, 0), - 1.0, - }, { - //Test case in which lastseen < 0 - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)}, - 0, time.Now().Add(-30*time.Minute), time.Now(), false, 0), - 1.0, - }, { - //Test case in which lastattempt < 0 - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, - 0, time.Now().Add(30*time.Minute), time.Now(), false, 0), - 1.0 * .01, - }, { - //Test case in which lastattempt < ten minutes - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, - 0, time.Now().Add(-5*time.Minute), time.Now(), false, 0), - 1.0 * .01, - }, { - //Test case with several failed attempts. - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, - 2, time.Now().Add(-30*time.Minute), time.Now(), false, 0), - 1 / 1.5 / 1.5, - }, - } - - err := .0001 - for i, test := range tests { - chance := addrmgr.TstKnownAddressChance(test.addr) - if math.Abs(test.expected-chance) >= err { - t.Errorf("case %d: got %f, expected %f", i, chance, test.expected) - } - } -} - -func TestIsBad(t *testing.T) { - now := time.Unix(time.Now().Unix(), 0) - future := now.Add(35 * time.Minute) - monthOld := now.Add(-43 * time.Hour * 24) - secondsOld := now.Add(-2 * time.Second) - minutesOld := now.Add(-27 * time.Minute) - hoursOld := now.Add(-5 * time.Hour) - zeroTime := time.Time{} - - futureNa := &wire.NetAddress{Timestamp: future} - minutesOldNa := &wire.NetAddress{Timestamp: minutesOld} - monthOldNa := &wire.NetAddress{Timestamp: monthOld} - currentNa := &wire.NetAddress{Timestamp: secondsOld} - - //Test addresses that have been tried in the last minute. - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) { - t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.") - } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) { - t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.") - } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) { - t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.") - } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) { - t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.") - } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) { - t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.") - } - - //Test address that claims to be from the future. - if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) { - t.Errorf("test case 6: addresses that claim to be from the future are bad.") - } - - //Test address that has not been seen in over a month. 
- if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
- t.Errorf("test case 7: addresses more than a month old are bad.")
- }
-
- //It has failed at least three times and never succeeded.
- if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
- t.Errorf("test case 8: addresses that have never succeeded are bad.")
- }
-
- //It has failed ten times in the last week
- if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
- t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
- }
-
- //Test an address that should work.
- if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
- t.Errorf("test case 10: This should be a valid address.")
- }
-}
diff --git a/addrmgr/log.go b/addrmgr/log.go
deleted file mode 100644
index 0ff68a7a..00000000
--- a/addrmgr/log.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2013-2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package addrmgr
-
-import (
- "github.com/kaspanet/kaspad/logger"
- "github.com/kaspanet/kaspad/util/panics"
-)
-
-var log, _ = logger.Get(logger.SubsystemTags.ADXR)
-var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
diff --git a/addrmgr/network.go b/addrmgr/network.go
deleted file mode 100644
index 3fac4144..00000000
--- a/addrmgr/network.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright (c) 2013-2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package addrmgr
-
-import (
- "fmt"
- "net"
-
- "github.com/kaspanet/kaspad/config"
-
- "github.com/kaspanet/kaspad/wire"
-)
-
-var (
- // rfc1918Nets specifies the IPv4 private address blocks as defined
- // by RFC1918 (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16).
- rfc1918Nets = []net.IPNet{
- ipNet("10.0.0.0", 8, 32),
- ipNet("172.16.0.0", 12, 32),
- ipNet("192.168.0.0", 16, 32),
- }
-
- // rfc2544Net specifies the IPv4 block as defined by RFC2544
- // (198.18.0.0/15)
- rfc2544Net = ipNet("198.18.0.0", 15, 32)
-
- // rfc3849Net specifies the IPv6 documentation address block as defined
- // by RFC3849 (2001:DB8::/32).
- rfc3849Net = ipNet("2001:DB8::", 32, 128)
-
- // rfc3927Net specifies the IPv4 auto configuration address block as
- // defined by RFC3927 (169.254.0.0/16).
- rfc3927Net = ipNet("169.254.0.0", 16, 32)
-
- // rfc3964Net specifies the IPv6 to IPv4 encapsulation address block as
- // defined by RFC3964 (2002::/16).
- rfc3964Net = ipNet("2002::", 16, 128)
-
- // rfc4193Net specifies the IPv6 unique local address block as defined
- // by RFC4193 (FC00::/7).
- rfc4193Net = ipNet("FC00::", 7, 128)
-
- // rfc4380Net specifies the IPv6 teredo tunneling over UDP address block
- // as defined by RFC4380 (2001::/32).
- rfc4380Net = ipNet("2001::", 32, 128)
-
- // rfc4843Net specifies the IPv6 ORCHID address block as defined by
- // RFC4843 (2001:10::/28).
- rfc4843Net = ipNet("2001:10::", 28, 128)
-
- // rfc4862Net specifies the IPv6 stateless address autoconfiguration
- // address block as defined by RFC4862 (FE80::/64).
- rfc4862Net = ipNet("FE80::", 64, 128)
-
- // rfc5737Net specifies the IPv4 documentation address blocks as defined
- // by RFC5737 (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24)
- rfc5737Net = []net.IPNet{
- ipNet("192.0.2.0", 24, 32),
- ipNet("198.51.100.0", 24, 32),
- ipNet("203.0.113.0", 24, 32),
- }
-
- // rfc6052Net specifies the IPv6 well-known prefix address block as
- // defined by RFC6052 (64:FF9B::/96).
- rfc6052Net = ipNet("64:FF9B::", 96, 128)
-
- // rfc6145Net specifies the IPv6 to IPv4 translated address range as
- // defined by RFC6145 (::FFFF:0:0:0/96).
- rfc6145Net = ipNet("::FFFF:0:0:0", 96, 128)
-
- // rfc6598Net specifies the IPv4 block as defined by RFC6598 (100.64.0.0/10)
- rfc6598Net = ipNet("100.64.0.0", 10, 32)
-
- // onionCatNet defines the IPv6 address block used to support Tor.
- // We encode a .onion address as a 16 byte number by decoding the
- // address prior to the .onion (i.e. the key hash) base32 into a ten
- // byte number. It then stores the first 6 bytes of the address as
- // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
- //
- // This is the same range used by OnionCat, which is part of the
- // RFC4193 unique local IPv6 range.
- //
- // In summary the format is:
- // { magic 6 bytes, 10 bytes base32 decode of key hash }
- onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
-
- // zero4Net defines the IPv4 address block for addresses starting with 0
- // (0.0.0.0/8).
- zero4Net = ipNet("0.0.0.0", 8, 32)
-
- // heNet defines the Hurricane Electric IPv6 address block.
- heNet = ipNet("2001:470::", 32, 128)
-)
-
-// ipNet returns a net.IPNet struct given the passed IP address string, number
-// of one bits to include at the start of the mask, and the total number of bits
-// for the mask.
-func ipNet(ip string, ones, bits int) net.IPNet {
- return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
-}
-
-// IsIPv4 returns whether or not the given address is an IPv4 address.
-func IsIPv4(na *wire.NetAddress) bool {
- return na.IP.To4() != nil
-}
-
-// IsLocal returns whether or not the given address is a local address.
-func IsLocal(na *wire.NetAddress) bool {
- return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
-}
-
-// IsOnionCatTor returns whether or not the passed address is in the IPv6 range
-// used by Kaspa to support Tor (fd87:d87e:eb43::/48). Note that this range
-// is the same range used by OnionCat, which is part of the RFC4193 unique local
-// IPv6 range.
-func IsOnionCatTor(na *wire.NetAddress) bool {
- return onionCatNet.Contains(na.IP)
-}
-
-// IsRFC1918 returns whether or not the passed address is part of the IPv4
-// private network address space as defined by RFC1918 (10.0.0.0/8,
-// 172.16.0.0/12, or 192.168.0.0/16).
-func IsRFC1918(na *wire.NetAddress) bool {
- for _, rfc := range rfc1918Nets {
- if rfc.Contains(na.IP) {
- return true
- }
- }
- return false
-}
-
-// IsRFC2544 returns whether or not the passed address is part of the IPv4
-// address space as defined by RFC2544 (198.18.0.0/15)
-func IsRFC2544(na *wire.NetAddress) bool {
- return rfc2544Net.Contains(na.IP)
-}
-
-// IsRFC3849 returns whether or not the passed address is part of the IPv6
-// documentation range as defined by RFC3849 (2001:DB8::/32).
-func IsRFC3849(na *wire.NetAddress) bool {
- return rfc3849Net.Contains(na.IP)
-}
-
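Editor's note (illustrative sketch, not part of the original patch): to make the OnionCat layout above concrete, a Tor v2 name of sixteen base32 characters decodes to the ten-byte key hash, which follows the six magic bytes. The helper below and its input shape are assumptions, not kaspad API:

	// onionToIPv6 sketches the { 6 magic bytes, 10-byte base32 key hash }
	// encoding described above, e.g. for "expyuzz4wqqyqhjn.onion".
	func onionToIPv6(onion string) (net.IP, error) {
		keyHash := strings.ToUpper(strings.TrimSuffix(onion, ".onion"))
		decoded, err := base32.StdEncoding.DecodeString(keyHash)
		if err != nil {
			return nil, err
		}
		if len(decoded) != 10 {
			return nil, fmt.Errorf("expected a 10-byte key hash, got %d bytes", len(decoded))
		}
		ip := make(net.IP, 16)
		copy(ip, []byte{0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43}) // OnionCat magic prefix
		copy(ip[6:], decoded)                                // base32-decoded key hash
		return ip, nil
	}

Any address built this way falls inside fd87:d87e:eb43::/48, so IsOnionCatTor below reports true for it.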
-// IsRFC3927 returns whether or not the passed address is part of the IPv4
-// autoconfiguration range as defined by RFC3927 (169.254.0.0/16).
-func IsRFC3927(na *wire.NetAddress) bool {
- return rfc3927Net.Contains(na.IP)
-}
-
-// IsRFC3964 returns whether or not the passed address is part of the IPv6 to
-// IPv4 encapsulation range as defined by RFC3964 (2002::/16).
-func IsRFC3964(na *wire.NetAddress) bool {
- return rfc3964Net.Contains(na.IP)
-}
-
-// IsRFC4193 returns whether or not the passed address is part of the IPv6
-// unique local range as defined by RFC4193 (FC00::/7).
-func IsRFC4193(na *wire.NetAddress) bool {
- return rfc4193Net.Contains(na.IP)
-}
-
-// IsRFC4380 returns whether or not the passed address is part of the IPv6
-// teredo tunneling over UDP range as defined by RFC4380 (2001::/32).
-func IsRFC4380(na *wire.NetAddress) bool {
- return rfc4380Net.Contains(na.IP)
-}
-
-// IsRFC4843 returns whether or not the passed address is part of the IPv6
-// ORCHID range as defined by RFC4843 (2001:10::/28).
-func IsRFC4843(na *wire.NetAddress) bool {
- return rfc4843Net.Contains(na.IP)
-}
-
-// IsRFC4862 returns whether or not the passed address is part of the IPv6
-// stateless address autoconfiguration range as defined by RFC4862 (FE80::/64).
-func IsRFC4862(na *wire.NetAddress) bool {
- return rfc4862Net.Contains(na.IP)
-}
-
-// IsRFC5737 returns whether or not the passed address is part of the IPv4
-// documentation address space as defined by RFC5737 (192.0.2.0/24,
-// 198.51.100.0/24, 203.0.113.0/24)
-func IsRFC5737(na *wire.NetAddress) bool {
- for _, rfc := range rfc5737Net {
- if rfc.Contains(na.IP) {
- return true
- }
- }
-
- return false
-}
-
-// IsRFC6052 returns whether or not the passed address is part of the IPv6
-// well-known prefix range as defined by RFC6052 (64:FF9B::/96).
-func IsRFC6052(na *wire.NetAddress) bool {
- return rfc6052Net.Contains(na.IP)
-}
-
-// IsRFC6145 returns whether or not the passed address is part of the IPv6 to
-// IPv4 translated address range as defined by RFC6145 (::FFFF:0:0:0/96).
-func IsRFC6145(na *wire.NetAddress) bool {
- return rfc6145Net.Contains(na.IP)
-}
-
-// IsRFC6598 returns whether or not the passed address is part of the IPv4
-// shared address space specified by RFC6598 (100.64.0.0/10)
-func IsRFC6598(na *wire.NetAddress) bool {
- return rfc6598Net.Contains(na.IP)
-}
-
-// IsValid returns whether or not the passed address is valid. The address is
-// considered invalid under the following circumstances:
-// IPv4: It is either a zero or all bits set address.
-// IPv6: It is either a zero or RFC3849 documentation address.
-func IsValid(na *wire.NetAddress) bool {
- // IsUnspecified returns true when the address is zero, so only the
- // all-bits-set address and RFC3849 addresses need to be explicitly
- // checked.
- return na.IP != nil && !(na.IP.IsUnspecified() ||
- na.IP.Equal(net.IPv4bcast))
-}
-
-// IsRoutable returns whether or not the passed address is routable over
-// the public internet. This is true as long as the address is valid and is not
-// in any reserved ranges.
-func IsRoutable(na *wire.NetAddress) bool {
- if config.ActiveConfig().NetParams().AcceptUnroutable {
- return !IsLocal(na)
- }
-
- return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
- IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) ||
- IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) ||
- IsLocal(na) || (IsRFC4193(na) && !IsOnionCatTor(na)))
-}
-
-// GroupKey returns a string representing the network group an address is part
-// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
-// "local" for a local address, the string "tor:key" where key is the /4 of the
-// onion address for a Tor address, and the string "unroutable" for an
-// unroutable address.
-func GroupKey(na *wire.NetAddress) string {
- if IsLocal(na) {
- return "local"
- }
- if !IsRoutable(na) {
- return "unroutable"
- }
- if IsIPv4(na) {
- return na.IP.Mask(net.CIDRMask(16, 32)).String()
- }
- if IsRFC6145(na) || IsRFC6052(na) {
- // last four bytes are the ip address
- ip := na.IP[12:16]
- return ip.Mask(net.CIDRMask(16, 32)).String()
- }
-
- if IsRFC3964(na) {
- ip := na.IP[2:6]
- return ip.Mask(net.CIDRMask(16, 32)).String()
-
- }
- if IsRFC4380(na) {
- // teredo tunnels have the last 4 bytes as the v4 address XOR
- // 0xff.
- ip := net.IP(make([]byte, 4))
- for i, byte := range na.IP[12:16] {
- ip[i] = byte ^ 0xff
- }
- return ip.Mask(net.CIDRMask(16, 32)).String()
- }
- if IsOnionCatTor(na) {
- // group is keyed off the first 4 bits of the actual onion key.
- return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
- }
-
- // OK, so now we know ourselves to be an IPv6 address.
- // We use /32 for everything, except for Hurricane Electric's
- // (he.net) IP range, which we use /36 for.
- bits := 32
- if heNet.Contains(na.IP) {
- bits = 36
- }
-
- return na.IP.Mask(net.CIDRMask(bits, 128)).String()
-}
diff --git a/addrmgr/network_test.go b/addrmgr/network_test.go
deleted file mode 100644
index 06d77dfd..00000000
--- a/addrmgr/network_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright (c) 2013-2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package addrmgr_test
-
-import (
- "bou.ke/monkey"
- "github.com/kaspanet/kaspad/config"
- "github.com/kaspanet/kaspad/dagconfig"
- "net"
- "testing"
-
- "github.com/kaspanet/kaspad/addrmgr"
- "github.com/kaspanet/kaspad/wire"
-)
-
-// TestIPTypes ensures the various functions which determine the type of an IP
-// address based on RFCs work as intended.
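Editor's note (worked example, not part of the original patch): the Teredo branch of GroupKey above recovers the embedded IPv4 address by XOR-ing the last four bytes with 0xff before masking to /16. For 2001:0:1234::f3fe:fdfc, a value also used in the tests below:

	na := net.ParseIP("2001:0:1234::f3fe:fdfc")
	ip := make(net.IP, 4)
	for i, b := range na[12:16] {
		ip[i] = b ^ 0xff // f3 fe fd fc -> 0c 01 02 03, i.e. 12.1.2.3
	}
	fmt.Println(ip.Mask(net.CIDRMask(16, 32))) // 12.1.0.0

which matches the expected group key "12.1.0.0" in TestGroupKey.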
-func TestIPTypes(t *testing.T) { - activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config { - return &config.Config{ - Flags: &config.Flags{ - NetworkFlags: config.NetworkFlags{ - ActiveNetParams: &dagconfig.SimNetParams}, - }, - } - }) - defer activeConfigPatch.Unpatch() - - type ipTest struct { - in wire.NetAddress - rfc1918 bool - rfc2544 bool - rfc3849 bool - rfc3927 bool - rfc3964 bool - rfc4193 bool - rfc4380 bool - rfc4843 bool - rfc4862 bool - rfc5737 bool - rfc6052 bool - rfc6145 bool - rfc6598 bool - local bool - valid bool - routable bool - } - - newIPTest := func(ip string, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, - rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, - local, valid, routable bool) ipTest { - nip := net.ParseIP(ip) - na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork) - test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380, - rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable} - return test - } - - tests := []ipTest{ - newIPTest("10.255.255.255", true, false, false, false, false, false, - false, false, false, false, false, false, false, false, true, false), - newIPTest("192.168.0.1", true, false, false, false, false, false, - false, false, false, false, false, false, false, false, true, false), - newIPTest("172.31.255.1", true, false, false, false, false, false, - false, false, false, false, false, false, false, false, true, false), - newIPTest("172.32.1.1", false, false, false, false, false, false, false, false, - false, false, false, false, false, false, true, true), - newIPTest("169.254.250.120", false, false, false, true, false, false, - false, false, false, false, false, false, false, false, true, false), - newIPTest("0.0.0.0", false, false, false, false, false, false, false, - false, false, false, false, false, false, true, false, false), - newIPTest("255.255.255.255", false, false, false, false, false, false, - false, false, false, false, false, false, false, false, false, false), - newIPTest("127.0.0.1", false, false, false, false, false, false, - false, false, false, false, false, false, false, true, true, false), - newIPTest("fd00:dead::1", false, false, false, false, false, true, - false, false, false, false, false, false, false, false, true, false), - newIPTest("2001::1", false, false, false, false, false, false, - true, false, false, false, false, false, false, false, true, true), - newIPTest("2001:10:abcd::1:1", false, false, false, false, false, false, - false, true, false, false, false, false, false, false, true, false), - newIPTest("fe80::1", false, false, false, false, false, false, - false, false, true, false, false, false, false, false, true, false), - newIPTest("fe80:1::1", false, false, false, false, false, false, - false, false, false, false, false, false, false, false, true, true), - newIPTest("64:ff9b::1", false, false, false, false, false, false, - false, false, false, false, true, false, false, false, true, true), - newIPTest("::ffff:abcd:ef12:1", false, false, false, false, false, false, - false, false, false, false, false, false, false, false, true, true), - newIPTest("::1", false, false, false, false, false, false, false, false, - false, false, false, false, false, true, true, false), - newIPTest("198.18.0.1", false, true, false, false, false, false, false, - false, false, false, false, false, false, false, true, false), - newIPTest("100.127.255.1", false, false, false, false, false, false, false, - false, false, false, 
false, false, true, false, true, false),
- newIPTest("203.0.113.1", false, false, false, false, false, false, false,
- false, false, false, false, false, false, false, true, false),
- }
-
- t.Logf("Running %d tests", len(tests))
- for _, test := range tests {
- if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 {
- t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
- }
-
- if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 {
- t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
- }
-
- if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 {
- t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
- }
-
- if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 {
- t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
- }
-
- if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 {
- t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
- }
-
- if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 {
- t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
- }
-
- if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 {
- t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
- }
-
- if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 {
- t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
- }
-
- if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 {
- t.Errorf("IsRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
- }
-
- if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 {
- t.Errorf("IsRFC6145 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
- }
-
- if rv := addrmgr.IsLocal(&test.in); rv != test.local {
- t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
- }
-
- if rv := addrmgr.IsValid(&test.in); rv != test.valid {
- t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
- }
-
- if rv := addrmgr.IsRoutable(&test.in); rv != test.routable {
- t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
- }
- }
-}
-
-// TestGroupKey tests the GroupKey function to ensure it properly groups various
-// IP addresses.
-func TestGroupKey(t *testing.T) {
- activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
- return &config.Config{
- Flags: &config.Flags{
- NetworkFlags: config.NetworkFlags{
- ActiveNetParams: &dagconfig.SimNetParams},
- },
- }
- })
- defer activeConfigPatch.Unpatch()
-
- tests := []struct {
- name string
- ip string
- expected string
- }{
- // Local addresses.
- {name: "ipv4 localhost", ip: "127.0.0.1", expected: "local"},
- {name: "ipv6 localhost", ip: "::1", expected: "local"},
- {name: "ipv4 zero", ip: "0.0.0.0", expected: "local"},
- {name: "ipv4 first octet zero", ip: "0.1.2.3", expected: "local"},
-
- // Unroutable addresses.
- {name: "ipv4 invalid bcast", ip: "255.255.255.255", expected: "unroutable"}, - {name: "ipv4 rfc1918 10/8", ip: "10.1.2.3", expected: "unroutable"}, - {name: "ipv4 rfc1918 172.16/12", ip: "172.16.1.2", expected: "unroutable"}, - {name: "ipv4 rfc1918 192.168/16", ip: "192.168.1.2", expected: "unroutable"}, - {name: "ipv6 rfc3849 2001:db8::/32", ip: "2001:db8::1234", expected: "unroutable"}, - {name: "ipv4 rfc3927 169.254/16", ip: "169.254.1.2", expected: "unroutable"}, - {name: "ipv6 rfc4193 fc00::/7", ip: "fc00::1234", expected: "unroutable"}, - {name: "ipv6 rfc4843 2001:10::/28", ip: "2001:10::1234", expected: "unroutable"}, - {name: "ipv6 rfc4862 fe80::/64", ip: "fe80::1234", expected: "unroutable"}, - - // IPv4 normal. - {name: "ipv4 normal class a", ip: "12.1.2.3", expected: "12.1.0.0"}, - {name: "ipv4 normal class b", ip: "173.1.2.3", expected: "173.1.0.0"}, - {name: "ipv4 normal class c", ip: "196.1.2.3", expected: "196.1.0.0"}, - - // IPv6/IPv4 translations. - {name: "ipv6 rfc3964 with ipv4 encap", ip: "2002:0c01:0203::", expected: "12.1.0.0"}, - {name: "ipv6 rfc4380 toredo ipv4", ip: "2001:0:1234::f3fe:fdfc", expected: "12.1.0.0"}, - {name: "ipv6 rfc6052 well-known prefix with ipv4", ip: "64:ff9b::0c01:0203", expected: "12.1.0.0"}, - {name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"}, - - // Tor. - {name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "tor:2"}, - {name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "tor:2"}, - {name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "tor:3"}, - - // IPv6 normal. - {name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"}, - {name: "ipv6 normal 2", ip: "2602:0100::1234", expected: "2602:100::"}, - {name: "ipv6 hurricane electric", ip: "2001:470:1f10:a1::2", expected: "2001:470:1000::"}, - {name: "ipv6 hurricane electric 2", ip: "2001:0470:1f10:a1::2", expected: "2001:470:1000::"}, - } - - for i, test := range tests { - nip := net.ParseIP(test.ip) - na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork) - if key := addrmgr.GroupKey(&na); key != test.expected { - t.Errorf("TestGroupKey #%d (%s): unexpected group key "+ - "- got '%s', want '%s'", i, test.name, - key, test.expected) - } - } -} diff --git a/addrmgr/test_coverage.txt b/addrmgr/test_coverage.txt deleted file mode 100644 index c67e0f6d..00000000 --- a/addrmgr/test_coverage.txt +++ /dev/null @@ -1,62 +0,0 @@ - -github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6) -github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4) -github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4) -github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3) -github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2) -github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1) -github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1) -github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1) 
-github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1) -github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11) -github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33) -github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1) -github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1) -github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1) 
-github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
-github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)
-
diff --git a/blockdag/README.md b/blockdag/README.md
deleted file mode 100644
index 542b147b..00000000
--- a/blockdag/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-blockdag
-========
-
-[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
-
-Package blockdag implements Kaspa block handling, organization of the blockDAG,
-block sorting and UTXO-set maintenance.
-The test coverage is currently only around 75%, but will be increasing over
-time.
-
-## Kaspad BlockDAG Processing Overview
-
-Before a block is allowed into the block DAG, it must go through an intensive
-series of validation rules. The following list serves as a general outline of
-those rules to provide some intuition into what is going on under the hood, but
-is by no means exhaustive:
-
- - Reject duplicate blocks
- - Perform a series of sanity checks on the block and its transactions such as
-   verifying proof of work, timestamps, number and character of transactions,
-   transaction amounts, script complexity, and merkle root calculations
- - Save the most recent orphan blocks for a limited time in case their parent
-   blocks become available.
- - Save blocks from the future for delayed processing
- - Stop processing if the block is an orphan or delayed as the rest of the
-   processing depends on the block's position within the block DAG
- - Make sure the block does not violate finality rules
- - Perform a series of more thorough checks that depend on the block's position
-   within the blockDAG such as verifying block difficulties adhere to
-   difficulty retarget rules, timestamps are after the median of the last
-   several blocks, all transactions are finalized, checkpoint blocks match, and
-   block versions are in line with the previous blocks
- - Determine how the block fits into the DAG and perform different actions
-   accordingly
- - Run the transaction scripts to verify the spender is allowed to spend the
-   coins
- - Run GhostDAG to fit the block into a canonical ordering
- - Build the block's UTXO Set, as well as update the global UTXO Set accordingly
- - Insert the block into the block database
-
diff --git a/blockdag/accept.go b/blockdag/accept.go
deleted file mode 100644
index c5142b99..00000000
--- a/blockdag/accept.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2013-2017 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package blockdag
-
-import (
- "fmt"
- "github.com/kaspanet/kaspad/database"
- "github.com/kaspanet/kaspad/util"
-)
-
-func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
- blockHeader := &block.MsgBlock().Header
- newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
- newNode.status = statusInvalidAncestor
- dag.index.AddNode(newNode)
- return dag.index.flushToDB()
-}
-
-// maybeAcceptBlock potentially accepts a block into the block DAG. It
-// performs several validation checks which depend on its position within
-// the block DAG before adding it. The block is expected to have already
-// gone through ProcessBlock before calling this function with it.
-//
-// The flags are also passed to checkBlockContext and connectToDAG.
See -// their documentation for how the flags modify their behavior. -// -// This function MUST be called with the dagLock held (for writes). -func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error { - parents, err := lookupParentNodes(block, dag) - if err != nil { - if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock { - err := dag.addNodeToIndexWithInvalidAncestor(block) - if err != nil { - return err - } - } - return err - } - - // The block must pass all of the validation rules which depend on the - // position of the block within the block DAG. - err = dag.checkBlockContext(block, parents, flags) - if err != nil { - return err - } - - // Create a new block node for the block and add it to the node index. - newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K) - newNode.status = statusDataStored - dag.index.AddNode(newNode) - - // Insert the block into the database if it's not already there. Even - // though it is possible the block will ultimately fail to connect, it - // has already passed all proof-of-work and validity tests which means - // it would be prohibitively expensive for an attacker to fill up the - // disk with a bunch of blocks that fail to connect. This is necessary - // since it allows block download to be decoupled from the much more - // expensive connection logic. It also has some other nice properties - // such as making blocks that never become part of the DAG or - // blocks that fail to connect available for further analysis. - err = dag.db.Update(func(dbTx database.Tx) error { - err := dbStoreBlock(dbTx, block) - if err != nil { - return err - } - return dag.index.flushToDBWithTx(dbTx) - }) - if err != nil { - return err - } - - // Make sure that all the block's transactions are finalized - fastAdd := flags&BFFastAdd == BFFastAdd - bluestParent := parents.bluest() - if !fastAdd { - if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil { - return err - } - } - - block.SetChainHeight(newNode.chainHeight) - - // Connect the passed block to the DAG. This also handles validation of the - // transaction scripts. - chainUpdates, err := dag.addBlock(newNode, parents, block, flags) - if err != nil { - return err - } - - // Notify the caller that the new block was accepted into the block - // DAG. The caller would typically want to react by relaying the - // inventory to other peers. 
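Editor's note (observation, not in the original source): dagLock is released around the notification calls below, presumably so that notification handlers can call back into the DAG (for example, to query block or chain state) without deadlocking on the write lock; it is re-acquired immediately afterwards because this function's contract is that the caller holds dagLock for writes throughout.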
- dag.dagLock.Unlock()
- dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
- Block: block,
- WasUnorphaned: flags&BFWasUnorphaned != 0,
- })
- if len(chainUpdates.addedChainBlockHashes) > 0 {
- dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
- RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
- AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
- })
- }
- dag.dagLock.Lock()
-
- return nil
-}
-
-func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
- header := block.MsgBlock().Header
- parentHashes := header.ParentHashes
-
- nodes := newSet()
- for _, parentHash := range parentHashes {
- node := blockDAG.index.LookupNode(parentHash)
- if node == nil {
- str := fmt.Sprintf("parent block %s is unknown", parentHash)
- return nil, ruleError(ErrParentBlockUnknown, str)
- } else if blockDAG.index.NodeStatus(node).KnownInvalid() {
- str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
- return nil, ruleError(ErrInvalidAncestorBlock, str)
- }
-
- nodes.add(node)
- }
-
- return nodes, nil
-}
diff --git a/blockdag/accept_test.go b/blockdag/accept_test.go
deleted file mode 100644
index 9d4014f8..00000000
--- a/blockdag/accept_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package blockdag
-
-import (
- "github.com/pkg/errors"
- "path/filepath"
- "strings"
- "testing"
-
- "bou.ke/monkey"
- "github.com/kaspanet/kaspad/dagconfig"
- "github.com/kaspanet/kaspad/database"
- "github.com/kaspanet/kaspad/util"
-)
-
-func TestMaybeAcceptBlockErrors(t *testing.T) {
- // Create a new database and DAG instance to run tests against.
- dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
- DAGParams: &dagconfig.SimNetParams,
- })
- if err != nil {
- t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
- }
- defer teardownFunc()
-
- dag.TestSetCoinbaseMaturity(0)
-
- // Test rejecting the block if its parents are missing
- orphanBlockFile := "blk_3B.dat"
- loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
- if err != nil {
- t.Fatalf("TestMaybeAcceptBlockErrors: "+
- "Error loading file '%s': %s\n", orphanBlockFile, err)
- }
- block := loadedBlocks[0]
-
- err = dag.maybeAcceptBlock(block, BFNone)
- if err == nil {
- t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
- "Expected: %s, got: ", ErrParentBlockUnknown)
- }
- ruleErr, ok := err.(RuleError)
- if !ok {
- t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
- "Expected RuleError but got %s", err)
- } else if ruleErr.ErrorCode != ErrParentBlockUnknown {
- t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
- "Unexpected error code. 
Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode) - } - - // Test rejecting the block if its parents are invalid - blocksFile := "blk_0_to_4.dat" - blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile)) - if err != nil { - t.Fatalf("TestMaybeAcceptBlockErrors: "+ - "Error loading file '%s': %s\n", blocksFile, err) - } - - // Add a valid block and mark it as invalid - block1 := blocks[1] - isOrphan, delay, err := dag.ProcessBlock(block1, BFNone) - if err != nil { - t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err) - } - if delay != 0 { - t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future") - } - if isOrphan { - t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan") - } - blockNode1 := dag.index.LookupNode(block1.Hash()) - dag.index.SetStatusFlags(blockNode1, statusValidateFailed) - - block2 := blocks[2] - err = dag.maybeAcceptBlock(block2, BFNone) - if err == nil { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+ - "Expected: %s, got: ", ErrInvalidAncestorBlock) - } - ruleErr, ok = err.(RuleError) - if !ok { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+ - "Expected RuleError but got %s", err) - } else if ruleErr.ErrorCode != ErrInvalidAncestorBlock { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+ - "Unexpected error. Want: %s, got: %s", ErrInvalidAncestorBlock, ruleErr.ErrorCode) - } - - // Set block1's status back to valid for next tests - dag.index.UnsetStatusFlags(blockNode1, statusValidateFailed) - - // Test rejecting the block due to bad context - originalBits := block2.MsgBlock().Header.Bits - block2.MsgBlock().Header.Bits = 0 - err = dag.maybeAcceptBlock(block2, BFNone) - if err == nil { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+ - "Expected: %s, got: ", ErrUnexpectedDifficulty) - } - ruleErr, ok = err.(RuleError) - if !ok { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+ - "Expected RuleError but got %s", err) - } else if ruleErr.ErrorCode != ErrUnexpectedDifficulty { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+ - "Unexpected error. Want: %s, got: %s", ErrUnexpectedDifficulty, ruleErr.ErrorCode) - } - - // Set block2's bits back to valid for next tests - block2.MsgBlock().Header.Bits = originalBits - - // Test rejecting the node due to database error - databaseErrorMessage := "database error" - guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error { - return errors.New(databaseErrorMessage) - }) - defer guard.Unpatch() - err = dag.maybeAcceptBlock(block2, BFNone) - if err == nil { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+ - "Expected: %s, got: ", databaseErrorMessage) - } - if !strings.Contains(err.Error(), databaseErrorMessage) { - t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+ - "Unexpected error. 
Want: %s, got: %s", databaseErrorMessage, err)
- }
- guard.Unpatch()
-
- // Test rejecting the node due to index error
- indexErrorMessage := "index error"
- guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
- return errors.New(indexErrorMessage)
- })
- defer guard.Unpatch()
- err = dag.maybeAcceptBlock(block2, BFNone)
- if err == nil {
- t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
- "Expected %s, got: ", indexErrorMessage)
- }
- if !strings.Contains(err.Error(), indexErrorMessage) {
- t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
- "Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
- }
-}
diff --git a/blockdag/blockheap.go b/blockdag/blockheap.go
deleted file mode 100644
index afcad6f1..00000000
--- a/blockdag/blockheap.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package blockdag
-
-import (
- "container/heap"
-
- "github.com/kaspanet/kaspad/util/daghash"
-)
-
-// baseHeap is an implementation for heap.Interface that sorts blocks by their
-// blue score
-type baseHeap []*blockNode
-
-func (h baseHeap) Len() int { return len(h) }
-func (h baseHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-
-func (h *baseHeap) Push(x interface{}) {
- *h = append(*h, x.(*blockNode))
-}
-
-func (h *baseHeap) Pop() interface{} {
- oldHeap := *h
- oldLength := len(oldHeap)
- popped := oldHeap[oldLength-1]
- *h = oldHeap[0 : oldLength-1]
- return popped
-}
-
-// upHeap extends baseHeap to include Less operation that traverses from bottom to top
-type upHeap struct{ baseHeap }
-
-func (h upHeap) Less(i, j int) bool {
- if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
- return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
- }
-
- return h.baseHeap[i].blueScore < h.baseHeap[j].blueScore
-}
-
-// downHeap extends baseHeap to include Less operation that traverses from top to bottom
-type downHeap struct{ baseHeap }
-
-func (h downHeap) Less(i, j int) bool {
- if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
- return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
- }
-
- return h.baseHeap[i].blueScore > h.baseHeap[j].blueScore
-}
-
-// blockHeap represents a mutable heap of Blocks, sorted by their blue score
-type blockHeap struct {
- impl heap.Interface
-}
-
-// newDownHeap initializes and returns a new blockHeap
-func newDownHeap() blockHeap {
- h := blockHeap{impl: &downHeap{}}
- heap.Init(h.impl)
- return h
-}
-
-// newUpHeap initializes and returns a new blockHeap
-func newUpHeap() blockHeap {
- h := blockHeap{impl: &upHeap{}}
- heap.Init(h.impl)
- return h
-}
-
-// pop removes the block at the top of this heap and returns it: the block
-// with the lowest blue score for an up-heap, or the highest for a down-heap
-func (bh blockHeap) pop() *blockNode {
- return heap.Pop(bh.impl).(*blockNode)
-}
-
-// Push pushes the block onto the heap
-func (bh blockHeap) Push(block *blockNode) {
- heap.Push(bh.impl, block)
-}
-
-// pushSet pushes a blockset to the heap.
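Editor's note (illustrative sketch, not part of the original patch): the two directions differ only in their Less order, so the same pushes pop in opposite blue-score order, ties broken by hash. Here a, b, and c stand for *blockNode values with blue scores 1, 2, and 3:

	up, down := newUpHeap(), newDownHeap()
	for _, n := range []*blockNode{b, c, a} {
		up.Push(n)
		down.Push(n)
	}
	lowest := up.pop()    // a (blue score 1)
	highest := down.pop() // c (blue score 3)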
-func (bh blockHeap) pushSet(bs blockSet) { - for _, block := range bs { - heap.Push(bh.impl, block) - } -} - -// Len returns the length of this heap -func (bh blockHeap) Len() int { - return bh.impl.Len() -} diff --git a/blockdag/blockheap_test.go b/blockdag/blockheap_test.go deleted file mode 100644 index 343e3860..00000000 --- a/blockdag/blockheap_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package blockdag - -import ( - "testing" - - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/util/daghash" -) - -// TestBlockHeap tests pushing, popping, and determining the length of the heap. -func TestBlockHeap(t *testing.T) { - block0Header := dagconfig.MainNetParams.GenesisBlock.Header - block0 := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K) - - block100000Header := Block100000.Header - block100000 := newBlockNode(&block100000Header, setFromSlice(block0), dagconfig.MainNetParams.K) - - block0smallHash := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K) - block0smallHash.hash = &daghash.Hash{} - - tests := []struct { - name string - toPush []*blockNode - expectedLength int - expectedPopUp *blockNode - expectedPopDown *blockNode - }{ - { - name: "empty heap must have length 0", - toPush: []*blockNode{}, - expectedLength: 0, - expectedPopDown: nil, - expectedPopUp: nil, - }, - { - name: "heap with one push must have length 1", - toPush: []*blockNode{block0}, - expectedLength: 1, - expectedPopDown: nil, - expectedPopUp: nil, - }, - { - name: "heap with one push and one pop", - toPush: []*blockNode{block0}, - expectedLength: 0, - expectedPopDown: block0, - expectedPopUp: block0, - }, - { - name: "push two blocks with different heights, heap shouldn't have to rebalance " + - "for down direction, but will have to rebalance for up direction", - toPush: []*blockNode{block100000, block0}, - expectedLength: 1, - expectedPopDown: block100000, - expectedPopUp: block0, - }, - { - name: "push two blocks with different heights, heap shouldn't have to rebalance " + - "for up direction, but will have to rebalance for down direction", - toPush: []*blockNode{block0, block100000}, - expectedLength: 1, - expectedPopDown: block100000, - expectedPopUp: block0, - }, - { - name: "push two blocks with equal heights but different hashes, heap shouldn't have to rebalance " + - "for down direction, but will have to rebalance for up direction", - toPush: []*blockNode{block0, block0smallHash}, - expectedLength: 1, - expectedPopDown: block0, - expectedPopUp: block0smallHash, - }, - { - name: "push two blocks with equal heights but different hashes, heap shouldn't have to rebalance " + - "for up direction, but will have to rebalance for down direction", - toPush: []*blockNode{block0smallHash, block0}, - expectedLength: 1, - expectedPopDown: block0, - expectedPopUp: block0smallHash, - }, - } - - for _, test := range tests { - dHeap := newDownHeap() - for _, block := range test.toPush { - dHeap.Push(block) - } - - var poppedBlock *blockNode - if test.expectedPopDown != nil { - poppedBlock = dHeap.pop() - } - if dHeap.Len() != test.expectedLength { - t.Errorf("unexpected down heap length in test \"%s\". "+ - "Expected: %v, got: %v", test.name, test.expectedLength, dHeap.Len()) - } - if poppedBlock != test.expectedPopDown { - t.Errorf("unexpected popped block for down heap in test \"%s\". 
"+ - "Expected: %v, got: %v", test.name, test.expectedPopDown, poppedBlock) - } - - uHeap := newUpHeap() - for _, block := range test.toPush { - uHeap.Push(block) - } - - poppedBlock = nil - if test.expectedPopUp != nil { - poppedBlock = uHeap.pop() - } - if uHeap.Len() != test.expectedLength { - t.Errorf("unexpected up heap length in test \"%s\". "+ - "Expected: %v, got: %v", test.name, test.expectedLength, uHeap.Len()) - } - if poppedBlock != test.expectedPopUp { - t.Errorf("unexpected popped block for up heap in test \"%s\". "+ - "Expected: %v, got: %v", test.name, test.expectedPopDown, poppedBlock) - } - } -} diff --git a/blockdag/blockidhash.go b/blockdag/blockidhash.go deleted file mode 100644 index e247b8ba..00000000 --- a/blockdag/blockidhash.go +++ /dev/null @@ -1,136 +0,0 @@ -package blockdag - -import ( - "github.com/kaspanet/kaspad/database" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/pkg/errors" -) - -var ( - // idByHashIndexBucketName is the name of the db bucket used to house - // the block hash -> block id index. - idByHashIndexBucketName = []byte("idbyhashidx") - - // hashByIDIndexBucketName is the name of the db bucket used to house - // the block id -> block hash index. - hashByIDIndexBucketName = []byte("hashbyididx") - - currentBlockIDKey = []byte("currentblockid") -) - -// ----------------------------------------------------------------------------- -// This is a mapping between block hashes and unique IDs. The ID -// is simply a sequentially incremented uint64 that is used instead of block hash -// for the indexers. This is useful because it is only 8 bytes versus 32 bytes -// hashes and thus saves a ton of space when a block is referenced in an index. -// It consists of three buckets: the first bucket maps the hash of each -// block to the unique ID and the second maps that ID back to the block hash. -// The third bucket contains the last received block ID, and is used -// when starting the node to check that the enabled indexes are up to date -// with the latest received block, and if not, initiate recovery process. -// -// The serialized format for keys and values in the block hash to ID bucket is: -// = -// -// Field Type Size -// hash daghash.Hash 32 bytes -// ID uint64 8 bytes -// ----- -// Total: 40 bytes -// -// The serialized format for keys and values in the ID to block hash bucket is: -// = -// -// Field Type Size -// ID uint64 8 bytes -// hash daghash.Hash 32 bytes -// ----- -// Total: 40 bytes -// -// ----------------------------------------------------------------------------- - -const blockIDSize = 8 // 8 bytes for block ID - -// DBFetchBlockIDByHash uses an existing database transaction to retrieve the -// block id for the provided hash from the index. -func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) { - hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName) - serializedID := hashIndex.Get(hash[:]) - if serializedID == nil { - return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash) - } - - return DeserializeBlockID(serializedID), nil -} - -// DBFetchBlockHashBySerializedID uses an existing database transaction to -// retrieve the hash for the provided serialized block id from the index. 
-func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) { - idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName) - hashBytes := idIndex.Get(serializedID) - if hashBytes == nil { - return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID)) - } - - var hash daghash.Hash - copy(hash[:], hashBytes) - return &hash, nil -} - -// dbPutBlockIDIndexEntry uses an existing database transaction to update or add -// the index entries for the hash to id and id to hash mappings for the provided -// values. -func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error { - // Add the block hash to ID mapping to the index. - meta := dbTx.Metadata() - hashIndex := meta.Bucket(idByHashIndexBucketName) - if err := hashIndex.Put(hash[:], serializedID[:]); err != nil { - return err - } - - // Add the block ID to hash mapping to the index. - idIndex := meta.Bucket(hashByIDIndexBucketName) - return idIndex.Put(serializedID[:], hash[:]) -} - -// DBFetchCurrentBlockID returns the last known block ID. -func DBFetchCurrentBlockID(dbTx database.Tx) uint64 { - serializedID := dbTx.Metadata().Get(currentBlockIDKey) - if serializedID == nil { - return 0 - } - return DeserializeBlockID(serializedID) -} - -// DeserializeBlockID returns a deserialized block id -func DeserializeBlockID(serializedID []byte) uint64 { - return byteOrder.Uint64(serializedID) -} - -// SerializeBlockID returns a serialized block id -func SerializeBlockID(blockID uint64) []byte { - serializedBlockID := make([]byte, blockIDSize) - byteOrder.PutUint64(serializedBlockID, blockID) - return serializedBlockID -} - -// DBFetchBlockHashByID uses an existing database transaction to retrieve the -// hash for the provided block id from the index. -func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) { - return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id)) -} - -func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) { - currentBlockID := DBFetchCurrentBlockID(dbTx) - newBlockID := currentBlockID + 1 - serializedNewBlockID := SerializeBlockID(newBlockID) - err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID) - if err != nil { - return 0, err - } - err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID) - if err != nil { - return 0, err - } - return newBlockID, nil -} diff --git a/blockdag/blockindex.go b/blockdag/blockindex.go deleted file mode 100644 index 6a5a09df..00000000 --- a/blockdag/blockindex.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2015-2017 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package blockdag - -import ( - "sync" - - "github.com/kaspanet/kaspad/dagconfig" - "github.com/kaspanet/kaspad/database" - "github.com/kaspanet/kaspad/util/daghash" -) - -// blockIndex provides facilities for keeping track of an in-memory index of the -// block DAG. -type blockIndex struct { - // The following fields are set when the instance is created and can't - // be changed afterwards, so there is no need to protect them with a - // separate mutex. - db database.DB - dagParams *dagconfig.Params - - sync.RWMutex - index map[daghash.Hash]*blockNode - dirty map[*blockNode]struct{} -} - -// newBlockIndex returns a new empty instance of a block index. 
The index will -// be dynamically populated as block nodes are loaded from the database and -// manually added. -func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex { - return &blockIndex{ - db: db, - dagParams: dagParams, - index: make(map[daghash.Hash]*blockNode), - dirty: make(map[*blockNode]struct{}), - } -} - -// HaveBlock returns whether or not the block index contains the provided hash. -// -// This function is safe for concurrent access. -func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool { - bi.RLock() - _, hasBlock := bi.index[*hash] - bi.RUnlock() - return hasBlock -} - -// LookupNode returns the block node identified by the provided hash. It will -// return nil if there is no entry for the hash. -// -// This function is safe for concurrent access. -func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode { - bi.RLock() - node := bi.index[*hash] - bi.RUnlock() - return node -} - -// AddNode adds the provided node to the block index and marks it as dirty. -// Duplicate entries are not checked so it is up to caller to avoid adding them. -// -// This function is safe for concurrent access. -func (bi *blockIndex) AddNode(node *blockNode) { - bi.Lock() - bi.addNode(node) - bi.dirty[node] = struct{}{} - bi.Unlock() -} - -// addNode adds the provided node to the block index, but does not mark it as -// dirty. This can be used while initializing the block index. -// -// This function is NOT safe for concurrent access. -func (bi *blockIndex) addNode(node *blockNode) { - bi.index[*node.hash] = node -} - -// NodeStatus provides concurrent-safe access to the status field of a node. -// -// This function is safe for concurrent access. -func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus { - bi.RLock() - status := node.status - bi.RUnlock() - return status -} - -// SetStatusFlags flips the provided status flags on the block node to on, -// regardless of whether they were on or off previously. This does not unset any -// flags currently on. -// -// This function is safe for concurrent access. -func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) { - bi.Lock() - node.status |= flags - bi.dirty[node] = struct{}{} - bi.Unlock() -} - -// UnsetStatusFlags flips the provided status flags on the block node to off, -// regardless of whether they were on or off previously. -// -// This function is safe for concurrent access. -func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) { - bi.Lock() - node.status &^= flags - bi.dirty[node] = struct{}{} - bi.Unlock() -} - -// flushToDB writes all dirty block nodes to the database. If all writes -// succeed, this clears the dirty set. -func (bi *blockIndex) flushToDB() error { - return bi.db.Update(func(dbTx database.Tx) error { - return bi.flushToDBWithTx(dbTx) - }) -} - -// flushToDBWithTx writes all dirty block nodes to the database. If all -// writes succeed, this clears the dirty set. -func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error { - bi.Lock() - defer bi.Unlock() - if len(bi.dirty) == 0 { - return nil - } - - for node := range bi.dirty { - err := dbStoreBlockNode(dbTx, node) - if err != nil { - return err - } - } - - // If write was successful, clear the dirty set. 
-	bi.dirty = make(map[*blockNode]struct{})
-	return nil
-}
diff --git a/blockdag/blockindex_test.go b/blockdag/blockindex_test.go
deleted file mode 100644
index 5662ae01..00000000
--- a/blockdag/blockindex_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package blockdag
-
-import (
-	"github.com/pkg/errors"
-	"strings"
-	"testing"
-	"time"
-
-	"bou.ke/monkey"
-	"github.com/kaspanet/kaspad/dagconfig"
-	"github.com/kaspanet/kaspad/database"
-)
-
-func TestAncestorErrors(t *testing.T) {
-	node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
-	node.chainHeight = 2
-	ancestor := node.SelectedAncestor(3)
-	if ancestor != nil {
-		t.Errorf("TestAncestorErrors: SelectedAncestor() unexpectedly returned a node. Expected: <nil>")
-	}
-}
-
-func TestFlushToDBErrors(t *testing.T) {
-	// Create a new database and DAG instance to run tests against.
-	dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
-		DAGParams: &dagconfig.SimNetParams,
-	})
-	if err != nil {
-		t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
-	}
-	defer teardownFunc()
-
-	// Call flushToDB without anything to flush. This should succeed.
-	err = dag.index.flushToDB()
-	if err != nil {
-		t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
-			"Unexpected flushToDB error: %s", err)
-	}
-
-	// Mark the genesis block as dirty
-	dag.index.SetStatusFlags(dag.genesis, statusValid)
-
-	// Test flushToDB failure due to database error
-	databaseErrorMessage := "database error"
-	guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
-		return errors.New(databaseErrorMessage)
-	})
-	defer guard.Unpatch()
-	err = dag.index.flushToDB()
-	if err == nil {
-		t.Fatalf("TestFlushToDBErrors: flushToDB failure due to database error: "+
-			"Expected: %s, got: <nil>", databaseErrorMessage)
-	}
-	if !strings.Contains(err.Error(), databaseErrorMessage) {
-		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
-			"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
-	}
-}
diff --git a/blockdag/blocklocator.go b/blockdag/blocklocator.go
deleted file mode 100644
index 6998a31b..00000000
--- a/blockdag/blocklocator.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package blockdag
-
-import (
-	"github.com/kaspanet/kaspad/util"
-	"github.com/kaspanet/kaspad/util/daghash"
-)
-
-// BlockLocator is used to help locate a specific block. The algorithm for
-// building the block locator is to add block hashes in reverse order on the
-// block's selected parent chain until the desired stop block is reached.
-// In order to keep the list of locator hashes to a reasonable number of entries,
-// the step between each entry is doubled each loop iteration to exponentially
-// decrease the number of hashes as a function of the distance from the block
-// being located.
-//
-// For example, assume a selected parent chain with IDs as depicted below, and the
-// stop block is genesis:
-// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
-//
-// The block locator for block 17 would be the hashes of blocks:
-// [17 16 14 10 2 genesis]
type BlockLocator []*daghash.Hash
-
-// BlockLocatorFromHashes returns a block locator from start and stop hash.
-// See BlockLocator for details on the algorithm used to create a block locator.
-//
-// In addition to the general algorithm referenced above, this function will
-// return the block locator for the selected tip if the passed hash is not currently
-// known.
-//
-// This function is safe for concurrent access.
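Before the implementation below, a standalone sketch of the height schedule the BlockLocator comment above describes (locatorHeights is a hypothetical helper, not part of the deleted file): starting from the located block's chain height, the step back doubles after every appended entry until the stop block is reached.

func locatorHeights(startHeight, stopHeight uint64) []uint64 {
	// Start with the located block itself, then walk back toward the
	// stop block, doubling the step after each appended entry.
	heights := []uint64{startHeight}
	step := uint64(1)
	for height := startHeight; height > stopHeight; {
		if height < stopHeight+step {
			height = stopHeight // clamp so the final entry is the stop block
		} else {
			height -= step
		}
		heights = append(heights, height)
		step *= 2
	}
	return heights
}

locatorHeights(17, 0) yields [17 16 14 10 2 0], matching the example in the BlockLocator comment above.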
-func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
-	dag.dagLock.RLock()
-	defer dag.dagLock.RUnlock()
-	startNode := dag.index.LookupNode(startHash)
-	var stopNode *blockNode
-	if !stopHash.IsEqual(&daghash.ZeroHash) {
-		stopNode = dag.index.LookupNode(stopHash)
-	}
-	return dag.blockLocator(startNode, stopNode)
-}
-
-// LatestBlockLocator returns a block locator for the current tips of the DAG.
-//
-// This function is safe for concurrent access.
-func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
-	dag.dagLock.RLock()
-	defer dag.dagLock.RUnlock()
-	return dag.blockLocator(nil, nil)
-}
-
-// blockLocator returns a block locator for the passed start and stop nodes.
-// The default value for the start node is the selected tip, and the default
-// value for the stop node is the genesis block.
-//
-// See the BlockLocator type comments for more details.
-//
-// This function MUST be called with the DAG state lock held (for reads).
-func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
-	// Use the selected tip if requested.
-	if startNode == nil {
-		startNode = dag.virtual.selectedParent
-	}
-
-	if stopNode == nil {
-		stopNode = dag.genesis
-	}
-
-	// We use the selected parent of the start node, so the
-	// block locator won't contain the start node.
-	startNode = startNode.selectedParent
-
-	// If the start node or the stop node are not in the
-	// virtual's selected parent chain, we replace them with their
-	// closest selected parent that is part of the virtual's
-	// selected parent chain.
-	for !dag.IsInSelectedParentChain(stopNode.hash) {
-		stopNode = stopNode.selectedParent
-	}
-
-	for !dag.IsInSelectedParentChain(startNode.hash) {
-		startNode = startNode.selectedParent
-	}
-
-	// Calculate the max number of entries that will ultimately be in the
-	// block locator: 2 entries for startNode.hash and stopNode.hash, plus
-	// floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for
-	// the skip portion. See the description of the algorithm for how these
-	// numbers are derived.
-	maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
-	locator := make(BlockLocator, 0, maxEntries)
-
-	step := uint64(1)
-	for node := startNode; node != nil; {
-		locator = append(locator, node.hash)
-
-		// Nothing more to add once the stop node has been added.
-		if node.chainHeight == stopNode.chainHeight {
-			break
-		}
-
-		// Calculate the chainHeight of the previous node to include,
-		// ensuring the final node is stopNode.
-		nextChainHeight := node.chainHeight - step
-		if nextChainHeight < stopNode.chainHeight {
-			nextChainHeight = stopNode.chainHeight
-		}
-
-		// Walk backwards through the nodes to the correct ancestor.
-		node = node.SelectedAncestor(nextChainHeight)
-
-		// Double the distance between included hashes.
-		step *= 2
-	}
-
-	return locator
-}
-
-// FindNextLocatorBoundaries returns the lowest unknown block locator hash
-// and the highest known block locator hash. This is used to create the
-// next block locator to find the highest shared known chain block with the
-// sync peer.
-//
-// This function MUST be called with the DAG state lock held (for reads).
-func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
-	// Find the most recent locator block hash in the DAG. In the case none of
-	// the hashes in the locator are in the DAG, fall back to the genesis block.
-	stopNode := dag.genesis
-	nextBlockLocatorIndex := int64(len(locator) - 1)
-	for i, hash := range locator {
-		node := dag.index.LookupNode(hash)
-		if node != nil {
-			stopNode = node
-			nextBlockLocatorIndex = int64(i) - 1
-			break
-		}
-	}
-	if nextBlockLocatorIndex < 0 {
-		return nil, stopNode.hash
-	}
-	return locator[nextBlockLocatorIndex], stopNode.hash
-}
diff --git a/blockdag/blocknode.go b/blockdag/blocknode.go
deleted file mode 100644
index 076c9b1b..00000000
--- a/blockdag/blocknode.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2015-2017 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package blockdag
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/kaspanet/kaspad/util/daghash"
-	"github.com/kaspanet/kaspad/wire"
-)
-
-// blockStatus is a bit field representing the validation state of the block.
-type blockStatus byte
-
-const (
-	// statusDataStored indicates that the block's payload is stored on disk.
-	statusDataStored blockStatus = 1 << iota
-
-	// statusValid indicates that the block has been fully validated.
-	statusValid
-
-	// statusValidateFailed indicates that the block has failed validation.
-	statusValidateFailed
-
-	// statusInvalidAncestor indicates that one of the block's ancestors
-	// has failed validation, thus the block is also invalid.
-	statusInvalidAncestor
-
-	// statusNone indicates that the block has no validation state flags set.
-	//
-	// NOTE: This must be defined last in order to avoid influencing iota.
-	statusNone blockStatus = 0
-)
-
-// KnownValid returns whether the block is known to be valid. This will return
-// false for a valid block that has not been fully validated yet.
-func (status blockStatus) KnownValid() bool {
-	return status&statusValid != 0
-}
-
-// KnownInvalid returns whether the block is known to be invalid. This may be
-// because the block itself failed validation or any of its ancestors is
-// invalid. This will return false for invalid blocks that have not been proven
-// invalid yet.
-func (status blockStatus) KnownInvalid() bool {
-	return status&(statusValidateFailed|statusInvalidAncestor) != 0
-}
-
-// blockNode represents a block within the block DAG. The DAG is stored in
-// the block database.
-type blockNode struct {
-	// NOTE: The order of the definitions in this struct should not be
-	// changed without considering how it affects alignment on 64-bit
-	// platforms. The current order is specifically crafted to result in
-	// minimal padding. There will be hundreds of thousands of these in
-	// memory, so a few extra bytes of padding adds up.
-
-	// parents is the set of parent blocks for this node.
-	parents blockSet
-
-	// selectedParent is the selected parent for this node.
-	// The selected parent is the parent that, if chosen, will maximize
-	// the blue score of this block.
-	selectedParent *blockNode
-
-	// children are all the blocks that refer to this block as a parent.
-	children blockSet
-
-	// blues are all blue blocks in this block's worldview that are in its selected parent anticone.
-	blues []*blockNode
-
-	// blueScore is the count of all the blue blocks in this block's past.
-	blueScore uint64
-
-	// hash is the double SHA-256 of the block.
-	hash *daghash.Hash
-
-	// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
-	chainHeight uint64
-
-	// Some fields from block headers to aid in reconstructing headers
-	// from memory. These must be treated as immutable and are intentionally
-	// ordered to avoid padding on 64-bit platforms.
-	version              int32
-	bits                 uint32
-	nonce                uint64
-	timestamp            int64
-	hashMerkleRoot       *daghash.Hash
-	acceptedIDMerkleRoot *daghash.Hash
-	utxoCommitment       *daghash.Hash
-
-	// status is a bitfield representing the validation state of the block. The
-	// status field, unlike the other fields, may be written to and so should
-	// only be accessed using the concurrent-safe NodeStatus method on
-	// blockIndex once the node has been added to the global index.
-	status blockStatus
-
-	// isFinalized determines whether the node is below the finality point.
-	isFinalized bool
-}
-
-// initBlockNode initializes a block node from the given header and parent nodes.
-// This function is NOT safe for concurrent access. It must only be called when
-// initially creating a node.
-func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
-	*node = blockNode{
-		parents:   parents,
-		children:  make(blockSet),
-		timestamp: time.Now().Unix(),
-	}
-
-	// blockHeader is nil only for the virtual block
-	if blockHeader != nil {
-		node.hash = blockHeader.BlockHash()
-		node.version = blockHeader.Version
-		node.bits = blockHeader.Bits
-		node.nonce = blockHeader.Nonce
-		node.timestamp = blockHeader.Timestamp.Unix()
-		node.hashMerkleRoot = blockHeader.HashMerkleRoot
-		node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
-		node.utxoCommitment = blockHeader.UTXOCommitment
-	} else {
-		node.hash = &daghash.ZeroHash
-	}
-
-	if len(parents) > 0 {
-		node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
-		node.chainHeight = calculateChainHeight(node)
-	}
-}
-
-func calculateChainHeight(node *blockNode) uint64 {
-	if node.isGenesis() {
-		return 0
-	}
-	return node.selectedParent.chainHeight + 1
-}
-
-// newBlockNode returns a new block node for the given block header and parent
-// nodes. This function is NOT safe for concurrent access.
-func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
-	var node blockNode
-	initBlockNode(&node, blockHeader, parents, phantomK)
-	return &node
-}
-
-// updateParentsChildren updates the node's parents to point to the new node.
-func (node *blockNode) updateParentsChildren() {
-	for _, parent := range node.parents {
-		parent.children.add(node)
-	}
-}
-
-// Header constructs a block header from the node and returns it.
-//
-// This function is safe for concurrent access.
-func (node *blockNode) Header() *wire.BlockHeader {
-	// No lock is needed because all accessed fields are immutable.
-	return &wire.BlockHeader{
-		Version:              node.version,
-		ParentHashes:         node.ParentHashes(),
-		HashMerkleRoot:       node.hashMerkleRoot,
-		AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
-		UTXOCommitment:       node.utxoCommitment,
-		Timestamp:            time.Unix(node.timestamp, 0),
-		Bits:                 node.bits,
-		Nonce:                node.nonce,
-	}
-}
-
-// SelectedAncestor returns the ancestor block node at the provided chain-height by following
-// the selected-parents chain backwards from this node. The returned block will be nil when a
-// chain height is requested that is greater than the chain height of the passed node.
-//
-// This function is safe for concurrent access.
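A brief usage sketch for the selected-parent walk implemented below (grandSelectedParent is hypothetical, not from the deleted file): SelectedAncestor(node.chainHeight) is the node itself, SelectedAncestor(0) reaches the genesis, and RelativeAncestor (defined further below) simply subtracts a distance first.

// grandSelectedParent returns the node two hops back along the
// selected-parent chain. Sketch only.
func grandSelectedParent(node *blockNode) *blockNode {
	if node.chainHeight < 2 {
		// Guard explicitly: node.chainHeight-2 would underflow uint64,
		// although SelectedAncestor would still return nil for the
		// resulting out-of-range chain height.
		return nil
	}
	return node.RelativeAncestor(2)
}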
-func (node *blockNode) SelectedAncestor(chainHeight uint64) *blockNode {
-	if chainHeight > node.chainHeight {
-		return nil
-	}
-
-	n := node
-	for ; n != nil && n.chainHeight != chainHeight; n = n.selectedParent {
-		// Intentionally left blank
-	}
-
-	return n
-}
-
-// RelativeAncestor returns the ancestor block node a relative 'distance' of
-// chain-blocks before this node. This is equivalent to calling SelectedAncestor
-// with the node's chain-height minus the provided distance.
-//
-// This function is safe for concurrent access.
-func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
-	return node.SelectedAncestor(node.chainHeight - distance)
-}
-
-// PastMedianTime returns the median time of the previous few blocks
-// prior to, and including, the block node.
-//
-// This function is safe for concurrent access.
-func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
-	window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
-	medianTimestamp, err := window.medianTimestamp()
-	if err != nil {
-		panic(fmt.Sprintf("blueBlockWindow: %s", err))
-	}
-	return time.Unix(medianTimestamp, 0)
-}
-
-func (node *blockNode) ParentHashes() []*daghash.Hash {
-	return node.parents.hashes()
-}
-
-// isGenesis returns whether the current block is the genesis block.
-func (node *blockNode) isGenesis() bool {
-	return len(node.parents) == 0
-}
-
-func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
-	return node.blueScore / uint64(dag.dagParams.FinalityInterval)
-}
-
-// String returns a string that contains the block hash.
-func (node blockNode) String() string {
-	return node.hash.String()
-}
diff --git a/blockdag/blocknode_test.go b/blockdag/blocknode_test.go
deleted file mode 100644
index 59a83d5b..00000000
--- a/blockdag/blocknode_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package blockdag
-
-import (
-	"testing"
-)
-
-func TestChainHeight(t *testing.T) {
-	phantomK := uint32(2)
-	buildNode := buildNodeGenerator(phantomK, true)
-
-	node0 := buildNode(setFromSlice())
-	node1 := buildNode(setFromSlice(node0))
-	node2 := buildNode(setFromSlice(node0))
-	node3 := buildNode(setFromSlice(node0))
-	node4 := buildNode(setFromSlice(node1, node2, node3))
-	node5 := buildNode(setFromSlice(node1, node2, node3))
-	node6 := buildNode(setFromSlice(node1, node2, node3))
-	node7 := buildNode(setFromSlice(node0))
-	node8 := buildNode(setFromSlice(node7))
-	node9 := buildNode(setFromSlice(node8))
-	node10 := buildNode(setFromSlice(node9, node6))
-
-	// Because nodes 7 & 8 were mined secretly, node10's selected
-	// parent will be node6, although node9 is higher. So in this
-	// case, node10's chainHeight will be 3 (through node6) rather
-	// than 4 (through node9).
-
-	tests := []struct {
-		node                *blockNode
-		expectedChainHeight uint64
-	}{
-		{
-			node:                node0,
-			expectedChainHeight: 0,
-		},
-		{
-			node:                node1,
-			expectedChainHeight: 1,
-		},
-		{
-			node:                node2,
-			expectedChainHeight: 1,
-		},
-		{
-			node:                node3,
-			expectedChainHeight: 1,
-		},
-		{
-			node:                node4,
-			expectedChainHeight: 2,
-		},
-		{
-			node:                node5,
-			expectedChainHeight: 2,
-		},
-		{
-			node:                node6,
-			expectedChainHeight: 2,
-		},
-		{
-			node:                node7,
-			expectedChainHeight: 1,
-		},
-		{
-			node:                node8,
-			expectedChainHeight: 2,
-		},
-		{
-			node:                node9,
-			expectedChainHeight: 3,
-		},
-		{
-			node:                node10,
-			expectedChainHeight: 3,
-		},
-	}
-
-	for _, test := range tests {
-		if test.node.chainHeight != test.expectedChainHeight {
-			t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
-		}
-		if calculateChainHeight(test.node) != test.expectedChainHeight {
-			t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, calculateChainHeight(test.node))
-		}
-	}
-
-}
diff --git a/blockdag/blockset.go b/blockdag/blockset.go
deleted file mode 100644
index b6d7342c..00000000
--- a/blockdag/blockset.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package blockdag
-
-import (
-	"strings"
-
-	"github.com/kaspanet/kaspad/util/daghash"
-)
-
-// blockSet implements a basic unsorted set of blocks
-type blockSet map[daghash.Hash]*blockNode
-
-// newSet creates a new, empty blockSet
-func newSet() blockSet {
-	return map[daghash.Hash]*blockNode{}
-}
-
-// setFromSlice converts a slice of blocks into an unordered set represented as a map
-func setFromSlice(blocks ...*blockNode) blockSet {
-	set := newSet()
-	for _, block := range blocks {
-		set.add(block)
-	}
-	return set
-}
-
-// add adds a block to this blockSet
-func (bs blockSet) add(block *blockNode) {
-	bs[*block.hash] = block
-}
-
-// remove removes a block from this blockSet, if it exists.
-// Does nothing if this set does not contain the block.
-func (bs blockSet) remove(block *blockNode) {
-	delete(bs, *block.hash)
-}
-
-// clone clones the block set
-func (bs blockSet) clone() blockSet {
-	clone := newSet()
-	for _, block := range bs {
-		clone.add(block)
-	}
-	return clone
-}
-
-// subtract returns the difference between this blockSet and another blockSet
-func (bs blockSet) subtract(other blockSet) blockSet {
-	diff := newSet()
-	for _, block := range bs {
-		if !other.contains(block) {
-			diff.add(block)
-		}
-	}
-	return diff
-}
-
-// addSet adds all blocks in the other set to this set
-func (bs blockSet) addSet(other blockSet) {
-	for _, block := range other {
-		bs.add(block)
-	}
-}
-
-// addSlice adds the provided slice to this set
-func (bs blockSet) addSlice(slice []*blockNode) {
-	for _, block := range slice {
-		bs.add(block)
-	}
-}
-
-// union returns a blockSet that contains all blocks included in this set,
-// the other set, or both
-func (bs blockSet) union(other blockSet) blockSet {
-	union := bs.clone()
-
-	union.addSet(other)
-
-	return union
-}
-
-// contains returns true iff this set contains the given block
-func (bs blockSet) contains(block *blockNode) bool {
-	_, ok := bs[*block.hash]
-	return ok
-}
-
-// containsHash returns true iff this set contains the given block hash
-func (bs blockSet) containsHash(hash *daghash.Hash) bool {
-	_, ok := bs[*hash]
-	return ok
-}
-
-// hashesEqual returns true if the given hashes are equal to the hashes
-// of the blocks in this set.
-// NOTE: The given hash slice must not contain duplicates. -func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool { - if len(hashes) != len(bs) { - return false - } - - for _, hash := range hashes { - if _, wasFound := bs[*hash]; !wasFound { - return false - } - } - - return true -} - -// hashes returns the hashes of the blocks in this set. -func (bs blockSet) hashes() []*daghash.Hash { - hashes := make([]*daghash.Hash, 0, len(bs)) - for _, node := range bs { - hashes = append(hashes, node.hash) - } - daghash.Sort(hashes) - return hashes -} - -func (bs blockSet) String() string { - nodeStrs := make([]string, 0, len(bs)) - for _, node := range bs { - nodeStrs = append(nodeStrs, node.String()) - } - return strings.Join(nodeStrs, ",") -} - -// anyChildInSet returns true iff any child of block is contained within this set -func (bs blockSet) anyChildInSet(block *blockNode) bool { - for _, child := range block.children { - if bs.contains(child) { - return true - } - } - - return false -} - -func (bs blockSet) bluest() *blockNode { - var bluestNode *blockNode - var maxScore uint64 - for _, node := range bs { - if bluestNode == nil || - node.blueScore > maxScore || - (node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) { - bluestNode = node - maxScore = node.blueScore - } - } - return bluestNode -} diff --git a/blockdag/blockset_test.go b/blockdag/blockset_test.go deleted file mode 100644 index 23aff112..00000000 --- a/blockdag/blockset_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package blockdag - -import ( - "reflect" - "testing" - - "github.com/kaspanet/kaspad/util/daghash" -) - -func TestHashes(t *testing.T) { - bs := setFromSlice( - &blockNode{ - hash: &daghash.Hash{3}, - }, - &blockNode{ - hash: &daghash.Hash{1}, - }, - &blockNode{ - hash: &daghash.Hash{0}, - }, - &blockNode{ - hash: &daghash.Hash{2}, - }, - ) - - expected := []*daghash.Hash{ - {0}, - {1}, - {2}, - {3}, - } - - hashes := bs.hashes() - if !daghash.AreEqual(hashes, expected) { - t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected) - } -} - -func TestBlockSetSubtract(t *testing.T) { - node1 := &blockNode{hash: &daghash.Hash{10}} - node2 := &blockNode{hash: &daghash.Hash{20}} - node3 := &blockNode{hash: &daghash.Hash{30}} - - tests := []struct { - name string - setA blockSet - setB blockSet - expectedResult blockSet - }{ - { - name: "both sets empty", - setA: setFromSlice(), - setB: setFromSlice(), - expectedResult: setFromSlice(), - }, - { - name: "subtract an empty set", - setA: setFromSlice(node1), - setB: setFromSlice(), - expectedResult: setFromSlice(node1), - }, - { - name: "subtract from empty set", - setA: setFromSlice(), - setB: setFromSlice(node1), - expectedResult: setFromSlice(), - }, - { - name: "subtract unrelated set", - setA: setFromSlice(node1), - setB: setFromSlice(node2), - expectedResult: setFromSlice(node1), - }, - { - name: "typical case", - setA: setFromSlice(node1, node2), - setB: setFromSlice(node2, node3), - expectedResult: setFromSlice(node1), - }, - } - - for _, test := range tests { - result := test.setA.subtract(test.setB) - if !reflect.DeepEqual(result, test.expectedResult) { - t.Errorf("blockSet.subtract: unexpected result in test '%s'. 
"+ - "Expected: %v, got: %v", test.name, test.expectedResult, result) - } - } -} - -func TestBlockSetAddSet(t *testing.T) { - node1 := &blockNode{hash: &daghash.Hash{10}} - node2 := &blockNode{hash: &daghash.Hash{20}} - node3 := &blockNode{hash: &daghash.Hash{30}} - - tests := []struct { - name string - setA blockSet - setB blockSet - expectedResult blockSet - }{ - { - name: "both sets empty", - setA: setFromSlice(), - setB: setFromSlice(), - expectedResult: setFromSlice(), - }, - { - name: "add an empty set", - setA: setFromSlice(node1), - setB: setFromSlice(), - expectedResult: setFromSlice(node1), - }, - { - name: "add to empty set", - setA: setFromSlice(), - setB: setFromSlice(node1), - expectedResult: setFromSlice(node1), - }, - { - name: "add already added member", - setA: setFromSlice(node1, node2), - setB: setFromSlice(node1), - expectedResult: setFromSlice(node1, node2), - }, - { - name: "typical case", - setA: setFromSlice(node1, node2), - setB: setFromSlice(node2, node3), - expectedResult: setFromSlice(node1, node2, node3), - }, - } - - for _, test := range tests { - test.setA.addSet(test.setB) - if !reflect.DeepEqual(test.setA, test.expectedResult) { - t.Errorf("blockSet.addSet: unexpected result in test '%s'. "+ - "Expected: %v, got: %v", test.name, test.expectedResult, test.setA) - } - } -} - -func TestBlockSetAddSlice(t *testing.T) { - node1 := &blockNode{hash: &daghash.Hash{10}} - node2 := &blockNode{hash: &daghash.Hash{20}} - node3 := &blockNode{hash: &daghash.Hash{30}} - - tests := []struct { - name string - set blockSet - slice []*blockNode - expectedResult blockSet - }{ - { - name: "add empty slice to empty set", - set: setFromSlice(), - slice: []*blockNode{}, - expectedResult: setFromSlice(), - }, - { - name: "add an empty slice", - set: setFromSlice(node1), - slice: []*blockNode{}, - expectedResult: setFromSlice(node1), - }, - { - name: "add to empty set", - set: setFromSlice(), - slice: []*blockNode{node1}, - expectedResult: setFromSlice(node1), - }, - { - name: "add already added member", - set: setFromSlice(node1, node2), - slice: []*blockNode{node1}, - expectedResult: setFromSlice(node1, node2), - }, - { - name: "typical case", - set: setFromSlice(node1, node2), - slice: []*blockNode{node2, node3}, - expectedResult: setFromSlice(node1, node2, node3), - }, - } - - for _, test := range tests { - test.set.addSlice(test.slice) - if !reflect.DeepEqual(test.set, test.expectedResult) { - t.Errorf("blockSet.addSlice: unexpected result in test '%s'. 
"+ - "Expected: %v, got: %v", test.name, test.expectedResult, test.set) - } - } -} - -func TestBlockSetUnion(t *testing.T) { - node1 := &blockNode{hash: &daghash.Hash{10}} - node2 := &blockNode{hash: &daghash.Hash{20}} - node3 := &blockNode{hash: &daghash.Hash{30}} - - tests := []struct { - name string - setA blockSet - setB blockSet - expectedResult blockSet - }{ - { - name: "both sets empty", - setA: setFromSlice(), - setB: setFromSlice(), - expectedResult: setFromSlice(), - }, - { - name: "union against an empty set", - setA: setFromSlice(node1), - setB: setFromSlice(), - expectedResult: setFromSlice(node1), - }, - { - name: "union from an empty set", - setA: setFromSlice(), - setB: setFromSlice(node1), - expectedResult: setFromSlice(node1), - }, - { - name: "union with subset", - setA: setFromSlice(node1, node2), - setB: setFromSlice(node1), - expectedResult: setFromSlice(node1, node2), - }, - { - name: "typical case", - setA: setFromSlice(node1, node2), - setB: setFromSlice(node2, node3), - expectedResult: setFromSlice(node1, node2, node3), - }, - } - - for _, test := range tests { - result := test.setA.union(test.setB) - if !reflect.DeepEqual(result, test.expectedResult) { - t.Errorf("blockSet.union: unexpected result in test '%s'. "+ - "Expected: %v, got: %v", test.name, test.expectedResult, result) - } - } -} - -func TestBlockSetHashesEqual(t *testing.T) { - node1 := &blockNode{hash: &daghash.Hash{10}} - node2 := &blockNode{hash: &daghash.Hash{20}} - - tests := []struct { - name string - set blockSet - hashes []*daghash.Hash - expectedResult bool - }{ - { - name: "empty set, no hashes", - set: setFromSlice(), - hashes: []*daghash.Hash{}, - expectedResult: true, - }, - { - name: "empty set, one hash", - set: setFromSlice(), - hashes: []*daghash.Hash{node1.hash}, - expectedResult: false, - }, - { - name: "set and hashes of different length", - set: setFromSlice(node1, node2), - hashes: []*daghash.Hash{node1.hash}, - expectedResult: false, - }, - { - name: "set equal to hashes", - set: setFromSlice(node1, node2), - hashes: []*daghash.Hash{node1.hash, node2.hash}, - expectedResult: true, - }, - { - name: "set equal to hashes, different order", - set: setFromSlice(node1, node2), - hashes: []*daghash.Hash{node2.hash, node1.hash}, - expectedResult: true, - }, - } - - for _, test := range tests { - result := test.set.hashesEqual(test.hashes) - if result != test.expectedResult { - t.Errorf("blockSet.hashesEqual: unexpected result in test '%s'. "+ - "Expected: %t, got: %t", test.name, test.expectedResult, result) - } - } -} diff --git a/blockdag/blockwindow.go b/blockdag/blockwindow.go deleted file mode 100644 index f70d2396..00000000 --- a/blockdag/blockwindow.go +++ /dev/null @@ -1,75 +0,0 @@ -package blockdag - -import ( - "github.com/kaspanet/kaspad/util" - "github.com/pkg/errors" - "math" - "math/big" - "sort" -) - -type blockWindow []*blockNode - -// blueBlockWindow returns a blockWindow of the given size that contains the -// blues in the past of startindNode, sorted by phantom order. -// If the number of blues in the past of startingNode is less then windowSize, -// the window will be padded by genesis blocks to achieve a size of windowSize. 
-func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
-	window := make(blockWindow, 0, windowSize)
-	currentNode := startingNode
-	for uint64(len(window)) < windowSize && currentNode.selectedParent != nil {
-		for _, blue := range currentNode.blues {
-			window = append(window, blue)
-			if uint64(len(window)) == windowSize {
-				break
-			}
-		}
-		currentNode = currentNode.selectedParent
-	}
-
-	if uint64(len(window)) < windowSize {
-		genesis := currentNode
-		for uint64(len(window)) < windowSize {
-			window = append(window, genesis)
-		}
-	}
-
-	return window
-}
-
-func (window blockWindow) minMaxTimestamps() (min, max int64) {
-	min = math.MaxInt64
-	max = 0
-	for _, node := range window {
-		if node.timestamp < min {
-			min = node.timestamp
-		}
-		if node.timestamp > max {
-			max = node.timestamp
-		}
-	}
-	return
-}
-
-func (window blockWindow) averageTarget() *big.Int {
-	averageTarget := big.NewInt(0)
-	for _, node := range window {
-		target := util.CompactToBig(node.bits)
-		averageTarget.Add(averageTarget, target)
-	}
-	return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
-}
-
-func (window blockWindow) medianTimestamp() (int64, error) {
-	if len(window) == 0 {
-		return 0, errors.New("Cannot calculate median timestamp for an empty block window")
-	}
-	timestamps := make([]int64, len(window))
-	for i, node := range window {
-		timestamps[i] = node.timestamp
-	}
-	sort.Sort(timeSorter(timestamps))
-	return timestamps[len(timestamps)/2], nil
-}
diff --git a/blockdag/blockwindow_test.go b/blockdag/blockwindow_test.go
deleted file mode 100644
index 4f5e8cd8..00000000
--- a/blockdag/blockwindow_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package blockdag
-
-import (
-	"github.com/kaspanet/kaspad/dagconfig"
-	"github.com/kaspanet/kaspad/util/daghash"
-	"github.com/pkg/errors"
-	"reflect"
-	"testing"
-	"time"
-)
-
-func TestBlueBlockWindow(t *testing.T) {
-	params := dagconfig.SimNetParams
-	params.K = 1
-	dag := newTestDAG(&params)
-
-	windowSize := uint64(10)
-	genesisNode := dag.genesis
-	blockTime := genesisNode.Header().Timestamp
-	blockByIDMap := make(map[string]*blockNode)
-	idByBlockMap := make(map[*blockNode]string)
-	blockByIDMap["A"] = genesisNode
-	idByBlockMap[genesisNode] = "A"
-	blockVersion := int32(0x10000000)
-
-	blocksData := []*struct {
-		parents                          []string
-		id                               string // id is a virtual entity used only in tests, so we can define relations between blocks without knowing their hashes
-		expectedWindowWithGenesisPadding []string
-	}{
-		{
-			parents:                          []string{"A"},
-			id:                               "B",
-			expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"B"},
-			id:                               "C",
-			expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"B"},
-			id:                               "D",
-			expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"C", "D"},
-			id:                               "E",
-			expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"C", "D"},
-			id:                               "F",
-			expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"A"},
-			id:                               "G",
-			expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
-		},
-		{
-			parents:                          []string{"G"},
-			id:                               "H",
-			expectedWindowWithGenesisPadding: []string{"G", "A", "A", "A", "A", "A", "A", "A",
"A", "A"}, - }, - { - parents: []string{"H", "F"}, - id: "I", - expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"}, - }, - { - parents: []string{"I"}, - id: "J", - expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"}, - }, - { - parents: []string{"J"}, - id: "K", - expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"}, - }, - { - parents: []string{"K"}, - id: "L", - expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"}, - }, - { - parents: []string{"L"}, - id: "M", - expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"}, - }, - { - parents: []string{"M"}, - id: "N", - expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"}, - }, - { - parents: []string{"N"}, - id: "O", - expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"}, - }, - } - - for _, blockData := range blocksData { - blockTime = blockTime.Add(time.Second) - parents := blockSet{} - for _, parentID := range blockData.parents { - parent := blockByIDMap[parentID] - parents.add(parent) - } - node := newTestNode(parents, blockVersion, 0, blockTime, dag.dagParams.K) - node.hash = &daghash.Hash{} // It helps to predict hash order - for i, char := range blockData.id { - node.hash[i] = byte(char) - } - - dag.index.AddNode(node) - node.updateParentsChildren() - - blockByIDMap[blockData.id] = node - idByBlockMap[node] = blockData.id - - window := blueBlockWindow(node, windowSize) - if err := checkWindowIDs(window, blockData.expectedWindowWithGenesisPadding, idByBlockMap); err != nil { - t.Errorf("Unexpected values for window for block %s: %s", blockData.id, err) - } - } -} - -func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[*blockNode]string) error { - ids := make([]string, len(window)) - for i, node := range window { - ids[i] = idByBlockMap[node] - } - if !reflect.DeepEqual(ids, expectedIDs) { - return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids) - } - return nil -} diff --git a/blockdag/coinbase.go b/blockdag/coinbase.go deleted file mode 100644 index 390865f9..00000000 --- a/blockdag/coinbase.go +++ /dev/null @@ -1,278 +0,0 @@ -package blockdag - -import ( - "bufio" - "bytes" - "encoding/binary" - "github.com/kaspanet/kaspad/util/subnetworkid" - "github.com/pkg/errors" - "io" - "math" - - "github.com/kaspanet/kaspad/database" - "github.com/kaspanet/kaspad/util" - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/util/txsort" - "github.com/kaspanet/kaspad/wire" -) - -// compactFeeData is a specialized data type to store a compact list of fees -// inside a block. -// Every transaction gets a single uint64 value, stored as a plain binary list. -// The transactions are ordered the same way they are ordered inside the block, making it easy -// to traverse every transaction in a block and extract its fee. -// -// compactFeeFactory is used to create such a list. -// compactFeeIterator is used to iterate over such a list. 
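The layout described above is just a packed array of little-endian uint64 values, eight bytes per transaction. A standalone sketch of that layout (feesToCompact is a hypothetical helper using the standard encoding/binary package, not part of the deleted file):

// feesToCompact packs one uint64 fee per transaction, in block order,
// mirroring the compactFeeData layout defined below.
func feesToCompact(fees []uint64) []byte {
	out := make([]byte, 8*len(fees))
	for i, fee := range fees {
		binary.LittleEndian.PutUint64(out[8*i:], fee)
	}
	return out
}

The fee of transaction i can be read back with binary.LittleEndian.Uint64(out[8*i:]), which is what compactFeeIterator effectively does via binary.Read.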
-
-type compactFeeData []byte
-
-func (cfd compactFeeData) Len() int {
-	return len(cfd) / 8
-}
-
-type compactFeeFactory struct {
-	buffer *bytes.Buffer
-	writer *bufio.Writer
-}
-
-func newCompactFeeFactory() *compactFeeFactory {
-	buffer := bytes.NewBuffer([]byte{})
-	return &compactFeeFactory{
-		buffer: buffer,
-		writer: bufio.NewWriter(buffer),
-	}
-}
-
-func (cfw *compactFeeFactory) add(txFee uint64) error {
-	return binary.Write(cfw.writer, binary.LittleEndian, txFee)
-}
-
-func (cfw *compactFeeFactory) data() (compactFeeData, error) {
-	err := cfw.writer.Flush()
-
-	return compactFeeData(cfw.buffer.Bytes()), err
-}
-
-type compactFeeIterator struct {
-	reader io.Reader
-}
-
-func (cfd compactFeeData) iterator() *compactFeeIterator {
-	return &compactFeeIterator{
-		reader: bufio.NewReader(bytes.NewBuffer(cfd)),
-	}
-}
-
-func (cfr *compactFeeIterator) next() (uint64, error) {
-	var txFee uint64
-
-	err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)
-
-	return txFee, err
-}
-
-// The following functions relate to storing and retrieving fee data from the database
-var feeBucket = []byte("fees")
-
-// getBluesFeeData returns the compactFeeData for all of the node's blues,
-// used to calculate the fees this blockNode needs to pay
-func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
-	bluesFeeData := make(map[daghash.Hash]compactFeeData)
-
-	err := dag.db.View(func(dbTx database.Tx) error {
-		for _, blueBlock := range node.blues {
-			feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
-			if err != nil {
-				return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
-			}
-
-			bluesFeeData[*blueBlock.hash] = feeData
-		}
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return bluesFeeData, nil
-}
-
-func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
-	feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
-	if err != nil {
-		return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
-	}
-
-	return feeBucket.Put(blockHash.CloneBytes(), feeData)
-}
-
-func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
-	feeBucket := dbTx.Metadata().Bucket(feeBucket)
-	if feeBucket == nil {
-		return nil, errors.New("Fee bucket does not exist")
-	}
-
-	feeData := feeBucket.Get(blockHash.CloneBytes())
-	if feeData == nil {
-		return nil, errors.Errorf("No fee data found for block %s", blockHash)
-	}
-
-	return feeData, nil
-}
-
-// The following functions deal with building and validating the coinbase transaction
-
-func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
-	if node.isGenesis() {
-		return nil
-	}
-	blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
-	scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
-	if err != nil {
-		return err
-	}
-	expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
-	if err != nil {
-		return err
-	}
-
-	if !expectedCoinbaseTransaction.Hash().IsEqual(block.CoinbaseTransaction().Hash()) {
-		return ruleError(ErrBadCoinbaseTransaction, "Coinbase transaction is not built as expected")
-	}
-
-	return nil
-}
-
-// expectedCoinbaseTransaction returns the coinbase transaction for the current block
-func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData,
-	scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
-	bluesFeeData, err := node.getBluesFeeData(dag)
-	if err != nil {
-		return nil, err
-	}
-
-	txIns := []*wire.TxIn{}
-	txOuts := []*wire.TxOut{}
-
-	for _, blue := range node.blues {
-		txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
-		if err != nil {
-			return nil, err
-		}
-		txIns = append(txIns, txIn)
-		if txOut != nil {
-			txOuts = append(txOuts, txOut)
-		}
-	}
-	payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
-	if err != nil {
-		return nil, err
-	}
-	coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
-	sortedCoinbaseTx := txsort.Sort(coinbaseTx)
-	return util.NewTx(sortedCoinbaseTx), nil
-}
-
-// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
-func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
-	w := &bytes.Buffer{}
-	err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
-	if err != nil {
-		return nil, err
-	}
-	_, err = w.Write(scriptPubKey)
-	if err != nil {
-		return nil, err
-	}
-	_, err = w.Write(extraData)
-	if err != nil {
-		return nil, err
-	}
-	return w.Bytes(), nil
-}
-
-// DeserializeCoinbasePayload deserializes the coinbase payload to its components (scriptPubKey and extra data).
-func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
-	r := bytes.NewReader(tx.Payload)
-	scriptPubKeyLen, err := wire.ReadVarInt(r)
-	if err != nil {
-		return nil, nil, err
-	}
-	scriptPubKey = make([]byte, scriptPubKeyLen)
-	_, err = r.Read(scriptPubKey)
-	if err != nil {
-		return nil, nil, err
-	}
-	extraData = make([]byte, r.Len())
-	if r.Len() != 0 {
-		_, err = r.Read(extraData)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	return scriptPubKey, extraData, nil
-}
-
-// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock.
-// If blueBlock gets no reward - returns only txIn and nil for txOut.
-func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
-	txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
-	*wire.TxIn, *wire.TxOut, error) {
-
-	blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
-	if !ok {
-		return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
-	}
-	blockFeeData, ok := feeData[*blueBlock.hash]
-	if !ok {
-		return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
-	}
-
-	if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
-		return nil, nil, errors.Errorf(
-			"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
-			len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
-	}
-
-	txIn := &wire.TxIn{
-		SignatureScript: []byte{},
-		PreviousOutpoint: wire.Outpoint{
-			TxID:  daghash.TxID(*blueBlock.hash),
-			Index: math.MaxUint32,
-		},
-		Sequence: wire.MaxTxInSequenceNum,
-	}
-
-	totalFees := uint64(0)
-	feeIterator := blockFeeData.iterator()
-
-	for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
-		fee, err := feeIterator.next()
-		if err != nil {
-			return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
-		}
-		if txAcceptanceData.IsAccepted {
-			totalFees += fee
-		}
-	}
-
-	totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
-
-	if totalReward == 0 {
-		return txIn, nil, nil
-	}
-
-	// The ScriptPubKey for the coinbase is parsed from the coinbase payload.
-	scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
-	if err != nil {
-		return nil, nil, err
-	}
-
-	txOut := &wire.TxOut{
-		Value:        totalReward,
-		ScriptPubKey: scriptPubKey,
-	}
-
-	return txIn, txOut, nil
-}
diff --git a/blockdag/coinbase_test.go b/blockdag/coinbase_test.go
deleted file mode 100644
index 2be3c728..00000000
--- a/blockdag/coinbase_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package blockdag
-
-import (
-	"io"
-	"reflect"
-	"testing"
-)
-
-func TestFeeAccumulators(t *testing.T) {
-	fees := []uint64{1, 2, 3, 4, 5, 6, 7, 0xffffffffffffffff}
-
-	factory := newCompactFeeFactory()
-
-	for _, fee := range fees {
-		err := factory.add(fee)
-		if err != nil {
-			t.Fatalf("Error writing %d as tx fee: %s", fee, err)
-		}
-	}
-
-	expectedData := compactFeeData{
-		1, 0, 0, 0, 0, 0, 0, 0,
-		2, 0, 0, 0, 0, 0, 0, 0,
-		3, 0, 0, 0, 0, 0, 0, 0,
-		4, 0, 0, 0, 0, 0, 0, 0,
-		5, 0, 0, 0, 0, 0, 0, 0,
-		6, 0, 0, 0, 0, 0, 0, 0,
-		7, 0, 0, 0, 0, 0, 0, 0,
-		255, 255, 255, 255, 255, 255, 255, 255,
-	}
-	actualData, err := factory.data()
-
-	if err != nil {
-		t.Fatalf("Error getting bytes from writer: %s", err)
-	}
-	if !reflect.DeepEqual(expectedData, actualData) {
-		t.Errorf("Expected bytes: %v, but got: %v", expectedData, actualData)
-	}
-
-	iterator := actualData.iterator()
-
-	for i, expectedFee := range fees {
-		actualFee, err := iterator.next()
-		if err != nil {
-			t.Fatalf("Error getting fee for Tx#%d: %s", i, err)
-		}
-
-		if actualFee != expectedFee {
-			t.Errorf("Tx #%d: Expected fee: %d, but got %d", i, expectedFee, actualFee)
-		}
-	}
-
-	_, err = iterator.next()
-	if err == nil {
-		t.Fatal("No error from iterator.next after reading all transactions")
-	}
-	if err != io.EOF {
-		t.Fatalf("Error from iterator.next after reading all transactions is not io.EOF: %s", err)
-	}
-}
diff --git a/blockdag/common_test.go b/blockdag/common_test.go
deleted file mode 100644
index 876e487e..00000000
--- a/blockdag/common_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (c) 2013-2017 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package blockdag
-
-import (
-	"compress/bzip2"
-	"encoding/binary"
-	"github.com/pkg/errors"
-	"io"
-	"os"
-	"path/filepath"
-	"reflect"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/kaspanet/kaspad/dagconfig"
-	_ "github.com/kaspanet/kaspad/database/ffldb"
-	"github.com/kaspanet/kaspad/util"
-	"github.com/kaspanet/kaspad/util/daghash"
-	"github.com/kaspanet/kaspad/wire"
-)
-
-func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
-	blocks, err := LoadBlocks(filename)
-	if err == nil {
-		t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
-		for i, b := range blocks {
-			t.Logf("Block #%d: %s", i, b.Hash())
-		}
-	}
-	return blocks, err
-}
-
-// loadUTXOSet returns a utxo view loaded from a file.
-func loadUTXOSet(filename string) (UTXOSet, error) {
-	// The utxostore file format is:
-	//
-	// <tx ID><output index><serialized utxo len><serialized utxo>
-	//
-	// The output index and serialized utxo len are little endian uint32s
-	// and the serialized utxo uses the format described in dagio.go.
-
-	filename = filepath.Join("testdata", filename)
-	fi, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	// Choose the reader based on whether the file is compressed or not.
-	var r io.Reader
-	if strings.HasSuffix(filename, ".bz2") {
-		r = bzip2.NewReader(fi)
-	} else {
-		r = fi
-	}
-	defer fi.Close()
-
-	utxoSet := NewFullUTXOSet()
-	for {
-		// Tx ID of the utxo entry.
-		var txID daghash.TxID
-		_, err := io.ReadAtLeast(r, txID[:], len(txID[:]))
-		if err != nil {
-			// Expected EOF at the right offset.
-			if err == io.EOF {
-				break
-			}
-			return nil, err
-		}
-
-		// Output index of the utxo entry.
-		var index uint32
-		err = binary.Read(r, binary.LittleEndian, &index)
-		if err != nil {
-			return nil, err
-		}
-
-		// Num of serialized utxo entry bytes.
-		var numBytes uint32
-		err = binary.Read(r, binary.LittleEndian, &numBytes)
-		if err != nil {
-			return nil, err
-		}
-
-		// Serialized utxo entry.
-		serialized := make([]byte, numBytes)
-		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
-		if err != nil {
-			return nil, err
-		}
-
-		// Deserialize it and add it to the view.
-		entry, err := deserializeUTXOEntry(serialized)
-		if err != nil {
-			return nil, err
-		}
-		utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
-	}
-
-	return utxoSet, nil
-}
-
-// TestSetCoinbaseMaturity allows setting the coinbase maturity
-// when running tests.
-func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
-	dag.dagParams.BlockCoinbaseMaturity = maturity
-}
-
-// newTestDAG returns a DAG that is usable for synthetic tests. It is
-// important to note that this DAG has no database associated with it, so
-// it is not usable with all functions and the tests must take care when making
-// use of it.
-func newTestDAG(params *dagconfig.Params) *BlockDAG {
-	// Create a genesis block node and a block index populated with it
-	// for use when creating the fake DAG below.
-	node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
-	index := newBlockIndex(nil, params)
-	index.AddNode(node)
-
-	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
-	return &BlockDAG{
-		dagParams:                      params,
-		timeSource:                     NewMedianTime(),
-		targetTimePerBlock:             targetTimePerBlock,
-		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
-		TimestampDeviationTolerance:    params.TimestampDeviationTolerance,
-		powMaxBits:                     util.BigToCompact(params.PowMax),
-		index:                          index,
-		virtual:                        newVirtualBlock(setFromSlice(node), params.K),
-		genesis:                        index.LookupNode(params.GenesisHash),
-		warningCaches:                  newThresholdCaches(vbNumBits),
-		deploymentCaches:               newThresholdCaches(dagconfig.DefinedDeployments),
-	}
-}
-
-// newTestNode creates a block node connected to the passed parent with the
-// provided fields populated and fake values for the other fields.
-func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
-	// Make up a header and create a block node from it.
-	header := &wire.BlockHeader{
-		Version:              blockVersion,
-		ParentHashes:         parents.hashes(),
-		Bits:                 bits,
-		Timestamp:            timestamp,
-		HashMerkleRoot:       &daghash.ZeroHash,
-		AcceptedIDMerkleRoot: &daghash.ZeroHash,
-		UTXOCommitment:       &daghash.ZeroHash,
-	}
-	return newBlockNode(header, parents, phantomK)
-}
-
-func addNodeAsChildToParents(node *blockNode) {
-	for _, parent := range node.parents {
-		parent.children.add(node)
-	}
-}
-
-func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
-	// For the purposes of these tests, we'll create blockNodes whose hashes are a
-	// series of numbers from 1 to 255.
-	hashCounter := byte(1)
-	buildNode := func(parents blockSet) *blockNode {
-		block := newBlockNode(nil, parents, phantomK)
-		block.hash = &daghash.Hash{hashCounter}
-		hashCounter++
-
-		return block
-	}
-	if withChildren {
-		return func(parents blockSet) *blockNode {
-			node := buildNode(parents)
-			addNodeAsChildToParents(node)
-			return node
-		}
-	}
-	return buildNode
-}
-
-// checkRuleError ensures the two passed errors are of the same type (either
-// both nil or both of type RuleError) and their error codes match when not
-// nil.
-func checkRuleError(gotErr, wantErr error) error {
-	// Ensure the error code is of the expected type and the error
-	// code matches the value specified in the test instance.
-	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
-		return errors.Errorf("wrong error - got %T (%[1]v), want %T",
-			gotErr, wantErr)
-	}
-	if gotErr == nil {
-		return nil
-	}
-
-	// Ensure the want error type is a rule error.
-	werr, ok := wantErr.(RuleError)
-	if !ok {
-		return errors.Errorf("unexpected test error type %T", wantErr)
-	}
-
-	// Ensure the error codes match. It's safe to use a raw type assert
-	// here since the code above already proved they are the same type and
-	// the want error is a rule error.
-	gotErrorCode := gotErr.(RuleError).ErrorCode
-	if gotErrorCode != werr.ErrorCode {
-		return errors.Errorf("mismatched error code - got %v (%v), want %v",
-			gotErrorCode, gotErr, werr.ErrorCode)
-	}
-
-	return nil
-}
diff --git a/blockdag/compress.go b/blockdag/compress.go
deleted file mode 100644
index 6e9db031..00000000
--- a/blockdag/compress.go
+++ /dev/null
@@ -1,584 +0,0 @@
-// Copyright (c) 2015-2016 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-package blockdag
-
-import (
-	"github.com/kaspanet/kaspad/ecc"
-	"github.com/kaspanet/kaspad/txscript"
-)
-
-// -----------------------------------------------------------------------------
-// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
-// of binary octets to represent an arbitrarily large integer. The scheme
-// employs a most significant byte (MSB) base-128 encoding where the high bit in
-// each byte indicates whether or not the byte is the final one. In addition,
-// to ensure there are no redundant encodings, an offset is subtracted every
-// time a group of 7 bits is shifted out. Therefore each integer can be
-// represented in exactly one way, and each representation stands for exactly
-// one integer.
-//
-// Another nice property of this encoding is that it provides a compact
-// representation of values that are typically used to indicate sizes. For
-// example, the values 0 - 127 are represented with a single byte, 128 - 16511
-// with two bytes, and 16512 - 2113663 with three bytes.
-//
-// While the encoding allows arbitrarily large integers, it is artificially
-// limited in this code to an unsigned 64-bit integer for efficiency purposes.
-//
-// Example encodings:
-//           0 -> [0x00]
-//         127 -> [0x7f] * Max 1-byte value
-//         128 -> [0x80 0x00]
-//         129 -> [0x80 0x01]
-//         255 -> [0x80 0x7f]
-//         256 -> [0x81 0x00]
-//       16511 -> [0xff 0x7f] * Max 2-byte value
-//       16512 -> [0x80 0x80 0x00]
-//       32895 -> [0x80 0xff 0x7f]
-//     2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
-//   270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
-//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
-//
-// References:
-//   https://en.wikipedia.org/wiki/Variable-length_quantity
-//   http://www.codecodex.com/wiki/Variable-Length_Integers
-// -----------------------------------------------------------------------------
-
-// serializeSizeVLQ returns the number of bytes it would take to serialize the
-// passed number as a variable-length quantity according to the format described
-// above.
-func serializeSizeVLQ(n uint64) int {
-	size := 1
-	for ; n > 0x7f; n = (n >> 7) - 1 {
-		size++
-	}
-
-	return size
-}
-
-// putVLQ serializes the provided number to a variable-length quantity according
-// to the format described above and returns the number of bytes of the encoded
-// value. The result is placed directly into the passed byte slice which must
-// be at least large enough to handle the number of bytes returned by the
-// serializeSizeVLQ function or it will panic.
-func putVLQ(target []byte, n uint64) int {
-	offset := 0
-	for ; ; offset++ {
-		// The high bit is set when another byte follows.
-		highBitMask := byte(0x80)
-		if offset == 0 {
-			highBitMask = 0x00
-		}
-
-		target[offset] = byte(n&0x7f) | highBitMask
-		if n <= 0x7f {
-			break
-		}
-		n = (n >> 7) - 1
-	}
-
-	// Reverse the bytes so it is MSB-encoded.
-	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
-		target[i], target[j] = target[j], target[i]
-	}
-
-	return offset + 1
-}
-
-// deserializeVLQ deserializes the provided variable-length quantity according
-// to the format described above. It also returns the number of bytes
-// deserialized.
-func deserializeVLQ(serialized []byte) (uint64, int) {
-	var n uint64
-	var size int
-	for _, val := range serialized {
-		size++
-		n = (n << 7) | uint64(val&0x7f)
-		if val&0x80 != 0x80 {
-			break
-		}
-		n++
-	}
-
-	return n, size
-}
-
-// -----------------------------------------------------------------------------
-// In order to reduce the size of stored scripts, a domain specific compression
-// algorithm is used which recognizes standard scripts and stores them using
-// fewer bytes than the original script.
-//
-// The general serialized format is:
-//
-//
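Returning to the VLQ helpers above, a round-trip sketch (vlqRoundTrip is a hypothetical, standalone helper, not part of the deleted file):

func vlqRoundTrip(n uint64) uint64 {
	// Size the buffer, encode, then decode again.
	buf := make([]byte, serializeSizeVLQ(n))
	putVLQ(buf, n)
	decoded, _ := deserializeVLQ(buf)
	return decoded
}

For example, 16511 encodes to the two bytes [0xff 0x7f]: decoding reads 0x7f from the first byte, applies the continuation offset of +1, shifts left by 7, and ORs in the final 0x7f, giving ((0x7f+1)<<7)|0x7f = 16511.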