diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 44521c1af3..a5c322fc23 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ env: jobs: test-each-commit: name: 'test each commit' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. env: @@ -62,12 +62,12 @@ jobs: echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD ^$(git rev-list -n1 --merges HEAD)^@ | head -1)" >> "$GITHUB_ENV" - run: | sudo apt-get update - sudo apt-get install clang-15 ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y + sudo apt-get install clang ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y - name: Compile and run tests run: | # Run tests on commits after the last merge commit and before the PR head commit # Use clang++, because it is a bit faster and uses less memory than g++ - git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang-15 CXX=clang++-15 ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} + git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang CXX=clang++ ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} macos-native-x86_64: name: 'macOS 13 native, x86_64, no depends, sqlite only, gui' diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 48823c7e45..51bca4627e 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -27,22 +27,24 @@ def clean_files(source, executable): os.remove(source) os.remove(executable) -def call_security_check(cc: str, source: str, executable: str, options) -> tuple: +def env_flags() -> list[str]: # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. # # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. 
- env_flags: list[str] = [] + flags: list[str] = [] for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: - env_flags += filter(None, os.environ.get(var, '').split(' ')) + flags += filter(None, os.environ.get(var, '').split(' ')) + return flags - subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) +def call_security_check(cc: str, source: str, executable: str, options) -> tuple: + subprocess.run([*cc,source,'-o',executable] + env_flags() + options, check=True) p = subprocess.run([os.path.join(os.path.dirname(__file__), 'security-check.py'), executable], stdout=subprocess.PIPE, text=True) return (p.returncode, p.stdout.rstrip()) def get_arch(cc, source, executable): - subprocess.run([*cc, source, '-o', executable], check=True) + subprocess.run([*cc, source, '-o', executable] + env_flags(), check=True) binary = lief.parse(executable) arch = binary.abstract.header.architecture os.remove(executable) diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index 0140decb25..b00004586c 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -27,10 +27,6 @@ def call_symbol_check(cc: list[str], source, executable, options): os.remove(executable) return (p.returncode, p.stdout.rstrip()) -def get_machine(cc: list[str]): - p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, text=True) - return p.stdout.rstrip() - class TestSymbolChecks(unittest.TestCase): def test_ELF(self): source = 'test1.c' diff --git a/depends/funcs.mk b/depends/funcs.mk index d8a72421b4..7f79478cbd 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -180,6 +180,9 @@ $(1)_cmake=env CC="$$($(1)_cc)" \ CXXFLAGS="$$($(1)_cppflags) $$($(1)_cxxflags)" \ LDFLAGS="$$($(1)_ldflags)" \ cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" \ + -DCMAKE_AR=`which $$($(1)_ar)` \ + -DCMAKE_NM=`which $$($(1)_nm)` \ + -DCMAKE_RANLIB=`which $$($(1)_ranlib)` \ -DCMAKE_INSTALL_LIBDIR=lib/ \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=$(V) \ diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index ec332d23eb..7640102172 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -74,6 +74,22 @@ major version via the `-deprecatedrpc=` command line option. The release notes of a new major release come with detailed instructions on what RPC features were deprecated and how to re-enable them temporarily. +## JSON-RPC 1.1 vs 2.0 + +The server recognizes [JSON-RPC v2.0](https://www.jsonrpc.org/specification) requests +and responds accordingly. A 2.0 request is identified by the presence of +`"jsonrpc": "2.0"` in the request body. If that key + value is not present in a request, +the legacy JSON-RPC v1.1 protocol is followed instead, which was the only available +protocol in previous releases. 
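To make the 1.1/2.0 split concrete before the comparison table, here is a sketch of how a 2.0 request body could be assembled with the codebase's UniValue API. The method name and id are arbitrary illustrations, not part of the patch:

```cpp
#include <univalue.h>

// Minimal JSON-RPC 2.0 request body. The "jsonrpc": "2.0" member is the
// marker the server keys on; omitting "id" would turn this into a
// notification, which under 2.0 receives no reply at all.
UniValue MakeExampleRequest()
{
    UniValue req(UniValue::VOBJ);
    req.pushKV("jsonrpc", "2.0");
    req.pushKV("id", 1);                    // arbitrary request id
    req.pushKV("method", "getblockcount");  // any registered RPC method
    req.pushKV("params", UniValue{UniValue::VARR});
    // req.write() yields {"jsonrpc":"2.0","id":1,"method":"getblockcount","params":[]}
    return req;
}
```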
+ +|| 1.1 | 2.0 | +|-|-|-| +| Request marker | `"version": "1.1"` (or none) | `"jsonrpc": "2.0"` | +| Response marker | (none) | `"jsonrpc": "2.0"` | +| `"error"` and `"result"` fields in response | both present | only one is present | +| HTTP codes in response | `200` unless there is any kind of RPC error (invalid parameters, method not found, etc) | Always `200` unless there is an actual HTTP server error (request parsing error, endpoint not found, etc) | +| Notifications: requests that get no reply | (not supported) | Supported for requests that exclude the "id" field | + ## Security The RPC interface allows other programs to control Bitcoin Core, diff --git a/doc/release-notes-27101.md b/doc/release-notes-27101.md new file mode 100644 index 0000000000..8775b59c00 --- /dev/null +++ b/doc/release-notes-27101.md @@ -0,0 +1,9 @@ +JSON-RPC +-------- + +The JSON-RPC server now recognizes JSON-RPC 2.0 requests and responds with +strict adherence to the specification (https://www.jsonrpc.org/specification): + +- Returning HTTP "204 No Content" responses to JSON-RPC 2.0 notifications instead of full responses. +- Returning HTTP "200 OK" responses in all other cases, rather than 404 responses for unknown methods, 500 responses for invalid parameters, etc. +- Returning either "result" fields or "error" fields in JSON-RPC responses, rather than returning both fields with one field set to null. diff --git a/src/Makefile.am b/src/Makefile.am index 553106faeb..83fbed3de3 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1002,7 +1002,6 @@ libbitcoinkernel_la_SOURCES = \ txdb.cpp \ txmempool.cpp \ uint256.cpp \ - util/batchpriority.cpp \ util/chaintype.cpp \ util/check.cpp \ util/feefrac.cpp \ diff --git a/src/addrman.cpp b/src/addrman.cpp index eb9c372550..d0b820ee65 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -173,7 +173,7 @@ void AddrManImpl::Serialize(Stream& s_) const */ // Always serialize in the latest version (FILE_FORMAT). - ParamsStream s{CAddress::V2_DISK, s_}; + ParamsStream s{s_, CAddress::V2_DISK}; s << static_cast(FILE_FORMAT); @@ -238,7 +238,7 @@ void AddrManImpl::Unserialize(Stream& s_) s_ >> Using>(format); const auto ser_params = (format >= Format::V3_BIP155 ? 
CAddress::V2_DISK : CAddress::V1_DISK); - ParamsStream s{ser_params, s_}; + ParamsStream s{s_, ser_params}; uint8_t compat; s >> compat; diff --git a/src/bench/readblock.cpp b/src/bench/readblock.cpp index 0545c6b017..2b2bfe069e 100644 --- a/src/bench/readblock.cpp +++ b/src/bench/readblock.cpp @@ -18,7 +18,7 @@ static FlatFilePos WriteBlockToDisk(ChainstateManager& chainman) CBlock block; stream >> TX_WITH_WITNESS(block); - return chainman.m_blockman.SaveBlockToDisk(block, 0, nullptr); + return chainman.m_blockman.SaveBlockToDisk(block, 0); } static void ReadBlockFromDiskTest(benchmark::Bench& bench) diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp index 4927634233..4d2a6f0c2a 100644 --- a/src/bitcoin-chainstate.cpp +++ b/src/bitcoin-chainstate.cpp @@ -151,7 +151,7 @@ int main(int argc, char* argv[]) { LOCK(chainman.GetMutex()); std::cout - << "\t" << "Reindexing: " << std::boolalpha << node::fReindex.load() << std::noboolalpha << std::endl + << "\t" << "Reindexing: " << std::boolalpha << chainman.m_blockman.m_reindexing.load() << std::noboolalpha << std::endl << "\t" << "Snapshot Active: " << std::boolalpha << chainman.IsSnapshotActive() << std::noboolalpha << std::endl << "\t" << "Active Height: " << chainman.ActiveHeight() << std::endl << "\t" << "Active IBD: " << std::boolalpha << chainman.IsInitialBlockDownload() << std::noboolalpha << std::endl; diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 80871f093f..cac2cc3489 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -298,7 +298,7 @@ class AddrinfoRequestHandler : public BaseRequestHandler } addresses.pushKV("total", total); result.pushKV("addresses_known", addresses); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } }; @@ -367,7 +367,7 @@ class GetinfoRequestHandler: public BaseRequestHandler } result.pushKV("relayfee", batch[ID_NETWORKINFO]["result"]["relayfee"]); result.pushKV("warnings", batch[ID_NETWORKINFO]["result"]["warnings"]); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } }; @@ -622,7 +622,7 @@ class NetinfoRequestHandler : public BaseRequestHandler } } - return JSONRPCReplyObj(UniValue{result}, NullUniValue, 1); + return JSONRPCReplyObj(UniValue{result}, NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } const std::string m_help_doc{ @@ -709,7 +709,7 @@ class GenerateToAddressRequestHandler : public BaseRequestHandler UniValue result(UniValue::VOBJ); result.pushKV("address", address_str); result.pushKV("blocks", reply.get_obj()["result"]); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } protected: std::string address_str; diff --git a/src/clientversion.cpp b/src/clientversion.cpp index 7979795476..bf32ec1bdd 100644 --- a/src/clientversion.cpp +++ b/src/clientversion.cpp @@ -5,11 +5,11 @@ #include // IWYU pragma: keep #include +#include #include #include -#include #include #include @@ -64,19 +64,9 @@ std::string FormatFullVersion() */ std::string FormatSubVersion(const std::string& name, int nClientVersion, const std::vector& comments) { - std::ostringstream ss; - ss << "/"; - ss << name << ":" << FormatVersion(nClientVersion); - if (!comments.empty()) - { - std::vector::const_iterator it(comments.begin()); - ss << "(" << *it; - for(++it; it != comments.end(); ++it) - ss << "; " 
<< *it; - ss << ")"; - } - ss << "/"; - return ss.str(); + std::string comments_str; + if (!comments.empty()) comments_str = strprintf("(%s)", Join(comments, "; ")); + return strprintf("/%s:%s%s/", name, FormatVersion(nClientVersion), comments_str); } std::string CopyrightHolders(const std::string& strPrefix) diff --git a/src/crypto/sha256_sse4.cpp b/src/crypto/sha256_sse4.cpp index f4557291ce..f0e255a23c 100644 --- a/src/crypto/sha256_sse4.cpp +++ b/src/crypto/sha256_sse4.cpp @@ -13,6 +13,13 @@ namespace sha256_sse4 { void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks) +#if defined(__clang__) && !defined(__OPTIMIZE__) + /* + clang is unable to compile this with -O0 and -fsanitize=address. + See upstream bug: https://github.com/llvm/llvm-project/issues/92182 + */ + __attribute__((no_sanitize("address"))) +#endif { static const uint32_t K256 alignas(16) [] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, diff --git a/src/httprpc.cpp b/src/httprpc.cpp index c72dbf10bc..3eb34dbe6a 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -73,8 +73,11 @@ static std::vector> g_rpcauth; static std::map> g_rpc_whitelist; static bool g_rpc_whitelist_default = false; -static void JSONErrorReply(HTTPRequest* req, const UniValue& objError, const UniValue& id) +static void JSONErrorReply(HTTPRequest* req, UniValue objError, const JSONRPCRequest& jreq) { + // Sending HTTP errors is a legacy JSON-RPC behavior. + Assume(jreq.m_json_version != JSONRPCVersion::V2); + // Send error reply from json-rpc error object int nStatus = HTTP_INTERNAL_SERVER_ERROR; int code = objError.find_value("code").getInt(); @@ -84,7 +87,7 @@ static void JSONErrorReply(HTTPRequest* req, const UniValue& objError, const Uni else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND; - std::string strReply = JSONRPCReply(NullUniValue, objError, id); + std::string strReply = JSONRPCReplyObj(NullUniValue, std::move(objError), jreq.id, jreq.m_json_version).write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(nStatus, strReply); @@ -185,7 +188,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Set the URI jreq.URI = req->GetURI(); - std::string strReply; + UniValue reply; bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser); if (!user_has_whitelist && g_rpc_whitelist_default) { LogPrintf("RPC User %s not allowed to call any methods\n", jreq.authUser); @@ -200,13 +203,23 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) req->WriteReply(HTTP_FORBIDDEN); return false; } - UniValue result = tableRPC.execute(jreq); - // Send reply - strReply = JSONRPCReply(result, NullUniValue, jreq.id); + // Legacy 1.0/1.1 behavior is for failed requests to throw + // exceptions which return HTTP errors and RPC errors to the client. + // 2.0 behavior is to catch exceptions and return HTTP success with + // RPC errors, as long as there is not an actual HTTP server error. 
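Before the hunk below, a hedged sketch of the execute-and-wrap behavior this comment describes. The real JSONRPCExec is defined elsewhere in this patch and may differ in detail; this only illustrates how catch_errors separates the legacy and 2.0 paths, reusing the JSONRPCReplyObj signature visible in this diff:

```cpp
// Sketch only; not the patch's actual JSONRPCExec implementation.
static UniValue ExecSketch(const JSONRPCRequest& jreq, bool catch_errors)
{
    try {
        UniValue result = tableRPC.execute(jreq);
        return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id, jreq.m_json_version);
    } catch (UniValue& err) {
        if (!catch_errors) throw; // legacy 1.1: bubbles up and becomes an HTTP error reply
        // JSON-RPC 2.0: HTTP 200 with an "error" member instead of "result"
        return JSONRPCReplyObj(NullUniValue, std::move(err), jreq.id, jreq.m_json_version);
    }
}
```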
+ const bool catch_errors{jreq.m_json_version == JSONRPCVersion::V2}; + reply = JSONRPCExec(jreq, catch_errors); + + if (jreq.IsNotification()) { + // Even though we do execute notifications, we do not respond to them + req->WriteReply(HTTP_NO_CONTENT); + return true; + } // array of requests } else if (valRequest.isArray()) { + // Check authorization for each request's method if (user_has_whitelist) { for (unsigned int reqIdx = 0; reqIdx < valRequest.size(); reqIdx++) { if (!valRequest[reqIdx].isObject()) { @@ -223,18 +236,49 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) } } } - strReply = JSONRPCExecBatch(jreq, valRequest.get_array()); + + // Execute each request + reply = UniValue::VARR; + for (size_t i{0}; i < valRequest.size(); ++i) { + // Batches never throw HTTP errors, they are always just included + // in "HTTP OK" responses. Notifications never get any response. + UniValue response; + try { + jreq.parse(valRequest[i]); + response = JSONRPCExec(jreq, /*catch_errors=*/true); + } catch (UniValue& e) { + response = JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version); + } catch (const std::exception& e) { + response = JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id, jreq.m_json_version); + } + if (!jreq.IsNotification()) { + reply.push_back(std::move(response)); + } + } + // Return no response for an all-notification batch, but only if the + // batch request is non-empty. Technically according to the JSON-RPC + // 2.0 spec, an empty batch request should also return no response, + // However, if the batch request is empty, it means the request did + // not contain any JSON-RPC version numbers, so returning an empty + // response could break backwards compatibility with old RPC clients + // relying on previous behavior. Return an empty array instead of an + // empty response in this case to favor being backwards compatible + // over complying with the JSON-RPC 2.0 spec in this case. 
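As a quick reference for the branch below, the three batch shapes the comment distinguishes (annotation, not part of the patch):

```cpp
// POST body "[]"             -> HTTP 200, body "[]"   (backwards-compat carve-out)
// POST body "[notif, notif]" -> HTTP 204, no body     (all-notification batch)
// POST body "[req, notif]"   -> HTTP 200, body is an array with one response object
```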
+ if (reply.size() == 0 && valRequest.size() > 0) { + req->WriteReply(HTTP_NO_CONTENT); + return true; + } } else throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error"); req->WriteHeader("Content-Type", "application/json"); - req->WriteReply(HTTP_OK, strReply); - } catch (const UniValue& objError) { - JSONErrorReply(req, objError, jreq.id); + req->WriteReply(HTTP_OK, reply.write() + "\n"); + } catch (UniValue& e) { + JSONErrorReply(req, std::move(e), jreq); return false; } catch (const std::exception& e) { - JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); + JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq); return false; } return true; diff --git a/src/init.cpp b/src/init.cpp index ad1921f175..5eee07e381 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -75,6 +75,7 @@ #include #include #include +#include #include #include #include @@ -129,7 +130,6 @@ using node::CalculateCacheSizes; using node::DEFAULT_PERSIST_MEMPOOL; using node::DEFAULT_PRINTPRIORITY; using node::DEFAULT_STOPATHEIGHT; -using node::fReindex; using node::KernelNotifications; using node::LoadChainstate; using node::MempoolPath; @@ -544,9 +544,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); - // TODO: remove the sentence "Nodes not using ... incoming connections." once the changes from - // https://github.com/bitcoin/bitcoin/pull/23542 have become widespread. - argsman.AddArg("-port=", strprintf("Listen for connections on . Nodes not using the default ports (default: %u, testnet: %u, signet: %u, regtest: %u) are unlikely to get incoming connections. Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-port=", strprintf("Listen for connections on (default: %u, testnet: %u, signet: %u, regtest: %u). Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); #if HAVE_SOCKADDR_UN argsman.AddArg("-proxy=", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled). 
May be a local file path prefixed with 'unix:' if the proxy supports it.", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION); #else @@ -1504,7 +1502,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.notifications = std::make_unique(*Assert(node.shutdown), node.exit_status); ReadNotificationArgs(args, *node.notifications); - fReindex = args.GetBoolArg("-reindex", false); bool fReindexChainState = args.GetBoolArg("-reindex-chainstate", false); fNameHistory = args.GetBoolArg("-namehistory", false); ChainstateManager::Options chainman_opts{ @@ -1585,7 +1582,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node::ChainstateLoadOptions options; options.mempool = Assert(node.mempool.get()); - options.reindex = node::fReindex; + options.reindex = chainman.m_blockman.m_reindexing; options.reindex_chainstate = fReindexChainState; options.prune = chainman.m_blockman.IsPruneMode(); options.nameHistory = args.GetBoolArg("-namehistory", false); @@ -1634,7 +1631,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) error.original + ".\nPlease restart with -reindex or -reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { - fReindex = true; + chainman.m_blockman.m_reindexing = true; if (!Assert(node.shutdown)->reset()) { LogPrintf("Internal error: failed to reset shutdown signal.\n"); } @@ -1667,22 +1664,22 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // ********************************************************* Step 8: start indexers if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { - g_txindex = std::make_unique(interfaces::MakeChain(node), cache_sizes.tx_index, false, fReindex); + g_txindex = std::make_unique(interfaces::MakeChain(node), cache_sizes.tx_index, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(g_txindex.get()); } if (gArgs.GetBoolArg("-namehashindex", DEFAULT_NAMEHASHINDEX)) { - g_name_hash_index = std::make_unique(interfaces::MakeChain(node), cache_sizes.name_hash_index, false, fReindex); + g_name_hash_index = std::make_unique(interfaces::MakeChain(node), cache_sizes.name_hash_index, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(g_name_hash_index.get()); } for (const auto& filter_type : g_enabled_filter_types) { - InitBlockFilterIndex([&]{ return interfaces::MakeChain(node); }, filter_type, cache_sizes.filter_index, false, fReindex); + InitBlockFilterIndex([&]{ return interfaces::MakeChain(node); }, filter_type, cache_sizes.filter_index, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(GetBlockFilterIndex(filter_type)); } if (args.GetBoolArg("-coinstatsindex", DEFAULT_COINSTATSINDEX)) { - g_coin_stats_index = std::make_unique(interfaces::MakeChain(node), /*cache_size=*/0, false, fReindex); + g_coin_stats_index = std::make_unique(interfaces::MakeChain(node), /*cache_size=*/0, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(g_coin_stats_index.get()); } @@ -1701,7 +1698,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // if pruning, perform the initial blockstore prune // after any wallet rescanning has taken place. 
if (chainman.m_blockman.IsPruneMode()) { - if (!fReindex) { + if (!chainman.m_blockman.m_reindexing) { LOCK(cs_main); for (Chainstate* chainstate : chainman.GetAll()) { uiInterface.InitMessage(_("Pruning blockstoreā€¦").translated); @@ -1727,7 +1724,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) int chain_active_height = WITH_LOCK(cs_main, return chainman.ActiveChain().Height()); // On first startup, warn on low block storage space - if (!fReindex && !fReindexChainState && chain_active_height <= 1) { + if (!chainman.m_blockman.m_reindexing && !fReindexChainState && chain_active_height <= 1) { uint64_t assumed_chain_bytes{chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024}; uint64_t additional_bytes_needed{ chainman.m_blockman.IsPruneMode() ? @@ -1773,6 +1770,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } chainman.m_thread_load = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] { + ScheduleBatchPriority(); // Import blocks ImportBlocks(chainman, vImportFiles); if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { diff --git a/src/kernel/blockmanager_opts.h b/src/kernel/blockmanager_opts.h index deeba7e318..16072b669b 100644 --- a/src/kernel/blockmanager_opts.h +++ b/src/kernel/blockmanager_opts.h @@ -24,6 +24,7 @@ struct BlockManagerOpts { bool fast_prune{false}; const fs::path blocks_dir; Notifications& notifications; + bool reindex{false}; }; } // namespace kernel diff --git a/src/kernel/mempool_persist.cpp b/src/kernel/mempool_persist.cpp index f06f609379..53028a45ae 100644 --- a/src/kernel/mempool_persist.cpp +++ b/src/kernel/mempool_persist.cpp @@ -96,7 +96,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active if (amountdelta && opts.apply_fee_delta_priority) { pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } - if (nTime > TicksSinceEpoch(now - pool.m_expiry)) { + if (nTime > TicksSinceEpoch(now - pool.m_opts.expiry)) { LOCK(cs_main); const auto& accepted = AcceptToMemoryPool(active_chainstate, tx, nTime, /*bypass_limits=*/false, /*test_accept=*/false); if (accepted.m_result_type == MempoolAcceptResult::ResultType::VALID) { @@ -174,11 +174,11 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock } try { - const uint64_t version{pool.m_persist_v1_dat ? MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; + const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; std::vector xor_key(8); - if (!pool.m_persist_v1_dat) { + if (!pool.m_opts.persist_v1_dat) { FastRandomContext{}.fillrand(xor_key); file << xor_key; } diff --git a/src/net.cpp b/src/net.cpp index ad1e464667..f05b517dd2 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -196,8 +196,7 @@ static std::vector ConvertSeeds(const std::vector &vSeedsIn) const auto one_week{7 * 24h}; std::vector vSeedsOut; FastRandomContext rng; - DataStream underlying_stream{vSeedsIn}; - ParamsStream s{CAddress::V2_NETWORK, underlying_stream}; + ParamsStream s{DataStream{vSeedsIn}, CAddress::V2_NETWORK}; while (!s.eof()) { CService endpoint; s >> endpoint; @@ -2832,7 +2831,7 @@ std::vector CConnman::GetAddedNodeInfo(bool include_connected) co } for (const auto& addr : lAddresses) { - CService service(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node))); + CService service{MaybeFlipIPv6toCJDNS(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node)))}; AddedNodeInfo addedNode{addr, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0c51f3b1bc..c06c210a59 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2309,7 +2309,23 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside const uint256& hash = gtxid.GetHash(); - if (m_orphanage.HaveTx(gtxid)) return true; + if (gtxid.IsWtxid()) { + // Normal query by wtxid. + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; + } else { + // Never query by txid: it is possible that the transaction in the orphanage has the same + // txid but a different witness, which would give us a false positive result. If we decided + // not to request the transaction based on this result, an attacker could prevent us from + // downloading a transaction by intentionally creating a malleated version of it. While + // only one (or none!) of these transactions can ultimately be confirmed, we have no way of + // discerning which one that is, so the orphanage can store multiple transactions with the + // same txid. + // + // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. + // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will + // help us find non-segwit transactions, saving bandwidth, and should have no false positives. + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; + } if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true; @@ -3289,7 +3305,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0. - if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetHash()) > 0) { + if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) { LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); } } @@ -3307,7 +3323,7 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c m_orphanage.AddChildrenToWorkSet(*tx); // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage. 
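An annotation before the next two hunks, which switch both orphanage erasure call sites from txid to wtxid keying:

```cpp
// Not part of the patch: the orphanage may hold several transactions that
// share a txid (same txid, different witness), so erasure must identify
// the exact entry by wtxid. For non-segwit transactions txid == wtxid,
// which is what makes the Wtxid::FromUint256(hash) "guess" above safe.
```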
- m_orphanage.EraseTx(tx->GetHash()); + m_orphanage.EraseTx(tx->GetWitnessHash()); LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", nodeid, @@ -5856,7 +5872,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi if (current_time > peer.m_next_send_feefilter) { CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); // We always have a fee filter of at least the min relay fee - filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK()); + filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); if (filterToSend != peer.m_fee_filter_sent) { MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); peer.m_fee_filter_sent = filterToSend; diff --git a/src/netaddress.h b/src/netaddress.h index c63bd4b4e5..ea2d14336e 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -237,7 +237,7 @@ class CNetAddr template void Serialize(Stream& s) const { - if (s.GetParams().enc == Encoding::V2) { + if (s.template GetParams().enc == Encoding::V2) { SerializeV2Stream(s); } else { SerializeV1Stream(s); @@ -250,7 +250,7 @@ class CNetAddr template void Unserialize(Stream& s) { - if (s.GetParams().enc == Encoding::V2) { + if (s.template GetParams().enc == Encoding::V2) { UnserializeV2Stream(s); } else { UnserializeV1Stream(s); diff --git a/src/node/blockmanager_args.cpp b/src/node/blockmanager_args.cpp index fa76566652..dd8419a68a 100644 --- a/src/node/blockmanager_args.cpp +++ b/src/node/blockmanager_args.cpp @@ -33,6 +33,8 @@ util::Result ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value; + if (auto value{args.GetBoolArg("-reindex")}) opts.reindex = *value; + return {}; } } // namespace node diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 00a8dbb668..d03e0a1da6 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -150,7 +150,6 @@ bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, s } // namespace kernel namespace node { -std::atomic_bool fReindex(false); bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const { @@ -552,7 +551,7 @@ bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_block // Check whether we need to continue reindexing bool fReindexing = false; m_block_tree_db->ReadReindexing(fReindexing); - if (fReindexing) fReindex = true; + if (fReindexing) m_reindexing = true; // Check whether we have the name history m_block_tree_db->ReadFlag("namehistory", fNameHistory); @@ -852,7 +851,7 @@ fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const return BlockFileSeq().FileName(pos); } -bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown) +FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime) { LOCK(cs_LastBlockFile); @@ -867,88 +866,101 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne } const int last_blockfile = m_blockfile_cursors[chain_type]->file_num; - int nFile = fKnown ? 
pos.nFile : last_blockfile; + int nFile = last_blockfile; if (static_cast(m_blockfile_info.size()) <= nFile) { m_blockfile_info.resize(nFile + 1); } bool finalize_undo = false; - if (!fKnown) { - unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE}; - // Use smaller blockfiles in test-only -fastprune mode - but avoid - // the possibility of having a block not fit into the block file. - if (m_opts.fast_prune) { - max_blockfile_size = 0x10000; // 64kiB - if (nAddSize >= max_blockfile_size) { - // dynamically adjust the blockfile size to be larger than the added size - max_blockfile_size = nAddSize + 1; - } + unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE}; + // Use smaller blockfiles in test-only -fastprune mode - but avoid + // the possibility of having a block not fit into the block file. + if (m_opts.fast_prune) { + max_blockfile_size = 0x10000; // 64kiB + if (nAddSize >= max_blockfile_size) { + // dynamically adjust the blockfile size to be larger than the added size + max_blockfile_size = nAddSize + 1; } - assert(nAddSize < max_blockfile_size); - - while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { - // when the undo file is keeping up with the block file, we want to flush it explicitly - // when it is lagging behind (more blocks arrive than are being connected), we let the - // undo block write case handle it - finalize_undo = (static_cast(m_blockfile_info[nFile].nHeightLast) == - Assert(m_blockfile_cursors[chain_type])->undo_height); - - // Try the next unclaimed blockfile number - nFile = this->MaxBlockfileNum() + 1; - // Set to increment MaxBlockfileNum() for next iteration - m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; - - if (static_cast(m_blockfile_info.size()) <= nFile) { - m_blockfile_info.resize(nFile + 1); - } + } + assert(nAddSize < max_blockfile_size); + + while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { + // when the undo file is keeping up with the block file, we want to flush it explicitly + // when it is lagging behind (more blocks arrive than are being connected), we let the + // undo block write case handle it + finalize_undo = (static_cast(m_blockfile_info[nFile].nHeightLast) == + Assert(m_blockfile_cursors[chain_type])->undo_height); + + // Try the next unclaimed blockfile number + nFile = this->MaxBlockfileNum() + 1; + // Set to increment MaxBlockfileNum() for next iteration + m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; + + if (static_cast(m_blockfile_info.size()) <= nFile) { + m_blockfile_info.resize(nFile + 1); } - pos.nFile = nFile; - pos.nPos = m_blockfile_info[nFile].nSize; } + FlatFilePos pos; + pos.nFile = nFile; + pos.nPos = m_blockfile_info[nFile].nSize; if (nFile != last_blockfile) { - if (!fKnown) { - LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n", - last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); - - // Do not propagate the return code. The flush concerns a previous block - // and undo file that has already been written to. If a flush fails - // here, and we crash, there is no expected additional block data - // inconsistency arising from the flush failure here. However, the undo - // data may be inconsistent after a crash if the flush is called during - // a reindex. A flush error might also leave some of the data files - // untrimmed. 
- if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) { - LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, - "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n", - last_blockfile, !fKnown, finalize_undo, nFile); - } + LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n", + last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); + + // Do not propagate the return code. The flush concerns a previous block + // and undo file that has already been written to. If a flush fails + // here, and we crash, there is no expected additional block data + // inconsistency arising from the flush failure here. However, the undo + // data may be inconsistent after a crash if the flush is called during + // a reindex. A flush error might also leave some of the data files + // untrimmed. + if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, + "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n", + last_blockfile, finalize_undo, nFile); } // No undo data yet in the new file, so reset our undo-height tracking. m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; } m_blockfile_info[nFile].AddBlock(nHeight, nTime); - if (fKnown) { - m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize); - } else { - m_blockfile_info[nFile].nSize += nAddSize; + m_blockfile_info[nFile].nSize += nAddSize; + + bool out_of_space; + size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); + if (out_of_space) { + m_opts.notifications.fatalError(_("Disk space is too low!")); + return {}; + } + if (bytes_allocated != 0 && IsPruneMode()) { + m_check_for_pruning = true; } - if (!fKnown) { - bool out_of_space; - size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); - if (out_of_space) { - m_opts.notifications.fatalError(_("Disk space is too low!")); - return false; - } - if (bytes_allocated != 0 && IsPruneMode()) { - m_check_for_pruning = true; - } + m_dirty_fileinfo.insert(nFile); + return pos; +} + +void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos) +{ + LOCK(cs_LastBlockFile); + + // Update the cursor so it points to the last file. + const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)}; + auto& cursor{m_blockfile_cursors[chain_type]}; + if (!cursor || cursor->file_num < pos.nFile) { + m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile}; } + // Update the file information with the current block. 
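One observation on the size bookkeeping in UpdateBlockInfo below; this is an inference from the surrounding code, not a claim made by the patch:

```cpp
// The file size below is tracked as a running maximum (contrast
// FindNextBlockPos, which does nSize += nAddSize): during reindex, blocks
// whose parents arrive late are revisited out of order, so the largest
// end-offset seen is presumably the only safe measure of the file's extent.
```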
+ const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block)); + const int nFile = pos.nFile; + if (static_cast(m_blockfile_info.size()) <= nFile) { + m_blockfile_info.resize(nFile + 1); + } + m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime()); + m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize); m_dirty_fileinfo.insert(nFile); - return true; } bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize) @@ -1018,7 +1030,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in - // the FindBlockPos function + // the FindNextBlockPos function if (_pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { // Do not propagate the return code, a failed flush here should not // be an indication for a failed write. If it were propagated here, @@ -1159,28 +1171,20 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatF return true; } -FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp) +FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) { unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block)); - FlatFilePos blockPos; - const auto position_known {dbp != nullptr}; - if (position_known) { - blockPos = *dbp; - } else { - // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for - // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE). - // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk. - nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); - } - if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) { - LogError("%s: FindBlockPos failed\n", __func__); + // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total, + // defined as BLOCK_SERIALIZATION_HEADER_SIZE) + nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); + FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; + if (blockPos.IsNull()) { + LogError("%s: FindNextBlockPos failed\n", __func__); return FlatFilePos(); } - if (!position_known) { - if (!WriteBlockToDisk(block, blockPos)) { - m_opts.notifications.fatalError(_("Failed to write block.")); - return FlatFilePos(); - } + if (!WriteBlockToDisk(block, blockPos)) { + m_opts.notifications.fatalError(_("Failed to write block.")); + return FlatFilePos(); } return blockPos; } @@ -1204,69 +1208,66 @@ class ImportingNow void ImportBlocks(ChainstateManager& chainman, std::vector vImportFiles) { - ScheduleBatchPriority(); - - { - ImportingNow imp{chainman.m_blockman.m_importing}; - - // -reindex - if (fReindex) { - int nFile = 0; - // Map of disk positions for blocks with unknown parent (only used for reindex); - // parent hash -> child disk position, multiple children can have the same parent. 
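Most of the churn in the ImportBlocks hunks that follow is re-indentation from dropping the inner scope, now that ScheduleBatchPriority has moved out to the init thread (see the init.cpp hunk above). The one data structure worth pausing on is the orphan map (annotation, not part of the patch):

```cpp
// The multimap declared below defers blocks read out of order during
// reindex: LoadExternalBlockFile keys each unprocessed child by its parent
// hash and revisits the children once that parent is finally seen.
// Multiple children may share one parent, hence a multimap over a map.
```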
- std::multimap blocks_with_unknown_parent; - while (true) { - FlatFilePos pos(nFile, 0); - if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { - break; // No block files left to reindex - } - AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)}; - if (file.IsNull()) { - break; // This error is logged in OpenBlockFile - } - LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); - chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); - if (chainman.m_interrupt) { - LogPrintf("Interrupt requested. Exit %s\n", __func__); - return; - } - nFile++; + ImportingNow imp{chainman.m_blockman.m_importing}; + + // -reindex + if (chainman.m_blockman.m_reindexing) { + int nFile = 0; + // Map of disk positions for blocks with unknown parent (only used for reindex); + // parent hash -> child disk position, multiple children can have the same parent. + std::multimap blocks_with_unknown_parent; + while (true) { + FlatFilePos pos(nFile, 0); + if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { + break; // No block files left to reindex } - WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); - fReindex = false; - LogPrintf("Reindexing finished\n"); - // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): - chainman.ActiveChainstate().LoadGenesisBlock(); + AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)}; + if (file.IsNull()) { + break; // This error is logged in OpenBlockFile + } + LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); + chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); + if (chainman.m_interrupt) { + LogPrintf("Interrupt requested. Exit %s\n", __func__); + return; + } + nFile++; } - - // -loadblock= - for (const fs::path& path : vImportFiles) { - AutoFile file{fsbridge::fopen(path, "rb")}; - if (!file.IsNull()) { - LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); - chainman.LoadExternalBlockFile(file); - if (chainman.m_interrupt) { - LogPrintf("Interrupt requested. Exit %s\n", __func__); - return; - } - } else { - LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); + WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); + chainman.m_blockman.m_reindexing = false; + LogPrintf("Reindexing finished\n"); + // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): + chainman.ActiveChainstate().LoadGenesisBlock(); + } + + // -loadblock= + for (const fs::path& path : vImportFiles) { + AutoFile file{fsbridge::fopen(path, "rb")}; + if (!file.IsNull()) { + LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); + chainman.LoadExternalBlockFile(file); + if (chainman.m_interrupt) { + LogPrintf("Interrupt requested. Exit %s\n", __func__); + return; } + } else { + LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); } + } - // scan for better chains in the block chain database, that are not yet connected in the active best chain + // scan for better chains in the block chain database, that are not yet connected in the active best chain - // We can't hold cs_main during ActivateBestChain even though we're accessing - // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve - // the relevant pointers before the ABC call. 
- for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { - BlockValidationState state; - if (!chainstate->ActivateBestChain(state, nullptr)) { - chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); - return; - } + // We can't hold cs_main during ActivateBestChain even though we're accessing + // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve + // the relevant pointers before the ABC call. + for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { + BlockValidationState state; + if (!chainstate->ActivateBestChain(state, nullptr)) { + chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); + return; } - } // End scope of ImportingNow + } + // End scope of ImportingNow } std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index be3afa9308..8fa9415b24 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -76,8 +76,6 @@ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB /** Size of header written by WriteBlockToDisk before a serialized CBlock */ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = std::tuple_size_v + sizeof(unsigned int); -extern std::atomic_bool fReindex; - // Because validation code takes pointers to the map's CBlockIndex objects, if // we ever switch to another associative container, we need to either use a // container that has stable addressing (true of all std associative @@ -159,7 +157,16 @@ class BlockManager /** Return false if undo file flushing fails. */ [[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false); - [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown); + /** + * Helper function performing various preparations before a block can be saved to disk: + * Returns the correct position for the block to be saved, which may be in the current or a new + * block file depending on nAddSize. May flush the previous blockfile to disk if full, updates + * blockfile info, and checks if there is enough disk space to save the block. + * + * The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of + * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE). + */ + [[nodiscard]] FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime); [[nodiscard]] bool FlushChainstateBlockFile(int tip_height); bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize); @@ -168,6 +175,12 @@ class BlockManager AutoFile OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const; + /** + * Write a block to disk. The pos argument passed to this function is modified by this call. Before this call, it should + * point to an unused file location where separator fields will be written, followed by the serialized CBlock data. + * After this call, it will point to the beginning of the serialized CBlock data, after the separator fields + * (BLOCK_SERIALIZATION_HEADER_SIZE) + */ bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const; bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const; @@ -210,7 +223,7 @@ class BlockManager //! effectively. //! //! 
This data structure maintains separate blockfile number cursors for each - //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos(). + //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindNextBlockPos(). //! //! The first element is the NORMAL cursor, second is ASSUMED. std::array, BlockfileType::NUM_TYPES> @@ -258,11 +271,19 @@ class BlockManager explicit BlockManager(const util::SignalInterrupt& interrupt, Options opts) : m_prune_mode{opts.prune_target > 0}, m_opts{std::move(opts)}, - m_interrupt{interrupt} {}; + m_interrupt{interrupt}, + m_reindexing{m_opts.reindex} {}; const util::SignalInterrupt& m_interrupt; std::atomic m_importing{false}; + /** + * Tracks if a reindex is currently in progress. Set to true when a reindex + * is requested and false when reindexing completes. Its value is persisted + * in the BlockTreeDB across restarts. + */ + std::atomic_bool m_reindexing; + BlockMap m_block_index GUARDED_BY(cs_main); /** @@ -316,8 +337,24 @@ class BlockManager bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - /** Store block on disk. If dbp is not nullptr, then it provides the known position of the block within a block file on disk. */ - FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp); + /** Store block on disk and update block file statistics. + * + * @param[in] block the block to be stored + * @param[in] nHeight the height of the block + * + * @returns in case of success, the position to which the block was written to + * in case of an error, an empty FlatFilePos + */ + FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight); + + /** Update blockfile info while processing a block during reindex. The block must be available on disk. + * + * @param[in] block the block being processed + * @param[in] nHeight the height of the block + * @param[in] pos the position of the serialized CBlock on disk. This is the position returned + * by WriteBlockToDisk pointing at the CBlock, not the separator fields before it + */ + void UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos); /** Whether running in -prune mode. */ [[nodiscard]] bool IsPruneMode() const { return m_prune_mode; } @@ -326,7 +363,7 @@ class BlockManager [[nodiscard]] uint64_t GetPruneTarget() const { return m_opts.prune_target; } static constexpr auto PRUNE_TARGET_MANUAL{std::numeric_limits::max()}; - [[nodiscard]] bool LoadingBlocks() const { return m_importing || fReindex; } + [[nodiscard]] bool LoadingBlocks() const { return m_importing || m_reindexing; } /** Calculate the amount of disk space the block & undo files currently use */ uint64_t CalculateCurrentUsage(); diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index 3ffcb2342b..10c2b22d02 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -61,8 +61,8 @@ static ChainstateLoadResult CompleteChainstateInitialization( // LoadBlockIndex will load m_have_pruned if we've ever removed a // block file from disk. - // Note that it also sets fReindex global based on the disk flag! - // From here on, fReindex and options.reindex values may be different! + // Note that it also sets m_reindexing based on the disk flag! + // From here on, m_reindexing and options.reindex values may be different! 
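Pulling the reindex-flag plumbing together, since the pieces are scattered across several files (annotation; each line below is taken from a hunk elsewhere in this diff):

```cpp
// m_reindexing lifecycle in this change:
//   BlockManager::BlockManager() : m_reindexing{m_opts.reindex} {}  // seeded from -reindex
//   LoadBlockIndexDB():  if (fReindexing) m_reindexing = true;      // disk flag may re-arm it
//   ImportBlocks():      m_reindexing = false;                      // cleared when reindexing finishes
```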
if (!chainman.LoadBlockIndex()) { if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}}; return {ChainstateLoadStatus::FAILURE, _("Error loading block database")}; @@ -90,7 +90,7 @@ static ChainstateLoadResult CompleteChainstateInitialization( // If we're not mid-reindex (based on disk + args), add a genesis block on disk // (otherwise we use the one already on disk). // This is called again in ImportBlocks after the reindex completes. - if (!fReindex && !chainman.ActiveChainstate().LoadGenesisBlock()) { + if (!chainman.m_blockman.m_reindexing && !chainman.ActiveChainstate().LoadGenesisBlock()) { return {ChainstateLoadStatus::FAILURE, _("Error initializing block database")}; } diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index c886357a34..216f44ab9e 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -318,7 +318,7 @@ class NodeImpl : public Node CFeeRate getDustRelayFee() override { if (!m_context->mempool) return CFeeRate{DUST_RELAY_TX_FEE}; - return m_context->mempool->m_dust_relay_feerate; + return m_context->mempool->m_opts.dust_relay_feerate; } UniValue executeRpc(const std::string& command, const UniValue& params, const std::string& uri) override { @@ -701,7 +701,7 @@ class ChainImpl : public Chain { const CTxMemPool::Limits default_limits{}; - const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_limits : default_limits}; + const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_opts.limits : default_limits}; limit_ancestor_count = limits.ancestor_count; limit_descendant_count = limits.descendant_count; @@ -732,17 +732,17 @@ class ChainImpl : public Chain CFeeRate relayMinFee() override { if (!m_node.mempool) return CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}; - return m_node.mempool->m_min_relay_feerate; + return m_node.mempool->m_opts.min_relay_feerate; } CFeeRate relayIncrementalFee() override { if (!m_node.mempool) return CFeeRate{DEFAULT_INCREMENTAL_RELAY_FEE}; - return m_node.mempool->m_incremental_relay_feerate; + return m_node.mempool->m_opts.incremental_relay_feerate; } CFeeRate relayDustFee() override { if (!m_node.mempool) return CFeeRate{DUST_RELAY_TX_FEE}; - return m_node.mempool->m_dust_relay_feerate; + return m_node.mempool->m_opts.dust_relay_feerate; } bool havePruned() override { diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index f4ad35de25..d3e4b53b62 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -330,7 +330,7 @@ class CTransaction template inline void Serialize(Stream& s) const { - SerializeTransaction(*this, s, s.GetParams()); + SerializeTransaction(*this, s, s.template GetParams()); } /** This deserializing constructor is provided instead of an Unserialize method. 
@@ -338,7 +338,7 @@ class CTransaction template CTransaction(deserialize_type, const TransactionSerParams& params, Stream& s) : CTransaction(CMutableTransaction(deserialize, params, s)) {} template - CTransaction(deserialize_type, ParamsStream& s) : CTransaction(CMutableTransaction(deserialize, s)) {} + CTransaction(deserialize_type, Stream& s) : CTransaction(CMutableTransaction(deserialize, s)) {} bool IsNull() const { return vin.empty() && vout.empty(); @@ -395,12 +395,12 @@ struct CMutableTransaction template inline void Serialize(Stream& s) const { - SerializeTransaction(*this, s, s.GetParams()); + SerializeTransaction(*this, s, s.template GetParams()); } template inline void Unserialize(Stream& s) { - UnserializeTransaction(*this, s, s.GetParams()); + UnserializeTransaction(*this, s, s.template GetParams()); } template @@ -409,7 +409,7 @@ struct CMutableTransaction } template - CMutableTransaction(deserialize_type, ParamsStream& s) { + CMutableTransaction(deserialize_type, Stream& s) { Unserialize(s); } diff --git a/src/protocol.h b/src/protocol.h index 243cd23e6e..fba08c6f55 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -375,9 +375,10 @@ class CAddress : public CService static constexpr SerParams V1_DISK{{CNetAddr::Encoding::V1}, Format::Disk}; static constexpr SerParams V2_DISK{{CNetAddr::Encoding::V2}, Format::Disk}; - SERIALIZE_METHODS_PARAMS(CAddress, obj, SerParams, params) + SERIALIZE_METHODS(CAddress, obj) { bool use_v2; + auto& params = SER_PARAMS(SerParams); if (params.fmt == Format::Disk) { // In the disk serialization format, the encoding (v1 or v2) is determined by a flag version // that's part of the serialization itself. ADDRV2_FORMAT in the stream version only determines diff --git a/src/pubkey.cpp b/src/pubkey.cpp index 11e1b4abb5..13e3c2dbe0 100644 --- a/src/pubkey.cpp +++ b/src/pubkey.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -181,6 +182,17 @@ int ecdsa_signature_parse_der_lax(secp256k1_ecdsa_signature* sig, const unsigned return 1; } +/** Nothing Up My Sleeve (NUMS) point + * + * NUMS_H is a point with an unknown discrete logarithm, constructed by taking the sha256 of 'g' + * (uncompressed encoding), which happens to be a point on the curve. + * + * For an example script for calculating H, refer to the unit tests in + * ./test/functional/test_framework/crypto/secp256k1.py + */ +static const std::vector NUMS_H_DATA{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; +const XOnlyPubKey XOnlyPubKey::NUMS_H{NUMS_H_DATA}; + XOnlyPubKey::XOnlyPubKey(Span bytes) { assert(bytes.size() == 32); diff --git a/src/pubkey.h b/src/pubkey.h index 15d7e7bc07..ae34ddd0af 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -233,6 +233,11 @@ class XOnlyPubKey uint256 m_keydata; public: + /** Nothing Up My Sleeve point H + * Used as an internal key for provably disabling the key path spend + * see BIP341 for more details */ + static const XOnlyPubKey NUMS_H; + /** Construct an empty x-only pubkey. 
*/ XOnlyPubKey() = default; diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp index 9e71f4ce6e..1b07e4569e 100644 --- a/src/qt/askpassphrasedialog.cpp +++ b/src/qt/askpassphrasedialog.cpp @@ -100,10 +100,14 @@ void AskPassphraseDialog::accept() // Cannot encrypt with empty passphrase break; } - QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm wallet encryption"), - tr("Warning: If you encrypt your wallet and lose your passphrase, you will LOSE ALL OF YOUR NAMECOINS AND NAMES!") + "
<br><br>
" + tr("Are you sure you wish to encrypt your wallet?"), - QMessageBox::Yes|QMessageBox::Cancel, - QMessageBox::Cancel); + QMessageBox msgBoxConfirm(QMessageBox::Question, + tr("Confirm wallet encryption"), + tr("Warning: If you encrypt your wallet and lose your passphrase, you will LOSE ALL OF YOUR COINS AND NAMES!") + "
<br><br>
" + tr("Are you sure you wish to encrypt your wallet?"), + QMessageBox::Cancel | QMessageBox::Yes, this); + msgBoxConfirm.button(QMessageBox::Yes)->setText(tr("Continue")); + msgBoxConfirm.button(QMessageBox::Cancel)->setText(tr("Back")); + msgBoxConfirm.setDefaultButton(QMessageBox::Cancel); + QMessageBox::StandardButton retval = (QMessageBox::StandardButton)msgBoxConfirm.exec(); if(retval == QMessageBox::Yes) { if(newpass1 == newpass2) @@ -112,10 +116,19 @@ void AskPassphraseDialog::accept() "your coins and names from being stolen by malware infecting your computer."); if (m_passphrase_out) { m_passphrase_out->assign(newpass1); - QMessageBox::warning(this, tr("Wallet to be encrypted"), - "" + - tr("Your wallet is about to be encrypted. ") + encryption_reminder + - ""); + QMessageBox msgBoxWarning(QMessageBox::Warning, + tr("Wallet to be encrypted"), + "" + + tr("Your wallet is about to be encrypted. ") + encryption_reminder + " " + + tr("Are you sure you wish to encrypt your wallet?") + + "", + QMessageBox::Cancel | QMessageBox::Yes, this); + msgBoxWarning.setDefaultButton(QMessageBox::Cancel); + QMessageBox::StandardButton retval = (QMessageBox::StandardButton)msgBoxWarning.exec(); + if (retval == QMessageBox::Cancel) { + QDialog::reject(); + return; + } } else { assert(model != nullptr); if (model->setWalletEncrypted(newpass1)) { @@ -141,11 +154,7 @@ void AskPassphraseDialog::accept() tr("The supplied passphrases do not match.")); } } - else - { - QDialog::reject(); // Cancelled - } - } break; + } break; case Unlock: try { if (!model->setWalletLocked(false, oldpass)) { diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 819745910f..93504d56e2 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -219,12 +219,12 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn const CTxUndo* txundo = (have_undo && i > 0) ? 
&blockUndo.vtxundo.at(i - 1) : nullptr; UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, /*block_hash=*/uint256(), /*entry=*/objTx, /*include_hex=*/true, txundo, verbosity); - txs.push_back(objTx); + txs.push_back(std::move(objTx)); } break; } - result.pushKV("tx", txs); + result.pushKV("tx", std::move(txs)); return result; } diff --git a/src/rpc/fees.cpp b/src/rpc/fees.cpp index f3345b4f1c..b933d8c647 100644 --- a/src/rpc/fees.cpp +++ b/src/rpc/fees.cpp @@ -65,7 +65,7 @@ static RPCHelpMan estimatesmartfee() const NodeContext& node = EnsureAnyNodeContext(request.context); const CTxMemPool& mempool = EnsureMemPool(node); - CHECK_NONFATAL(mempool.m_signals)->SyncWithValidationInterfaceQueue(); + CHECK_NONFATAL(mempool.m_opts.signals)->SyncWithValidationInterfaceQueue(); unsigned int max_target = fee_estimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE); unsigned int conf_target = ParseConfirmTarget(request.params[0], max_target); bool conservative = true; @@ -83,7 +83,7 @@ static RPCHelpMan estimatesmartfee() CFeeRate feeRate{fee_estimator.estimateSmartFee(conf_target, &feeCalc, conservative)}; if (feeRate != CFeeRate(0)) { CFeeRate min_mempool_feerate{mempool.GetMinFee()}; - CFeeRate min_relay_feerate{mempool.m_min_relay_feerate}; + CFeeRate min_relay_feerate{mempool.m_opts.min_relay_feerate}; feeRate = std::max({feeRate, min_mempool_feerate, min_relay_feerate}); result.pushKV("feerate", ValueFromAmount(feeRate.GetFeePerK())); } else { diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 0672ec34b3..e599c7dc92 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -671,12 +671,12 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool) ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize()); ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage()); ret.pushKV("total_fee", ValueFromAmount(pool.GetTotalFee())); - ret.pushKV("maxmempool", pool.m_max_size_bytes); - ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(), pool.m_min_relay_feerate).GetFeePerK())); - ret.pushKV("minrelaytxfee", ValueFromAmount(pool.m_min_relay_feerate.GetFeePerK())); - ret.pushKV("incrementalrelayfee", ValueFromAmount(pool.m_incremental_relay_feerate.GetFeePerK())); + ret.pushKV("maxmempool", pool.m_opts.max_size_bytes); + ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(), pool.m_opts.min_relay_feerate).GetFeePerK())); + ret.pushKV("minrelaytxfee", ValueFromAmount(pool.m_opts.min_relay_feerate.GetFeePerK())); + ret.pushKV("incrementalrelayfee", ValueFromAmount(pool.m_opts.incremental_relay_feerate.GetFeePerK())); ret.pushKV("unbroadcastcount", uint64_t{pool.GetUnbroadcastTxs().size()}); - ret.pushKV("fullrbf", pool.m_full_rbf); + ret.pushKV("fullrbf", pool.m_opts.full_rbf); return ret; } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index a3151306ab..a797b5214d 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -697,8 +697,8 @@ static RPCHelpMan getnetworkinfo() obj.pushKV("networks", GetNetworksInfo()); if (node.mempool) { // Those fields can be deprecated, to be replaced by the getmempoolinfo fields - obj.pushKV("relayfee", ValueFromAmount(node.mempool->m_min_relay_feerate.GetFeePerK())); - obj.pushKV("incrementalfee", ValueFromAmount(node.mempool->m_incremental_relay_feerate.GetFeePerK())); + obj.pushKV("relayfee", ValueFromAmount(node.mempool->m_opts.min_relay_feerate.GetFeePerK())); + obj.pushKV("incrementalfee", ValueFromAmount(node.mempool->m_opts.incremental_relay_feerate.GetFeePerK())); } UniValue localAddresses(UniValue::VARR); { diff --git 
a/src/rpc/protocol.h b/src/rpc/protocol.h index 8cc3cb0bbf..145e4d909e 100644 --- a/src/rpc/protocol.h +++ b/src/rpc/protocol.h @@ -10,6 +10,7 @@ enum HTTPStatusCode { HTTP_OK = 200, + HTTP_NO_CONTENT = 204, HTTP_BAD_REQUEST = 400, HTTP_UNAUTHORIZED = 401, HTTP_FORBIDDEN = 403, diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index b7acd62ee3..d35782189e 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -26,6 +26,17 @@ * * 1.0 spec: http://json-rpc.org/wiki/specification * 1.2 spec: http://jsonrpc.org/historical/json-rpc-over-http.html + * + * If the server receives a request with the JSON-RPC 2.0 marker `{"jsonrpc": "2.0"}` + * then Bitcoin will respond with a strictly specified response. + * It will only return an HTTP error code if an actual HTTP error is encountered + * such as the endpoint is not found (404) or the request is not formatted correctly (500). + * Otherwise the HTTP code is always OK (200) and RPC errors will be included in the + * response body. + * + * 2.0 spec: https://www.jsonrpc.org/specification + * + * Also see http://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0 */ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id) @@ -37,24 +48,25 @@ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, return request; } -UniValue JSONRPCReplyObj(const UniValue& result, const UniValue& error, const UniValue& id) +UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional id, JSONRPCVersion jsonrpc_version) { UniValue reply(UniValue::VOBJ); - if (!error.isNull()) - reply.pushKV("result", NullUniValue); - else - reply.pushKV("result", result); - reply.pushKV("error", error); - reply.pushKV("id", id); + // Add JSON-RPC version number field in v2 only. + if (jsonrpc_version == JSONRPCVersion::V2) reply.pushKV("jsonrpc", "2.0"); + + // Add both result and error fields in v1, even though one will be null. + // Omit the null field in v2. + if (error.isNull()) { + reply.pushKV("result", std::move(result)); + if (jsonrpc_version == JSONRPCVersion::V1_LEGACY) reply.pushKV("error", NullUniValue); + } else { + if (jsonrpc_version == JSONRPCVersion::V1_LEGACY) reply.pushKV("result", NullUniValue); + reply.pushKV("error", std::move(error)); + } + if (id.has_value()) reply.pushKV("id", std::move(id.value())); return reply; } -std::string JSONRPCReply(const UniValue& result, const UniValue& error, const UniValue& id) -{ - UniValue reply = JSONRPCReplyObj(result, error, id); - return reply.write() + "\n"; -} - UniValue JSONRPCError(int code, const std::string& message) { UniValue error(UniValue::VOBJ); @@ -171,7 +183,30 @@ void JSONRPCRequest::parse(const UniValue& valRequest) const UniValue& request = valRequest.get_obj(); // Parse id now so errors from here on will have the id - id = request.find_value("id"); + if (request.exists("id")) { + id = request.find_value("id"); + } else { + id = std::nullopt; + } + + // Check for JSON-RPC 2.0 (default 1.1) + m_json_version = JSONRPCVersion::V1_LEGACY; + const UniValue& jsonrpc_version = request.find_value("jsonrpc"); + if (!jsonrpc_version.isNull()) { + if (!jsonrpc_version.isStr()) { + throw JSONRPCError(RPC_INVALID_REQUEST, "jsonrpc field must be a string"); + } + // The "jsonrpc" key was added in the 2.0 spec, but some older documentation + // incorrectly included {"jsonrpc":"1.0"} in a request object, so we + // maintain that for backwards compatibility. 
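    // Illustration (invented method name; UniValue API as used elsewhere in
    // this patch) of request bodies that exercise each branch below:
    //
    //     UniValue req(UniValue::VOBJ);
    //     req.pushKV("jsonrpc", "2.0");        // parsed below as JSONRPCVersion::V2
    //     // omitting "jsonrpc", or sending "1.0", keeps V1_LEGACY behaviour
    //     req.pushKV("id", 1);                 // a V2 request with no "id" key at all
    //                                          // is a notification: it is executed but
    //                                          // gets no reply body, which is presumably
    //                                          // what the HTTP_NO_CONTENT (204) code
    //                                          // added above is for
    //     req.pushKV("method", "getblockcount");
    //     req.pushKV("params", UniValue(UniValue::VARR));
    //
    // Reply shape (see JSONRPCReplyObj earlier in this diff): V1_LEGACY replies
    // always carry both "result" and "error", one of them null; V2 replies add
    // "jsonrpc": "2.0" and omit whichever of the two is null.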
+ if (jsonrpc_version.get_str() == "1.0") { + m_json_version = JSONRPCVersion::V1_LEGACY; + } else if (jsonrpc_version.get_str() == "2.0") { + m_json_version = JSONRPCVersion::V2; + } else { + throw JSONRPCError(RPC_INVALID_REQUEST, "JSON-RPC version not supported"); + } + } // Parse method const UniValue& valMethod{request.find_value("method")}; diff --git a/src/rpc/request.h b/src/rpc/request.h index 5a886bf7a2..6b478437db 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -7,13 +7,18 @@ #define BITCOIN_RPC_REQUEST_H #include +#include #include #include +enum class JSONRPCVersion { + V1_LEGACY, + V2 +}; + UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); -UniValue JSONRPCReplyObj(const UniValue& result, const UniValue& error, const UniValue& id); -std::string JSONRPCReply(const UniValue& result, const UniValue& error, const UniValue& id); +UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional id, JSONRPCVersion jsonrpc_version); UniValue JSONRPCError(int code, const std::string& message); /** Generate a new RPC authentication cookie and write it to disk */ @@ -28,7 +33,7 @@ std::vector JSONRPCProcessBatchReply(const UniValue& in); class JSONRPCRequest { public: - UniValue id; + std::optional id = UniValue::VNULL; std::string strMethod; UniValue params; enum Mode { EXECUTE, GET_HELP, GET_ARGS } mode = EXECUTE; @@ -37,8 +42,10 @@ class JSONRPCRequest std::string peerAddr; std::any context; std::any context2; + JSONRPCVersion m_json_version = JSONRPCVersion::V1_LEGACY; void parse(const UniValue& valRequest); + [[nodiscard]] bool IsNotification() const { return !id.has_value() && m_json_version == JSONRPCVersion::V2; }; }; #endif // BITCOIN_RPC_REQUEST_H diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index a800451f4a..1ed406354a 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -360,36 +360,22 @@ bool IsDeprecatedRPCEnabled(const std::string& method) return find(enabled_methods.begin(), enabled_methods.end(), method) != enabled_methods.end(); } -static UniValue JSONRPCExecOne(JSONRPCRequest jreq, const UniValue& req) -{ - UniValue rpc_result(UniValue::VOBJ); - - try { - jreq.parse(req); - - UniValue result = tableRPC.execute(jreq); - rpc_result = JSONRPCReplyObj(result, NullUniValue, jreq.id); - } - catch (const UniValue& objError) - { - rpc_result = JSONRPCReplyObj(NullUniValue, objError, jreq.id); - } - catch (const std::exception& e) - { - rpc_result = JSONRPCReplyObj(NullUniValue, - JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); +UniValue JSONRPCExec(const JSONRPCRequest& jreq, bool catch_errors) +{ + UniValue result; + if (catch_errors) { + try { + result = tableRPC.execute(jreq); + } catch (UniValue& e) { + return JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version); + } catch (const std::exception& e) { + return JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_MISC_ERROR, e.what()), jreq.id, jreq.m_json_version); + } + } else { + result = tableRPC.execute(jreq); } - return rpc_result; -} - -std::string JSONRPCExecBatch(const JSONRPCRequest& jreq, const UniValue& vReq) -{ - UniValue ret(UniValue::VARR); - for (unsigned int reqIdx = 0; reqIdx < vReq.size(); reqIdx++) - ret.push_back(JSONRPCExecOne(jreq, vReq[reqIdx])); - - return ret.write() + "\n"; + return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id, jreq.m_json_version); } /** diff --git a/src/rpc/server.h b/src/rpc/server.h index b8348e4aa6..5735aff821 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h 
@@ -179,6 +179,6 @@ extern CRPCTable tableRPC; void StartRPC(); void InterruptRPC(); void StopRPC(); -std::string JSONRPCExecBatch(const JSONRPCRequest& jreq, const UniValue& vReq); +UniValue JSONRPCExec(const JSONRPCRequest& jreq, bool catch_errors); #endif // BITCOIN_RPC_SERVER_H diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 25ac08c749..f5a628be15 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -175,7 +175,7 @@ std::string HelpExampleCliNamed(const std::string& methodname, const RPCArgList& std::string HelpExampleRpc(const std::string& methodname, const std::string& args) { - return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", " + return "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", " "\"method\": \"" + methodname + "\", \"params\": [" + args + "]}' -H 'content-type: text/plain;' http://127.0.0.1:8336/\n"; } @@ -186,7 +186,7 @@ std::string HelpExampleRpcNamed(const std::string& methodname, const RPCArgList& params.pushKV(param.first, param.second); } - return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", " + return "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", " "\"method\": \"" + methodname + "\", \"params\": " + params.write() + "}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"; } diff --git a/src/serialize.h b/src/serialize.h index 2f13fba582..35519056a5 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -182,9 +182,8 @@ const Out& AsBase(const In& x) static void SerializationOps(Type& obj, Stream& s, Operation ser_action) /** - * Variant of FORMATTER_METHODS that supports a declared parameter type. - * - * If a formatter has a declared parameter type, it must be invoked directly or + * Formatter methods can retrieve parameters attached to a stream using the + * SER_PARAMS(type) macro as long as the stream is created directly or * indirectly with a parameter of that type. This permits making serialization * depend on run-time context in a type-safe way. * @@ -192,7 +191,8 @@ const Out& AsBase(const In& x) * struct BarParameter { bool fancy; ... }; * struct Bar { ... }; * struct FooFormatter { - * FORMATTER_METHODS(Bar, obj, BarParameter, param) { + * FORMATTER_METHODS(Bar, obj) { + * auto& param = SER_PARAMS(BarParameter); * if (param.fancy) { * READWRITE(VARINT(obj.value)); * } else { @@ -214,13 +214,7 @@ const Out& AsBase(const In& x) * Compilation will fail in any context where serialization is invoked but * no parameter of a type convertible to BarParameter is provided. */ -#define FORMATTER_METHODS_PARAMS(cls, obj, paramcls, paramobj) \ - template \ - static void Ser(Stream& s, const cls& obj) { SerializationOps(obj, s, ActionSerialize{}, s.GetParams()); } \ - template \ - static void Unser(Stream& s, cls& obj) { SerializationOps(obj, s, ActionUnserialize{}, s.GetParams()); } \ - template \ - static void SerializationOps(Type& obj, Stream& s, Operation ser_action, const paramcls& paramobj) +#define SER_PARAMS(type) (s.template GetParams()) #define BASE_SERIALIZE_METHODS(cls) \ template \ @@ -247,15 +241,6 @@ const Out& AsBase(const In& x) BASE_SERIALIZE_METHODS(cls) \ FORMATTER_METHODS(cls, obj) -/** - * Variant of SERIALIZE_METHODS that supports a declared parameter type. - * - * See FORMATTER_METHODS_PARAMS for more information on parameters. 
- */ -#define SERIALIZE_METHODS_PARAMS(cls, obj, paramcls, paramobj) \ - BASE_SERIALIZE_METHODS(cls) \ - FORMATTER_METHODS_PARAMS(cls, obj, paramcls, paramobj) - // Templates for serializing to anything that looks like a stream, // i.e. anything that supports .read(Span) and .write(Span) // @@ -1118,27 +1103,85 @@ size_t GetSerializeSize(const T& t) return (SizeComputer() << t).size(); } -/** Wrapper that overrides the GetParams() function of a stream (and hides GetVersion/GetType). */ -template +//! Check if type contains a stream by seeing if has a GetStream() method. +template +concept ContainsStream = requires(T t) { t.GetStream(); }; + +/** Wrapper that overrides the GetParams() function of a stream. */ +template class ParamsStream { const Params& m_params; - SubStream& m_substream; // private to avoid leaking version/type into serialization code that shouldn't see it + // If ParamsStream constructor is passed an lvalue argument, Substream will + // be a reference type, and m_substream will reference that argument. + // Otherwise m_substream will be a substream instance and move from the + // argument. Letting ParamsStream contain a substream instance instead of + // just a reference is useful to make the ParamsStream object self contained + // and let it do cleanup when destroyed, for example by closing files if + // SubStream is a file stream. + SubStream m_substream; public: - ParamsStream(const Params& params LIFETIMEBOUND, SubStream& substream LIFETIMEBOUND) : m_params{params}, m_substream{substream} {} + ParamsStream(SubStream&& substream, const Params& params LIFETIMEBOUND) : m_params{params}, m_substream{std::forward(substream)} {} + + template + ParamsStream(NestedSubstream&& s, const Params1& params1 LIFETIMEBOUND, const Params2& params2 LIFETIMEBOUND, const NestedParams&... params LIFETIMEBOUND) + : ParamsStream{::ParamsStream{std::forward(s), params2, params...}, params1} {} + template ParamsStream& operator<<(const U& obj) { ::Serialize(*this, obj); return *this; } template ParamsStream& operator>>(U&& obj) { ::Unserialize(*this, obj); return *this; } - void write(Span src) { m_substream.write(src); } - void read(Span dst) { m_substream.read(dst); } - void ignore(size_t num) { m_substream.ignore(num); } - bool eof() const { return m_substream.eof(); } - size_t size() const { return m_substream.size(); } - const Params& GetParams() const { return m_params; } - int GetVersion() = delete; // Deprecated with Params usage - int GetType() = delete; // Deprecated with Params usage + void write(Span src) { GetStream().write(src); } + void read(Span dst) { GetStream().read(dst); } + void ignore(size_t num) { GetStream().ignore(num); } + bool eof() const { return GetStream().eof(); } + size_t size() const { return GetStream().size(); } + + //! Get reference to stream parameters. + template + const auto& GetParams() const + { + if constexpr (std::is_convertible_v) { + return m_params; + } else { + return m_substream.template GetParams
<P>
(); + } + } + + //! Get reference to underlying stream. + auto& GetStream() + { + if constexpr (ContainsStream) { + return m_substream.GetStream(); + } else { + return m_substream; + } + } + const auto& GetStream() const + { + if constexpr (ContainsStream) { + return m_substream.GetStream(); + } else { + return m_substream; + } + } }; +/** + * Explicit template deduction guide is required for single-parameter + * constructor so Substream&& is treated as a forwarding reference, and + * SubStream is deduced as reference type for lvalue arguments. + */ +template +ParamsStream(Substream&&, const Params&) -> ParamsStream; + +/** + * Template deduction guide for multiple params arguments that creates a nested + * ParamsStream. + */ +template +ParamsStream(Substream&& s, const Params1& params1, const Params2& params2, const Params&... params) -> + ParamsStream(s), params2, params...}), Params1>; + /** Wrapper that serializes objects with the specified parameters. */ template class ParamsWrapper @@ -1152,13 +1195,13 @@ class ParamsWrapper template void Serialize(Stream& s) const { - ParamsStream ss{m_params, s}; + ParamsStream ss{s, m_params}; ::Serialize(ss, m_object); } template void Unserialize(Stream& s) { - ParamsStream ss{m_params, s}; + ParamsStream ss{s, m_params}; ::Unserialize(ss, m_object); } }; @@ -1176,7 +1219,7 @@ class ParamsWrapper /** \ * Return a wrapper around t that (de)serializes it with specified parameter params. \ * \ - * See FORMATTER_METHODS_PARAMS for more information on serialization parameters. \ + * See SER_PARAMS for more information on serialization parameters. \ */ \ template \ auto operator()(T&& t) const \ diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index d7ac0bf823..efe0983698 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -35,20 +35,20 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos) }; BlockManager blockman{*Assert(m_node.shutdown), blockman_opts}; // simulate adding a genesis block normally - BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0, nullptr).nPos, BLOCK_SERIALIZATION_HEADER_SIZE); + BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0).nPos, BLOCK_SERIALIZATION_HEADER_SIZE); // simulate what happens during reindex // simulate a well-formed genesis block being found at offset 8 in the blk00000.dat file // the block is found at offset 8 because there is an 8 byte serialization header // consisting of 4 magic bytes + 4 length bytes before each block in a well-formed blk file. - FlatFilePos pos{0, BLOCK_SERIALIZATION_HEADER_SIZE}; - BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0, &pos).nPos, BLOCK_SERIALIZATION_HEADER_SIZE); + const FlatFilePos pos{0, BLOCK_SERIALIZATION_HEADER_SIZE}; + blockman.UpdateBlockInfo(params->GenesisBlock(), 0, pos); // now simulate what happens after reindex for the first new block processed // the actual block contents don't matter, just that it's a block. // verify that the write position is at offset 0x12d. 
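    // (0x12d is 301 in decimal; the byte arithmetic below shows where 301 comes from.)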
// this is a check to make sure that https://github.com/bitcoin/bitcoin/issues/21379 does not recur // 8 bytes (for serialization header) + 285 (for serialized genesis block) = 293 // add another 8 bytes for the second block's serialization header and we get 293 + 8 = 301 - FlatFilePos actual{blockman.SaveBlockToDisk(params->GenesisBlock(), 1, nullptr)}; + FlatFilePos actual{blockman.SaveBlockToDisk(params->GenesisBlock(), 1)}; BOOST_CHECK_EQUAL(actual.nPos, BLOCK_SERIALIZATION_HEADER_SIZE + ::GetSerializeSize(TX_WITH_WITNESS(params->GenesisBlock())) + BLOCK_SERIALIZATION_HEADER_SIZE); } @@ -156,12 +156,11 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) // Blockstore is empty BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), 0); - // Write the first block; dbp=nullptr means this block doesn't already have a disk - // location, so allocate a free location and write it there. - FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1, /*dbp=*/nullptr)}; + // Write the first block to a new location. + FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1)}; // Write second block - FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2, /*dbp=*/nullptr)}; + FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2)}; // Two blocks in the file BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); @@ -181,22 +180,19 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) BOOST_CHECK_EQUAL(read_block.nVersion, 2); } - // When FlatFilePos* dbp is given, SaveBlockToDisk() will not write or - // overwrite anything to the flat file block storage. It will, however, - // update the blockfile metadata. This is to facilitate reindexing - // when the user has the blocks on disk but the metadata is being rebuilt. + // During reindex, the flat file block storage will not be written to. + // UpdateBlockInfo will, however, update the blockfile metadata. // Verify this behavior by attempting (and failing) to write block 3 data // to block 2 location. CBlockFileInfo* block_data = blockman.GetBlockFileInfo(0); BOOST_CHECK_EQUAL(block_data->nBlocks, 2); - BOOST_CHECK(blockman.SaveBlockToDisk(block3, /*nHeight=*/3, /*dbp=*/&pos2) == pos2); + blockman.UpdateBlockInfo(block3, /*nHeight=*/3, /*pos=*/pos2); // Metadata is updated... BOOST_CHECK_EQUAL(block_data->nBlocks, 3); // ...but there are still only two blocks in the file BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); // Block 2 was not overwritten: - // SaveBlockToDisk() did not call WriteBlockToDisk() because `FlatFilePos* dbp` was non-null blockman.ReadBlockFromDisk(read_block, pos2); BOOST_CHECK_EQUAL(read_block.nVersion, 2); } diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index f10007222c..7e71af7c44 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -309,9 +309,6 @@ const struct KeyComparator { // A dummy scriptsig to pass to VerifyScript (we always use Segwit v0). const CScript DUMMY_SCRIPTSIG; -//! Public key to be used as internal key for dummy Taproot spends. -const std::vector NUMS_PK{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - //! Construct a miniscript node as a shared_ptr. template NodeRef MakeNodeRef(Args&&... 
args) { return miniscript::MakeNodeRef(miniscript::internal::NoDupCheck{}, std::forward(args)...); @@ -1018,7 +1015,7 @@ CScript ScriptPubKey(MsCtx ctx, const CScript& script, TaprootBuilder& builder) // For Taproot outputs we always use a tree with a single script and a dummy internal key. builder.Add(0, script, TAPROOT_LEAF_TAPSCRIPT); - builder.Finalize(XOnlyPubKey{NUMS_PK}); + builder.Finalize(XOnlyPubKey::NUMS_H); return GetScriptForDestination(builder.GetOutput()); } diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index a44f47b00d..7bebb987b1 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -37,13 +37,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) SetMockTime(ConsumeTime(fuzzed_data_provider)); TxOrphanage orphanage; - std::vector outpoints; + std::set outpoints; + // initial outpoints used to construct transactions later for (uint8_t i = 0; i < 4; i++) { - outpoints.emplace_back(Txid::FromUint256(uint256{i}), 0); + outpoints.emplace(Txid::FromUint256(uint256{i}), 0); } - // if true, allow duplicate input when constructing tx - const bool duplicate_input = fuzzed_data_provider.ConsumeBool(); CTransactionRef ptx_potential_parent = nullptr; @@ -53,29 +52,22 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) const CTransactionRef tx = [&] { CMutableTransaction tx_mut; const auto num_in = fuzzed_data_provider.ConsumeIntegralInRange(1, outpoints.size()); - const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange(1, outpoints.size()); - // pick unique outpoints from outpoints as input + const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange(1, 256); + // pick outpoints from outpoints as input. We allow input duplicates on purpose, given we are not + // running any transaction validation logic before adding transactions to the orphanage for (uint32_t i = 0; i < num_in; i++) { auto& prevout = PickValue(fuzzed_data_provider, outpoints); - tx_mut.vin.emplace_back(prevout); - // pop the picked outpoint if duplicate input is not allowed - if (!duplicate_input) { - std::swap(prevout, outpoints.back()); - outpoints.pop_back(); - } + // try making transactions unique by setting a random nSequence, but allow duplicate transactions if they happen + tx_mut.vin.emplace_back(prevout, CScript{}, fuzzed_data_provider.ConsumeIntegralInRange(0, CTxIn::SEQUENCE_FINAL)); } // output amount will not affect txorphanage for (uint32_t i = 0; i < num_out; i++) { tx_mut.vout.emplace_back(CAmount{0}, CScript{}); } - // restore previously popped outpoints - for (auto& in : tx_mut.vin) { - outpoints.push_back(in.prevout); - } auto new_tx = MakeTransactionRef(tx_mut); - // add newly constructed transaction to outpoints + // add newly constructed outpoints to the coin pool for (uint32_t i = 0; i < num_out; i++) { - outpoints.emplace_back(new_tx->GetHash(), i); + outpoints.emplace(new_tx->GetHash(), i); } return new_tx; }(); @@ -112,13 +104,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) { CTransactionRef ref = orphanage.GetTxToReconsider(peer_id); if (ref) { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(ref->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(ref->GetWitnessHash())); - Assert(have_tx); + Assert(orphanage.HaveTx(ref->GetWitnessHash())); } } }, [&] { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + bool have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // AddTx should return false if tx is too big or already have it // tx weight is unknown, we 
only check when tx is already in orphanage { @@ -126,7 +117,7 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) // have_tx == true -> add_tx == false Assert(!have_tx || !add_tx); } - have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + have_tx = orphanage.HaveTx(tx->GetWitnessHash()); { bool add_tx = orphanage.AddTx(tx, peer_id); // if have_tx is still false, it must be too big @@ -135,15 +126,15 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) } }, [&] { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + bool have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // EraseTx should return 0 if m_orphans doesn't have the tx { - Assert(have_tx == orphanage.EraseTx(tx->GetHash())); + Assert(have_tx == orphanage.EraseTx(tx->GetWitnessHash())); } - have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // have_tx should be false and EraseTx should fail { - Assert(!have_tx && !orphanage.EraseTx(tx->GetHash())); + Assert(!have_tx && !orphanage.EraseTx(tx->GetWitnessHash())); } }, [&] { diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index 86a8d17a76..aaf4ca4977 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -364,4 +365,13 @@ BOOST_AUTO_TEST_CASE(key_ellswift) } } +BOOST_AUTO_TEST_CASE(bip341_test_h) +{ + std::vector G_uncompressed = ParseHex("0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"); + HashWriter hw; + hw.write(MakeByteSpan(G_uncompressed)); + XOnlyPubKey H{hw.GetSHA256()}; + BOOST_CHECK(XOnlyPubKey::NUMS_H == H); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index a3699f84b6..7e39e9e4de 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -288,9 +288,6 @@ class TestSignatureChecker : public BaseSignatureChecker { } }; -//! Public key to be used as internal key for dummy Taproot spends. -const std::vector NUMS_PK{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - using Fragment = miniscript::Fragment; using NodeRef = miniscript::NodeRef; using miniscript::operator"" _mst; @@ -330,7 +327,7 @@ CScript ScriptPubKey(miniscript::MiniscriptContext ctx, const CScript& script, T // For Taproot outputs we always use a tree with a single script and a dummy internal key. builder.Add(0, script, TAPROOT_LEAF_TAPSCRIPT); - builder.Finalize(XOnlyPubKey{NUMS_PK}); + builder.Finalize(XOnlyPubKey::NUMS_H); return GetScriptForDestination(builder.GetOutput()); } diff --git a/src/test/net_peer_connection_tests.cpp b/src/test/net_peer_connection_tests.cpp index c571d8d174..5cd7c48fed 100644 --- a/src/test/net_peer_connection_tests.cpp +++ b/src/test/net_peer_connection_tests.cpp @@ -111,6 +111,12 @@ BOOST_AUTO_TEST_CASE(test_addnode_getaddednodeinfo_and_connection_detection) AddPeer(id, nodes, *peerman, *connman, ConnectionType::BLOCK_RELAY, /*onion_peer=*/true); AddPeer(id, nodes, *peerman, *connman, ConnectionType::INBOUND); + // Add a CJDNS peer connection. 
+ AddPeer(id, nodes, *peerman, *connman, ConnectionType::INBOUND, /*onion_peer=*/false, + /*address=*/"[fc00:3344:5566:7788:9900:aabb:ccdd:eeff]:1234"); + BOOST_CHECK(nodes.back()->IsInboundConn()); + BOOST_CHECK_EQUAL(nodes.back()->ConnectedThroughNetwork(), Network::NET_CJDNS); + BOOST_TEST_MESSAGE("Call AddNode() for all the peers"); for (auto node : connman->TestNodes()) { BOOST_CHECK(connman->AddNode({/*m_added_node=*/node->addr.ToStringAddrPort(), /*m_use_v2transport=*/true})); diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp index b2643cf678..450bf6a4fc 100644 --- a/src/test/orphanage_tests.cpp +++ b/src/test/orphanage_tests.cpp @@ -30,8 +30,8 @@ class TxOrphanageTest : public TxOrphanage CTransactionRef RandomOrphan() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) { LOCK(m_mutex); - std::map::iterator it; - it = m_orphans.lower_bound(Txid::FromUint256(InsecureRand256())); + std::map::iterator it; + it = m_orphans.lower_bound(Wtxid::FromUint256(InsecureRand256())); if (it == m_orphans.end()) it = m_orphans.begin(); return it->second.tx; @@ -70,6 +70,16 @@ static CTransactionRef MakeTransactionSpending(const std::vector& out return MakeTransactionRef(tx); } +// Make another (not necessarily valid) tx with the same txid but different wtxid. +static CTransactionRef MakeMutation(const CTransactionRef& ptx) +{ + CMutableTransaction tx(*ptx); + tx.vin[0].scriptWitness.stack.push_back({5}); + auto mutated_tx = MakeTransactionRef(tx); + assert(ptx->GetHash() == mutated_tx->GetHash()); + return mutated_tx; +} + static bool EqualTxns(const std::set& set_txns, const std::vector& vec_txns) { if (vec_txns.size() != set_txns.size()) return false; @@ -180,6 +190,49 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) BOOST_CHECK(orphanage.CountOrphans() == 0); } +BOOST_AUTO_TEST_CASE(same_txid_diff_witness) +{ + FastRandomContext det_rand{true}; + TxOrphanage orphanage; + NodeId peer{0}; + + std::vector empty_outpoints; + auto parent = MakeTransactionSpending(empty_outpoints, det_rand); + + // Create children to go into orphanage. + auto child_normal = MakeTransactionSpending({{parent->GetHash(), 0}}, det_rand); + auto child_mutated = MakeMutation(child_normal); + + const auto& normal_wtxid = child_normal->GetWitnessHash(); + const auto& mutated_wtxid = child_mutated->GetWitnessHash(); + BOOST_CHECK(normal_wtxid != mutated_wtxid); + + BOOST_CHECK(orphanage.AddTx(child_normal, peer)); + // EraseTx fails as transaction by this wtxid doesn't exist. + BOOST_CHECK_EQUAL(orphanage.EraseTx(mutated_wtxid), 0); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); + + // Must succeed. Both transactions should be present in orphanage. + BOOST_CHECK(orphanage.AddTx(child_mutated, peer)); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(orphanage.HaveTx(mutated_wtxid)); + + // Outpoints map should track all entries: check that both are returned as children of the parent. 
+ std::set expected_children{child_normal, child_mutated}; + BOOST_CHECK(EqualTxns(expected_children, orphanage.GetChildrenFromSamePeer(parent, peer))); + + // Erase by wtxid: mutated first + BOOST_CHECK_EQUAL(orphanage.EraseTx(mutated_wtxid), 1); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); + + BOOST_CHECK_EQUAL(orphanage.EraseTx(normal_wtxid), 1); + BOOST_CHECK(!orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); +} + + BOOST_AUTO_TEST_CASE(get_children) { FastRandomContext det_rand{true}; diff --git a/src/test/rpc_tests.cpp b/src/test/rpc_tests.cpp index d246f146ec..37589e2430 100644 --- a/src/test/rpc_tests.cpp +++ b/src/test/rpc_tests.cpp @@ -552,7 +552,7 @@ BOOST_AUTO_TEST_CASE(help_example) // test different argument types const RPCArgList& args = {{"foo", "bar"}, {"b", true}, {"n", 1}}; BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", args), "> bitcoin-cli -named test foo=bar b=true n=1\n"); - BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", args), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"foo\":\"bar\",\"b\":true,\"n\":1}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); + BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", args), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"foo\":\"bar\",\"b\":true,\"n\":1}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); // test shell escape BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"foo", "b'ar"}}), "> bitcoin-cli -named test foo='b'''ar'\n"); @@ -565,7 +565,7 @@ BOOST_AUTO_TEST_CASE(help_example) obj_value.pushKV("b", false); obj_value.pushKV("n", 1); BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"name", obj_value}}), "> bitcoin-cli -named test name='{\"foo\":\"bar\",\"b\":false,\"n\":1}'\n"); - BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", obj_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":{\"foo\":\"bar\",\"b\":false,\"n\":1}}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); + BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", obj_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":{\"foo\":\"bar\",\"b\":false,\"n\":1}}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); // test array params UniValue arr_value(UniValue::VARR); @@ -573,7 +573,7 @@ BOOST_AUTO_TEST_CASE(help_example) arr_value.push_back(false); arr_value.push_back(1); BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"name", arr_value}}), "> bitcoin-cli -named test name='[\"bar\",false,1]'\n"); - BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", arr_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":[\"bar\",false,1]}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); + BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", arr_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":[\"bar\",false,1]}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); // test types don't matter for shell BOOST_CHECK_EQUAL(HelpExampleCliNamed("foo", {{"arg", true}}), HelpExampleCliNamed("foo", {{"arg", "true"}})); diff --git 
a/src/test/script_tests.cpp b/src/test/script_tests.cpp index e4142e203c..314b26609c 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1268,8 +1268,7 @@ BOOST_AUTO_TEST_CASE(sign_invalid_miniscript) const auto invalid_pubkey{ParseHex("173d36c8c9c9c9ffffffffffff0200000000021e1e37373721361818181818181e1e1e1e19000000000000000000b19292929292926b006c9b9b9292")}; TaprootBuilder builder; builder.Add(0, {invalid_pubkey}, 0xc0); - XOnlyPubKey nums{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - builder.Finalize(nums); + builder.Finalize(XOnlyPubKey::NUMS_H); prev.vout.emplace_back(0, GetScriptForDestination(builder.GetOutput())); curr.vin.emplace_back(COutPoint{prev.GetHash(), 0}); sig_data.tr_spenddata = builder.GetSpendData(); diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index d75eb499b4..b28e1b4196 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -15,6 +15,18 @@ BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup) +// For testing move-semantics, declare a version of datastream that can be moved +// but is not copyable. +class UncopyableStream : public DataStream +{ +public: + using DataStream::DataStream; + UncopyableStream(const UncopyableStream&) = delete; + UncopyableStream& operator=(const UncopyableStream&) = delete; + UncopyableStream(UncopyableStream&&) noexcept = default; + UncopyableStream& operator=(UncopyableStream&&) noexcept = default; +}; + class CSerializeMethodsTestSingle { protected: @@ -289,7 +301,7 @@ class Base template void Serialize(Stream& s) const { - if (s.GetParams().m_base_format == BaseFormat::RAW) { + if (s.template GetParams().m_base_format == BaseFormat::RAW) { s << m_base_data; } else { s << Span{HexStr(Span{&m_base_data, 1})}; @@ -299,7 +311,7 @@ class Base template void Unserialize(Stream& s) { - if (s.GetParams().m_base_format == BaseFormat::RAW) { + if (s.template GetParams().m_base_format == BaseFormat::RAW) { s >> m_base_data; } else { std::string hex{"aa"}; @@ -327,8 +339,9 @@ class Derived : public Base public: std::string m_derived_data; - SERIALIZE_METHODS_PARAMS(Derived, obj, DerivedAndBaseFormat, fmt) + SERIALIZE_METHODS(Derived, obj) { + auto& fmt = SER_PARAMS(DerivedAndBaseFormat); READWRITE(fmt.m_base_format(AsBase(obj))); if (ser_action.ForRead()) { @@ -343,6 +356,76 @@ class Derived : public Base } }; +struct OtherParam { + uint8_t param; + SER_PARAMS_OPFUNC +}; + +//! Checker for value of OtherParam. When being serialized, serializes the +//! param to the stream. When being unserialized, verifies the value in the +//! stream matches the param. +class OtherParamChecker +{ +public: + template + void Serialize(Stream& s) const + { + const uint8_t param = s.template GetParams().param; + s << param; + } + + template + void Unserialize(Stream& s) const + { + const uint8_t param = s.template GetParams().param; + uint8_t value; + s >> value; + BOOST_CHECK_EQUAL(value, param); + } +}; + +//! Test creating a stream with multiple parameters and making sure +//! serialization code requiring different parameters can retrieve them. Also +//! test that earlier parameters take precedence if the same parameter type is +//! specified twice. (Choice of whether earlier or later values take precedence +//! or multiple values of the same type are allowed was arbitrary, and just +//! decided based on what would require smallest amount of ugly C++ template +//! code. Intent of the test is to just ensure there is no unexpected behavior.) 
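+//! Note on mechanics: the variadic ParamsStream constructor shown earlier
+//! nests one stream per extra parameter, so the setup in this test,
+//!     ParamsStream{stream, RAW, other_param_used, other_param_ignored}
+//! is equivalent to
+//!     ParamsStream{ParamsStream{ParamsStream{stream, other_param_ignored},
+//!                               other_param_used}, RAW}
+//! GetParams<T>() returns the current level's params when convertible to T
+//! and recurses inward otherwise, which is why the earliest-listed matching
+//! parameter (other_param_used) takes precedence.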
+BOOST_AUTO_TEST_CASE(with_params_multi) +{ + const OtherParam other_param_used{.param = 0x10}; + const OtherParam other_param_ignored{.param = 0x11}; + const OtherParam other_param_override{.param = 0x12}; + const OtherParamChecker check; + DataStream stream; + ParamsStream pstream{stream, RAW, other_param_used, other_param_ignored}; + + Base base1{0x20}; + pstream << base1 << check << other_param_override(check); + BOOST_CHECK_EQUAL(stream.str(), "\x20\x10\x12"); + + Base base2; + pstream >> base2 >> check >> other_param_override(check); + BOOST_CHECK_EQUAL(base2.m_base_data, 0x20); +} + +//! Test creating a ParamsStream that moves from a stream argument. +BOOST_AUTO_TEST_CASE(with_params_move) +{ + UncopyableStream stream{MakeByteSpan(std::string_view{"abc"})}; + ParamsStream pstream{std::move(stream), RAW, HEX, RAW}; + BOOST_CHECK_EQUAL(pstream.GetStream().str(), "abc"); + pstream.GetStream().clear(); + + Base base1{0x20}; + pstream << base1; + BOOST_CHECK_EQUAL(pstream.GetStream().str(), "\x20"); + + Base base2; + pstream >> base2; + BOOST_CHECK_EQUAL(base2.m_base_data, 0x20); +} + BOOST_AUTO_TEST_CASE(with_params_base) { Base b{0x0F}; diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 15728ef601..bfabe8d473 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -679,7 +679,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) CTransactionRef ptx_parent3 = MakeTransactionRef(mtx_parent3); package_mixed.push_back(ptx_parent3); BOOST_CHECK(m_node.mempool->GetMinFee().GetFee(GetVirtualTransactionSize(*ptx_parent3)) > low_fee_amt); - BOOST_CHECK(m_node.mempool->m_min_relay_feerate.GetFee(GetVirtualTransactionSize(*ptx_parent3)) <= low_fee_amt); + BOOST_CHECK(m_node.mempool->m_opts.min_relay_feerate.GetFee(GetVirtualTransactionSize(*ptx_parent3)) <= low_fee_amt); // child spends parent1, parent2, and parent3 CKey mixed_grandchild_key = GenerateRandomKey(); @@ -825,7 +825,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) CTransactionRef tx_parent_cheap = MakeTransactionRef(mtx_parent_cheap); package_still_too_low.push_back(tx_parent_cheap); BOOST_CHECK(m_node.mempool->GetMinFee().GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) > parent_fee); - BOOST_CHECK(m_node.mempool->m_min_relay_feerate.GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) <= parent_fee); + BOOST_CHECK(m_node.mempool->m_opts.min_relay_feerate.GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) <= parent_fee); auto mtx_child_cheap = CreateValidMempoolTransaction(/*input_transaction=*/tx_parent_cheap, /*input_vout=*/0, /*input_height=*/101, /*input_signing_key=*/child_key, diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 43debd2b68..db77b046d6 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -276,7 +276,7 @@ void ChainTestingSetup::LoadVerifyActivateChainstate() options.mempool = Assert(m_node.mempool.get()); options.block_tree_db_in_memory = m_block_tree_db_in_memory; options.coins_db_in_memory = m_coins_db_in_memory; - options.reindex = node::fReindex; + options.reindex = chainman.m_blockman.m_reindexing; options.reindex_chainstate = m_args.GetBoolArg("-reindex-chainstate", false); options.prune = chainman.m_blockman.IsPruneMode(); fNameHistory = false; @@ -560,9 +560,9 @@ void TestChain100Setup::MockMempoolMinFee(const CFeeRate& target_feerate) assert(m_node.mempool->size() == 0); // The target feerate cannot be too low... 
// ...otherwise the transaction's feerate will need to be negative. - assert(target_feerate > m_node.mempool->m_incremental_relay_feerate); + assert(target_feerate > m_node.mempool->m_opts.incremental_relay_feerate); // ...otherwise this is not meaningful. The feerate policy uses the maximum of both feerates. - assert(target_feerate > m_node.mempool->m_min_relay_feerate); + assert(target_feerate > m_node.mempool->m_opts.min_relay_feerate); // Manually create an invalid transaction. Manually set the fee in the CTxMemPoolEntry to // achieve the exact target feerate. @@ -573,7 +573,7 @@ void TestChain100Setup::MockMempoolMinFee(const CFeeRate& target_feerate) LockPoints lp; // The new mempool min feerate is equal to the removed package's feerate + incremental feerate. const auto tx_fee = target_feerate.GetFee(GetVirtualTransactionSize(*tx)) - - m_node.mempool->m_incremental_relay_feerate.GetFee(GetVirtualTransactionSize(*tx)); + m_node.mempool->m_opts.incremental_relay_feerate.GetFee(GetVirtualTransactionSize(*tx)); m_node.mempool->addUnchecked(CTxMemPoolEntry(tx, /*fee=*/tx_fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/true, /*sigops_cost=*/1, lp)); diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 75ffa31143..cfbd5fb414 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -92,7 +92,7 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendan // Don't directly remove the transaction here -- doing so would // invalidate iterators in cachedDescendants. Mark it for removal // by inserting into descendants_to_remove. - if (descendant.GetCountWithAncestors() > uint64_t(m_limits.ancestor_count) || descendant.GetSizeWithAncestors() > m_limits.ancestor_size_vbytes) { + if (descendant.GetCountWithAncestors() > uint64_t(m_opts.limits.ancestor_count) || descendant.GetSizeWithAncestors() > m_opts.limits.ancestor_size_vbytes) { descendants_to_remove.insert(descendant.GetTx().GetHash()); } } @@ -203,14 +203,14 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, size_t pack_count = package.size(); // Package itself is busting mempool limits; should be rejected even if no staged_ancestors exist - if (pack_count > static_cast(m_limits.ancestor_count)) { - return util::Error{Untranslated(strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_limits.ancestor_count))}; - } else if (pack_count > static_cast(m_limits.descendant_count)) { - return util::Error{Untranslated(strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_limits.descendant_count))}; - } else if (total_vsize > m_limits.ancestor_size_vbytes) { - return util::Error{Untranslated(strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_limits.ancestor_size_vbytes))}; - } else if (total_vsize > m_limits.descendant_size_vbytes) { - return util::Error{Untranslated(strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_limits.descendant_size_vbytes))}; + if (pack_count > static_cast(m_opts.limits.ancestor_count)) { + return util::Error{Untranslated(strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_opts.limits.ancestor_count))}; + } else if (pack_count > static_cast(m_opts.limits.descendant_count)) { + return util::Error{Untranslated(strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_opts.limits.descendant_count))}; + } else if (total_vsize > m_opts.limits.ancestor_size_vbytes) 
{ + return util::Error{Untranslated(strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_opts.limits.ancestor_size_vbytes))}; + } else if (total_vsize > m_opts.limits.descendant_size_vbytes) { + return util::Error{Untranslated(strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_opts.limits.descendant_size_vbytes))}; } CTxMemPoolEntry::Parents staged_ancestors; @@ -219,8 +219,8 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, std::optional piter = GetIter(input.prevout.hash); if (piter) { staged_ancestors.insert(**piter); - if (staged_ancestors.size() + package.size() > static_cast(m_limits.ancestor_count)) { - return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", m_limits.ancestor_count))}; + if (staged_ancestors.size() + package.size() > static_cast(m_opts.limits.ancestor_count)) { + return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", m_opts.limits.ancestor_count))}; } } } @@ -229,7 +229,7 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, // considered together must be within limits even if they are not interdependent. This may be // stricter than the limits for each individual transaction. const auto ancestors{CalculateAncestorsAndCheckLimits(total_vsize, package.size(), - staged_ancestors, m_limits)}; + staged_ancestors, m_opts.limits)}; // It's possible to overestimate the ancestor/descendant totals. if (!ancestors.has_value()) return util::Error{Untranslated("possibly " + util::ErrorString(ancestors).original)}; return {}; @@ -396,20 +396,8 @@ void CTxMemPoolEntry::UpdateAncestorState(int32_t modifySize, CAmount modifyFee, } CTxMemPool::CTxMemPool(const Options& opts) - : m_check_ratio{opts.check_ratio}, - m_max_size_bytes{opts.max_size_bytes}, - m_expiry{opts.expiry}, - m_incremental_relay_feerate{opts.incremental_relay_feerate}, - m_min_relay_feerate{opts.min_relay_feerate}, - m_dust_relay_feerate{opts.dust_relay_feerate}, - m_permit_bare_multisig{opts.permit_bare_multisig}, - m_max_datacarrier_bytes{opts.max_datacarrier_bytes}, - m_require_standard{opts.require_standard}, - m_full_rbf{opts.full_rbf}, - m_persist_v1_dat{opts.persist_v1_dat}, - m_limits{opts.limits}, - m_signals{opts.signals}, - names{*this} + : names{*this}, + m_opts{opts} { } @@ -494,12 +482,12 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) // even if not directly reported below. uint64_t mempool_sequence = GetAndIncrementSequence(); - if (reason != MemPoolRemovalReason::BLOCK && m_signals) { + if (reason != MemPoolRemovalReason::BLOCK && m_opts.signals) { // Notify clients that a transaction has been removed from the mempool // for any reason except being included in a block. Clients interested // in transactions included in blocks can subscribe to the BlockConnected // notification. 
- m_signals->TransactionRemovedFromMempool(it->GetSharedTx(), reason, mempool_sequence); + m_opts.signals->TransactionRemovedFromMempool(it->GetSharedTx(), reason, mempool_sequence); } TRACE5(mempool, removed, it->GetTx().GetHash().data(), @@ -653,8 +641,8 @@ void CTxMemPool::removeForBlock(const std::vector& vtx, unsigne removeConflicts(*tx); ClearPrioritisation(tx->GetHash()); } - if (m_signals) { - m_signals->MempoolTransactionsRemovedForBlock(txs_removed_for_block, nBlockHeight); + if (m_opts.signals) { + m_opts.signals->MempoolTransactionsRemovedForBlock(txs_removed_for_block, nBlockHeight); } lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; @@ -662,9 +650,9 @@ void CTxMemPool::removeForBlock(const std::vector& vtx, unsigne void CTxMemPool::check(const CCoinsViewCache& active_coins_tip, int64_t spendheight) const { - if (m_check_ratio == 0) return; + if (m_opts.check_ratio == 0) return; - if (GetRand(m_check_ratio) >= 1) return; + if (GetRand(m_opts.check_ratio) >= 1) return; AssertLockHeld(::cs_main); LOCK(cs); @@ -1123,12 +1111,12 @@ CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; - if (rollingMinimumFeeRate < (double)m_incremental_relay_feerate.GetFeePerK() / 2) { + if (rollingMinimumFeeRate < (double)m_opts.incremental_relay_feerate.GetFeePerK() / 2) { rollingMinimumFeeRate = 0; return CFeeRate(0); } } - return std::max(CFeeRate(llround(rollingMinimumFeeRate)), m_incremental_relay_feerate); + return std::max(CFeeRate(llround(rollingMinimumFeeRate)), m_opts.incremental_relay_feerate); } void CTxMemPool::trackPackageRemoved(const CFeeRate& rate) { @@ -1152,7 +1140,7 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector* pvNoSpends // to have 0 fee). This way, we don't allow txn to enter mempool with feerate // equal to txn which were removed with no block in between. CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); - removed += m_incremental_relay_feerate; + removed += m_opts.incremental_relay_feerate; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); diff --git a/src/txmempool.h b/src/txmempool.h index c99abacf82..517afbdd40 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -302,7 +302,6 @@ struct TxMempoolInfo class CTxMemPool { protected: - const int m_check_ratio; //!< Value n means that 1 times in n we check. std::atomic nTransactionsUpdated{0}; //!< Used by getblocktemplate to trigger CreateNewBlock() invocation uint64_t totalTxSize GUARDED_BY(cs){0}; //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. @@ -440,20 +439,7 @@ class CTxMemPool using Options = kernel::MemPoolOptions; - const int64_t m_max_size_bytes; - const std::chrono::seconds m_expiry; - const CFeeRate m_incremental_relay_feerate; - const CFeeRate m_min_relay_feerate; - const CFeeRate m_dust_relay_feerate; - const bool m_permit_bare_multisig; - const std::optional m_max_datacarrier_bytes; - const bool m_require_standard; - const bool m_full_rbf; - const bool m_persist_v1_dat; - - const Limits m_limits; - - ValidationSignals* const m_signals; + const Options m_opts; /** Create a new CTxMemPool. * Sanity checks will be off by default for performance, because otherwise @@ -644,7 +630,7 @@ class CTxMemPool * would otherwise be half of this, it is set to 0 instead. 
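 * Illustration with invented numbers: given an incremental relay feerate of
 * 1000 sat/kvB and a 12-hour half-life, a rolling minimum of 8000 sat/kvB
 * decays to 4000 after 12 hours and 2000 after 24 hours, and is snapped to
 * 0 as soon as it falls below 500, half the incremental feerate.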
*/ CFeeRate GetMinFee() const { - return GetMinFee(m_max_size_bytes); + return GetMinFee(m_opts.max_size_bytes); } /** Remove transactions from the mempool until its dynamic size is <= sizelimit. diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp index 92055ded72..13ef96f3be 100644 --- a/src/txorphanage.cpp +++ b/src/txorphanage.cpp @@ -23,7 +23,7 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) const Txid& hash = tx->GetHash(); const Wtxid& wtxid = tx->GetWitnessHash(); - if (m_orphans.count(hash)) + if (m_orphans.count(wtxid)) return false; // Ignore big transactions, to avoid a @@ -40,30 +40,28 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) return false; } - auto ret = m_orphans.emplace(hash, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()}); + auto ret = m_orphans.emplace(wtxid, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()}); assert(ret.second); m_orphan_list.push_back(ret.first); - // Allow for lookups in the orphan pool by wtxid, as well as txid - m_wtxid_to_orphan_it.emplace(tx->GetWitnessHash(), ret.first); for (const CTxIn& txin : tx->vin) { m_outpoint_to_orphan_it[txin.prevout].insert(ret.first); } - LogPrint(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s) (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), + LogPrint(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_it.size()); return true; } -int TxOrphanage::EraseTx(const Txid& txid) +int TxOrphanage::EraseTx(const Wtxid& wtxid) { LOCK(m_mutex); - return EraseTxNoLock(txid); + return EraseTxNoLock(wtxid); } -int TxOrphanage::EraseTxNoLock(const Txid& txid) +int TxOrphanage::EraseTxNoLock(const Wtxid& wtxid) { AssertLockHeld(m_mutex); - std::map::iterator it = m_orphans.find(txid); + std::map::iterator it = m_orphans.find(wtxid); if (it == m_orphans.end()) return 0; for (const CTxIn& txin : it->second.tx->vin) @@ -85,10 +83,12 @@ int TxOrphanage::EraseTxNoLock(const Txid& txid) m_orphan_list[old_pos] = it_last; it_last->second.list_pos = old_pos; } - const auto& wtxid = it->second.tx->GetWitnessHash(); - LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", txid.ToString(), wtxid.ToString()); + const auto& txid = it->second.tx->GetHash(); + // Time spent in orphanage = difference between current and entry time. + // Entry time is equal to ORPHAN_TX_EXPIRE_TIME earlier than entry's expiry. 
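+ // Worked example with invented timestamps: a tx entering at t=100 with an
+ // ORPHAN_TX_EXPIRE_TIME of, say, 20 minutes has nTimeExpire = 100 + 1200;
+ // erasing it at t=400 makes the log below report 400 + 1200 - 1300 = 300s.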
+ LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s) after %ds\n", txid.ToString(), wtxid.ToString(), + GetTime() + ORPHAN_TX_EXPIRE_TIME - it->second.nTimeExpire); m_orphan_list.pop_back(); - m_wtxid_to_orphan_it.erase(it->second.tx->GetWitnessHash()); m_orphans.erase(it); return 1; @@ -101,16 +101,16 @@ void TxOrphanage::EraseForPeer(NodeId peer) m_peer_work_set.erase(peer); int nErased = 0; - std::map::iterator iter = m_orphans.begin(); + std::map::iterator iter = m_orphans.begin(); while (iter != m_orphans.end()) { - std::map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid - if (maybeErase->second.fromPeer == peer) - { - nErased += EraseTxNoLock(maybeErase->second.tx->GetHash()); + // increment to avoid iterator becoming invalid after erasure + const auto& [wtxid, orphan] = *iter++; + if (orphan.fromPeer == peer) { + nErased += EraseTxNoLock(wtxid); } } - if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx from peer=%d\n", nErased, peer); + if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", nErased, peer); } void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) @@ -124,12 +124,12 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; - std::map::iterator iter = m_orphans.begin(); + std::map::iterator iter = m_orphans.begin(); while (iter != m_orphans.end()) { - std::map::iterator maybeErase = iter++; + std::map::iterator maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { - nErased += EraseTxNoLock(maybeErase->second.tx->GetHash()); + nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } @@ -142,7 +142,7 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) { // Evict a random orphan: size_t randompos = rng.randrange(m_orphan_list.size()); - EraseTxNoLock(m_orphan_list[randompos]->first); + EraseTxNoLock(m_orphan_list[randompos]->second.tx->GetWitnessHash()); ++nEvicted; } if (nEvicted > 0) LogPrint(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted); @@ -159,7 +159,7 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx) for (const auto& elem : it_by_prev->second) { // Get this source peer's work set, emplacing an empty set if it didn't exist // (note: if this peer wasn't still connected, we would have removed the orphan tx already) - std::set& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second; + std::set& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second; // Add this tx to the work set orphan_work_set.insert(elem->first); LogPrint(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n", @@ -169,14 +169,10 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx) } } -bool TxOrphanage::HaveTx(const GenTxid& gtxid) const +bool TxOrphanage::HaveTx(const Wtxid& wtxid) const { LOCK(m_mutex); - if (gtxid.IsWtxid()) { - return m_wtxid_to_orphan_it.count(Wtxid::FromUint256(gtxid.GetHash())); - } else { - return m_orphans.count(Txid::FromUint256(gtxid.GetHash())); - } + return m_orphans.count(wtxid); } CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) @@ -187,10 +183,10 @@ CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) if (work_set_it != 
@@ -124,12 +124,12 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng)
         // Sweep out expired orphan pool entries:
         int nErased = 0;
         int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
-        std::map<Txid, OrphanTx>::iterator iter = m_orphans.begin();
+        std::map<Wtxid, OrphanTx>::iterator iter = m_orphans.begin();
         while (iter != m_orphans.end())
         {
-            std::map<Txid, OrphanTx>::iterator maybeErase = iter++;
+            std::map<Wtxid, OrphanTx>::iterator maybeErase = iter++;
             if (maybeErase->second.nTimeExpire <= nNow) {
-                nErased += EraseTxNoLock(maybeErase->second.tx->GetHash());
+                nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash());
             } else {
                 nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
             }
@@ -142,7 +142,7 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng)
     {
         // Evict a random orphan:
         size_t randompos = rng.randrange(m_orphan_list.size());
-        EraseTxNoLock(m_orphan_list[randompos]->first);
+        EraseTxNoLock(m_orphan_list[randompos]->second.tx->GetWitnessHash());
         ++nEvicted;
     }
     if (nEvicted > 0) LogPrint(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted);
@@ -159,7 +159,7 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx)
         for (const auto& elem : it_by_prev->second) {
             // Get this source peer's work set, emplacing an empty set if it didn't exist
             // (note: if this peer wasn't still connected, we would have removed the orphan tx already)
-            std::set<Txid>& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second;
+            std::set<Wtxid>& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second;
             // Add this tx to the work set
             orphan_work_set.insert(elem->first);
             LogPrint(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
@@ -169,14 +169,10 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx)
     }
 }
 
-bool TxOrphanage::HaveTx(const GenTxid& gtxid) const
+bool TxOrphanage::HaveTx(const Wtxid& wtxid) const
 {
     LOCK(m_mutex);
-    if (gtxid.IsWtxid()) {
-        return m_wtxid_to_orphan_it.count(Wtxid::FromUint256(gtxid.GetHash()));
-    } else {
-        return m_orphans.count(Txid::FromUint256(gtxid.GetHash()));
-    }
+    return m_orphans.count(wtxid);
 }
 
 CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer)
@@ -187,10 +183,10 @@ CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer)
     if (work_set_it != m_peer_work_set.end()) {
         auto& work_set = work_set_it->second;
         while (!work_set.empty()) {
-            Txid txid = *work_set.begin();
+            Wtxid wtxid = *work_set.begin();
             work_set.erase(work_set.begin());
 
-            const auto orphan_it = m_orphans.find(txid);
+            const auto orphan_it = m_orphans.find(wtxid);
             if (orphan_it != m_orphans.end()) {
                 return orphan_it->second.tx;
             }
@@ -215,7 +211,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block)
 {
     LOCK(m_mutex);
 
-    std::vector<Txid> vOrphanErase;
+    std::vector<Wtxid> vOrphanErase;
 
     for (const CTransactionRef& ptx : block.vtx) {
         const CTransaction& tx = *ptx;
@@ -226,8 +222,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block)
             if (itByPrev == m_outpoint_to_orphan_it.end()) continue;
             for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
                 const CTransaction& orphanTx = *(*mi)->second.tx;
-                const auto& orphanHash = orphanTx.GetHash();
-                vOrphanErase.push_back(orphanHash);
+                vOrphanErase.push_back(orphanTx.GetWitnessHash());
             }
         }
     }
@@ -238,7 +233,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block)
         for (const auto& orphanHash : vOrphanErase) {
             nErased += EraseTxNoLock(orphanHash);
         }
-        LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx included or conflicted by block\n", nErased);
+        LogPrint(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", nErased);
     }
 }
diff --git a/src/txorphanage.h b/src/txorphanage.h
index a3c8edaa2a..5f9888c969 100644
--- a/src/txorphanage.h
+++ b/src/txorphanage.h
@@ -23,8 +23,8 @@ class TxOrphanage {
     /** Add a new orphan transaction */
     bool AddTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
 
-    /** Check if we already have an orphan transaction (by txid or wtxid) */
-    bool HaveTx(const GenTxid& gtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+    /** Check if we already have an orphan transaction (by wtxid only) */
+    bool HaveTx(const Wtxid& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
 
     /** Extract a transaction from a peer's work set
      *  Returns nullptr if there are no transactions to work on.
@@ -33,8 +33,8 @@ class TxOrphanage {
      */
     CTransactionRef GetTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
 
-    /** Erase an orphan by txid */
-    int EraseTx(const Txid& txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+    /** Erase an orphan by wtxid */
+    int EraseTx(const Wtxid& wtxid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
 
     /** Erase all orphans announced by a peer (eg, after that peer disconnects) */
     void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
@@ -77,12 +77,12 @@ class TxOrphanage {
         size_t list_pos;
     };
 
-    /** Map from txid to orphan transaction record. Limited by
+    /** Map from wtxid to orphan transaction record. Limited by
      *  -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
-    std::map<Txid, OrphanTx> m_orphans GUARDED_BY(m_mutex);
+    std::map<Wtxid, OrphanTx> m_orphans GUARDED_BY(m_mutex);
 
     /** Which peer provided the orphans that need to be reconsidered */
-    std::map<NodeId, std::set<Txid>> m_peer_work_set GUARDED_BY(m_mutex);
+    std::map<NodeId, std::set<Wtxid>> m_peer_work_set GUARDED_BY(m_mutex);
 
     using OrphanMap = decltype(m_orphans);
 
@@ -102,12 +102,8 @@ class TxOrphanage {
     /** Orphan transactions in vector for quick random eviction */
    std::vector<OrphanMap::iterator> m_orphan_list GUARDED_BY(m_mutex);
 
-    /** Index from wtxid into the m_orphans to lookup orphan
-     *  transactions using their witness ids. */
-    std::map<Wtxid, OrphanMap::iterator> m_wtxid_to_orphan_it GUARDED_BY(m_mutex);
-
-    /** Erase an orphan by txid */
-    int EraseTxNoLock(const Txid& txid) EXCLUSIVE_LOCKS_REQUIRED(m_mutex);
+    /** Erase an orphan by wtxid */
+    int EraseTxNoLock(const Wtxid& wtxid) EXCLUSIVE_LOCKS_REQUIRED(m_mutex);
 };
 
 #endif // BITCOIN_TXORPHANAGE_H
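With the m_wtxid_to_orphan_it index gone, m_orphans (keyed by wtxid) is the single source of truth, and m_orphan_list holds map iterators purely so LimitOrphans() can evict a random entry in O(1) via swap-and-pop. A toy model of that pairing (illustrative Python; the class and names are invented here, not part of the patch):

import random

class ToyOrphanage:
    # dict keyed by wtxid plus a position list, mirroring m_orphans/m_orphan_list
    def __init__(self):
        self.orphans = {}  # wtxid -> list position (the real map also stores the tx)
        self.order = []    # wtxids, in insertion order

    def add(self, wtxid):
        if wtxid in self.orphans:
            return False
        self.orphans[wtxid] = len(self.order)
        self.order.append(wtxid)
        return True

    def erase(self, wtxid):
        pos = self.orphans.pop(wtxid, None)
        if pos is None:
            return 0
        last = self.order[-1]
        if last != wtxid:
            # swap-and-pop: move the last entry into the hole, fix its position
            self.order[pos] = last
            self.orphans[last] = pos
        self.order.pop()
        return 1

    def evict_random(self, rng=random):
        if self.order:
            self.erase(rng.choice(self.order))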
- if (tx.HasWitness() && m_pool.m_require_standard && !IsWitnessStandard(tx, m_view)) { + if (tx.HasWitness() && m_pool.m_opts.require_standard && !IsWitnessStandard(tx, m_view)) { return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard"); } @@ -920,11 +919,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // method of ensuring the tx remains bumped. For example, the fee-bumping child could disappear // due to a replacement. // The only exception is v3 transactions. - if (!bypass_limits && ws.m_ptx->nVersion != 3 && ws.m_modified_fees < m_pool.m_min_relay_feerate.GetFee(ws.m_vsize)) { + if (!bypass_limits && ws.m_ptx->nVersion != 3 && ws.m_modified_fees < m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize)) { // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", - strprintf("%d < %d", ws.m_modified_fees, m_pool.m_min_relay_feerate.GetFee(ws.m_vsize))); + strprintf("%d < %d", ws.m_modified_fees, m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize))); } // No individual transactions are allowed below the mempool min feerate except from disconnected // blocks and transactions in a package. Package transactions will be checked using package @@ -935,7 +934,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Note that these modifications are only applicable to single transaction scenarios; // carve-outs and package RBF are disabled for multi-transaction evaluations. - CTxMemPool::Limits maybe_rbf_limits = m_pool.m_limits; + CTxMemPool::Limits maybe_rbf_limits = m_pool.m_opts.limits; // Calculate in-mempool ancestors, up to a limit. if (ws.m_conflicts.size() == 1) { @@ -1088,7 +1087,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws) ws.m_conflicting_size += it->GetTxSize(); } if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize, - m_pool.m_incremental_relay_feerate, hash)}) { + m_pool.m_opts.incremental_relay_feerate, hash)}) { // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. // This must be changed if package RBF is enabled. @@ -1262,7 +1261,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector& // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the // last calculation done in PreChecks, since package ancestors have already been submitted. { - auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_opts.limits)}; if(!ancestors) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail. 
@@ -1303,14 +1302,14 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
         results.emplace(ws.m_ptx->GetWitnessHash(),
                         MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize,
                                                      ws.m_base_fees, effective_feerate, effective_feerate_wtxids));
-        if (!m_pool.m_signals) continue;
+        if (!m_pool.m_opts.signals) continue;
 
         const CTransaction& tx = *ws.m_ptx;
         const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees,
                                                        ws.m_vsize, ws.m_entry->GetHeight(),
                                                        args.m_bypass_limits, args.m_package_submission,
                                                        IsCurrentForFeeEstimation(m_active_chainstate),
                                                        m_pool.HasNoInputsOf(tx));
-        m_pool.m_signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence());
+        m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence());
     }
     return all_submitted;
 }
@@ -1318,7 +1317,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
 MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
 {
     AssertLockHeld(cs_main);
-    LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_signals->TransactionAddedToMempool())
+    LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_opts.signals->TransactionAddedToMempool())
 
     Workspace ws(ptx);
     const std::vector<Wtxid> single_wtxid{ws.m_ptx->GetWitnessHash()};
@@ -1359,14 +1358,14 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
         return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), {ws.m_ptx->GetWitnessHash()});
     }
 
-    if (m_pool.m_signals) {
+    if (m_pool.m_opts.signals) {
         const CTransaction& tx = *ws.m_ptx;
         const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees,
                                                        ws.m_vsize, ws.m_entry->GetHeight(),
                                                        args.m_bypass_limits, args.m_package_submission,
                                                        IsCurrentForFeeEstimation(m_active_chainstate),
                                                        m_pool.HasNoInputsOf(tx));
-        m_pool.m_signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence());
+        m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence());
     }
 
     return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees,
@@ -2660,7 +2659,7 @@ CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState()
     AssertLockHeld(::cs_main);
     return this->GetCoinsCacheSizeState(
         m_coinstip_cache_size_bytes,
-        m_mempool ? m_mempool->m_max_size_bytes : 0);
+        m_mempool ? m_mempool->m_opts.max_size_bytes : 0);
 }
 
 CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState(
@@ -2707,7 +2706,7 @@ bool Chainstate::FlushStateToDisk(
     CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
     LOCK(m_blockman.cs_LastBlockFile);
-    if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) {
+    if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !m_chainman.m_blockman.m_reindexing) {
        // make sure we don't prune above any of the prune locks bestblocks
        // pruning is height-based
        int last_prune{m_chain.Height()}; // last height we can prune
@@ -2813,8 +2812,10 @@ bool Chainstate::FlushStateToDisk(
                 return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!"));
             }
             // Flush the chainstate (which may refer to block index entries).
-            if (!CoinsTip().Flush())
+            const auto empty_cache{(mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fFlushForPrune};
+            if (empty_cache ? !CoinsTip().Flush() : !CoinsTip().Sync()) {
                 return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to coin database."));
+            }
             m_last_flush = nNow;
             full_flush_completed = true;
             TRACE5(utxocache, flush,
@@ -3329,10 +3330,10 @@ bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex*
     return true;
 }
 
-static SynchronizationState GetSynchronizationState(bool init)
+static SynchronizationState GetSynchronizationState(bool init, bool reindexing)
 {
     if (!init) return SynchronizationState::POST_INIT;
-    if (::fReindex) return SynchronizationState::INIT_REINDEX;
+    if (reindexing) return SynchronizationState::INIT_REINDEX;
     return SynchronizationState::INIT_DOWNLOAD;
 }
 
@@ -3354,7 +3355,7 @@ static bool NotifyHeaderTip(ChainstateManager& chainman) LOCKS_EXCLUDED(cs_main)
     }
     // Send block tip changed notifications without cs_main
     if (fNotify) {
-        chainman.GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader->nHeight, pindexHeader->nTime, false);
+        chainman.GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload, chainman.m_blockman.m_reindexing), pindexHeader->nHeight, pindexHeader->nTime, false);
     }
     return fNotify;
 }
@@ -3473,7 +3474,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
             }
 
             // Always notify the UI if a new block tip was connected
-            if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd), *pindexNewTip))) {
+            if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd, m_chainman.m_blockman.m_reindexing), *pindexNewTip))) {
                 // Just breaking and returning success for now. This could
                 // be changed to bubble up the kernel::Interrupted value to
                 // the caller so the caller could distinguish between
@@ -3699,7 +3700,7 @@ bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pinde
         // parameter indicating the source of the tip change so hooks can
        // distinguish user-initiated invalidateblock changes from other
        // changes.
-        (void)m_chainman.GetNotifications().blockTip(GetSynchronizationState(m_chainman.IsInitialBlockDownload()), *to_mark_failed->pprev);
+        (void)m_chainman.GetNotifications().blockTip(GetSynchronizationState(m_chainman.IsInitialBlockDownload(), m_chainman.m_blockman.m_reindexing), *to_mark_failed->pprev);
     }
     return true;
 }
@@ -4383,7 +4384,7 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t
         m_last_presync_update = now;
     }
     bool initial_download = IsInitialBlockDownload();
-    GetNotifications().headerTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true);
+    GetNotifications().headerTip(GetSynchronizationState(initial_download, m_blockman.m_reindexing), height, timestamp, /*presync=*/true);
     if (initial_download) {
         int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()};
         blocks_left = std::max<int64_t>(0, blocks_left);
@@ -4464,10 +4465,16 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr<const CBlock>& pblock,
     // Write block to history file
     if (fNewBlock) *fNewBlock = true;
     try {
-        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, dbp)};
-        if (blockPos.IsNull()) {
-            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
-            return false;
+        FlatFilePos blockPos{};
+        if (dbp) {
+            blockPos = *dbp;
+            m_blockman.UpdateBlockInfo(block, pindex->nHeight, blockPos);
+        } else {
+            blockPos = m_blockman.SaveBlockToDisk(block, pindex->nHeight);
+            if (blockPos.IsNull()) {
+                state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
+                return false;
+            }
         }
         ReceivedBlockTransactions(block, pindex, blockPos);
     } catch (const std::runtime_error& e) {
@@ -4907,8 +4914,8 @@ bool ChainstateManager::LoadBlockIndex()
 {
     AssertLockHeld(cs_main);
     // Load block index from databases
-    bool needs_init = fReindex;
-    if (!fReindex) {
+    bool needs_init = m_blockman.m_reindexing;
+    if (!m_blockman.m_reindexing) {
         bool ret{m_blockman.LoadBlockIndexDB(SnapshotBlockhash())};
         if (!ret) return false;
 
@@ -4945,8 +4952,8 @@ bool ChainstateManager::LoadBlockIndex()
 
     if (needs_init) {
         // Everything here is for *new* reindex/DBs. Thus, though
-        // LoadBlockIndexDB may have set fReindex if we shut down
-        // mid-reindex previously, we don't check fReindex and
+        // LoadBlockIndexDB may have set m_reindexing if we shut down
+        // mid-reindex previously, we don't check m_reindexing and
         // instead only check it prior to LoadBlockIndexDB to set
         // needs_init.
 
@@ -4971,7 +4978,7 @@ bool Chainstate::LoadGenesisBlock()
     try {
         const CBlock& block = params.GenesisBlock();
-        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, nullptr)};
+        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0)};
         if (blockPos.IsNull()) {
             LogError("%s: writing genesis block to disk failed\n", __func__);
             return false;
@@ -5092,7 +5099,7 @@ void ChainstateManager::LoadExternalBlockFile(
             }
         }
 
-        if (m_blockman.IsPruneMode() && !fReindex && pblock) {
+        if (m_blockman.IsPruneMode() && !m_blockman.m_reindexing && pblock) {
             // must update the tip for pruning to work while importing with -loadblock.
             // this is a tradeoff to conserve disk space at the expense of time
             // spent updating the tip to be able to prune.
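The next hunk registers the test framework's new secp256k1 module with the framework unit test runner. As a sketch (module path assumed; run from test/functional), the same tests can also be loaded directly with the standard unittest machinery, mirroring what feature_framework_unit_tests.py does for each listed module:

import unittest

suite = unittest.TestLoader().loadTestsFromName("test_framework.crypto.secp256k1")
unittest.TextTestRunner(verbosity=2).run(suite)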
diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py
index c9754e083c..f03f084bed 100755
--- a/test/functional/feature_framework_unit_tests.py
+++ b/test/functional/feature_framework_unit_tests.py
@@ -25,6 +25,7 @@
     "crypto.muhash",
     "crypto.poly1305",
     "crypto.ripemd160",
+    "crypto.secp256k1",
     "script",
     "segwit_addr",
     "wallet_util",
diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py
index e873e2da0b..705534ea82 100755
--- a/test/functional/interface_rpc.py
+++ b/test/functional/interface_rpc.py
@@ -4,22 +4,80 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Tests some generic aspects of the RPC interface."""
 
+import json
 import os
-from test_framework.authproxy import JSONRPCException
+from dataclasses import dataclass
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import assert_equal, assert_greater_than_or_equal
 from threading import Thread
+from typing import Optional
 import subprocess
 
 
-def expect_http_status(expected_http_status, expected_rpc_code,
-                       fcn, *args):
-    try:
-        fcn(*args)
-        raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none")
-    except JSONRPCException as exc:
-        assert_equal(exc.error["code"], expected_rpc_code)
-        assert_equal(exc.http_status, expected_http_status)
+RPC_INVALID_ADDRESS_OR_KEY = -5
+RPC_INVALID_PARAMETER = -8
+RPC_METHOD_NOT_FOUND = -32601
+RPC_INVALID_REQUEST = -32600
+RPC_PARSE_ERROR = -32700
+
+
+@dataclass
+class BatchOptions:
+    version: Optional[int] = None
+    notification: bool = False
+    request_fields: Optional[dict] = None
+    response_fields: Optional[dict] = None
+
+
+def format_request(options, idx, fields):
+    request = {}
+    if options.version == 1:
+        request.update(version="1.1")
+    elif options.version == 2:
+        request.update(jsonrpc="2.0")
+    elif options.version is not None:
+        raise NotImplementedError(f"Unknown JSONRPC version {options.version}")
+    if not options.notification:
+        request.update(id=idx)
+    request.update(fields)
+    if options.request_fields:
+        request.update(options.request_fields)
+    return request
+
+
+def format_response(options, idx, fields):
+    if options.version == 2 and options.notification:
+        return None
+    response = {}
+    if not options.notification:
+        response.update(id=idx)
+    if options.version == 2:
+        response.update(jsonrpc="2.0")
+    else:
+        response.update(result=None, error=None)
+    response.update(fields)
+    if options.response_fields:
+        response.update(options.response_fields)
+    return response
+
+
+def send_raw_rpc(node, raw_body: bytes) -> tuple[object, int]:
+    return node._request("POST", "/", raw_body)
+
+
+def send_json_rpc(node, body: object) -> tuple[object, int]:
+    raw = json.dumps(body).encode("utf-8")
+    return send_raw_rpc(node, raw)
+
+
+def expect_http_rpc_status(expected_http_status, expected_rpc_error_code, node, method, params, version=1, notification=False):
+    req = format_request(BatchOptions(version, notification), 0, {"method": method, "params": params})
+    response, status = send_json_rpc(node, req)
+
+    if expected_rpc_error_code is not None:
+        assert_equal(response["error"]["code"], expected_rpc_error_code)
+
+    assert_equal(status, expected_http_status)
 
 
 def test_work_queue_getblock(node, got_exceeded_error):
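For reference, the helpers above produce payloads of the following shapes: a JSON-RPC 2.0 call carries an id, while a notification omits it and gets no response body (HTTP 204). The values here are illustrative only:

# JSON-RPC 2.0 call: has an "id", expects a response object echoing it,
# e.g. {"jsonrpc": "2.0", "id": 0, "result": 0}.
call = {"jsonrpc": "2.0", "id": 0, "method": "getblockcount", "params": []}

# JSON-RPC 2.0 notification: no "id"; the server replies with HTTP 204 and no body.
notification = {"jsonrpc": "2.0", "method": "getblockcount", "params": []}

# Legacy 1.1-style call, as format_request() builds it with version=1.
legacy = {"version": "1.1", "id": 0, "method": "getblockcount", "params": []}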
self.log.info("Testing basic JSON-RPC batch request...") - - results = self.nodes[0].batch([ + def test_batch_request(self, call_options): + calls = [ # A basic request that will work fine. - {"method": "getblockcount", "id": 1}, + {"method": "getblockcount"}, # Request that will fail. The whole batch request should still # work fine. - {"method": "invalidmethod", "id": 2}, + {"method": "invalidmethod"}, # Another call that should succeed. - {"method": "getblockhash", "id": 3, "params": [0]}, - ]) - - result_by_id = {} - for res in results: - result_by_id[res["id"]] = res - - assert_equal(result_by_id[1]['error'], None) - assert_equal(result_by_id[1]['result'], 0) - - assert_equal(result_by_id[2]['error']['code'], -32601) - assert_equal(result_by_id[2]['result'], None) - - assert_equal(result_by_id[3]['error'], None) - assert result_by_id[3]['result'] is not None + {"method": "getblockhash", "params": [0]}, + # Invalid request format + {"pizza": "sausage"} + ] + results = [ + {"result": 0}, + {"error": {"code": RPC_METHOD_NOT_FOUND, "message": "Method not found"}}, + {"result": "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"}, + {"error": {"code": RPC_INVALID_REQUEST, "message": "Missing method"}}, + ] + + request = [] + response = [] + for idx, (call, result) in enumerate(zip(calls, results), 1): + options = call_options(idx) + if options is None: + continue + request.append(format_request(options, idx, call)) + r = format_response(options, idx, result) + if r is not None: + response.append(r) + + rpc_response, http_status = send_json_rpc(self.nodes[0], request) + if len(response) == 0 and len(request) > 0: + assert_equal(http_status, 204) + assert_equal(rpc_response, None) + else: + assert_equal(http_status, 200) + assert_equal(rpc_response, response) + + def test_batch_requests(self): + self.log.info("Testing empty batch request...") + self.test_batch_request(lambda idx: None) + + self.log.info("Testing basic JSON-RPC 2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2)) + + self.log.info("Testing JSON-RPC 2.0 batch with notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=idx < 2)) + + self.log.info("Testing JSON-RPC 2.0 batch of ALL notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=True)) + + # JSONRPC 1.1 does not support batch requests, but test them for backwards compatibility. 
+ self.log.info("Testing nonstandard JSON-RPC 1.1 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=1)) + + self.log.info("Testing nonstandard mixed JSON-RPC 1.1/2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2 if idx % 2 else 1)) + + self.log.info("Testing nonstandard batch request without version numbers...") + self.test_batch_request(lambda idx: BatchOptions()) + + self.log.info("Testing nonstandard batch request without version numbers or ids...") + self.test_batch_request(lambda idx: BatchOptions(notification=True)) + + self.log.info("Testing nonstandard jsonrpc 1.0 version number is accepted...") + self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "1.0"})) + + self.log.info("Testing unrecognized jsonrpc version number is rejected...") + self.test_batch_request(lambda idx: BatchOptions( + request_fields={"jsonrpc": "2.1"}, + response_fields={"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}})) def test_http_status_codes(self): - self.log.info("Testing HTTP status codes for JSON-RPC requests...") - - expect_http_status(404, -32601, self.nodes[0].invalidmethod) - expect_http_status(500, -8, self.nodes[0].getblockhash, 42) + self.log.info("Testing HTTP status codes for JSON-RPC 1.1 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0]) + # Errors + expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", []) + expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42]) + # force-send empty request + response, status = send_raw_rpc(self.nodes[0], b"") + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + # force-send invalidly formatted request + response, status = send_raw_rpc(self.nodes[0], b"this is bad") + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0], 2, False) + # RPC errors but not HTTP errors + expect_http_rpc_status(200, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False) + expect_http_rpc_status(200, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False) + # force-send invalidly formatted requests + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}}) + assert_equal(status, 400) + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}}) + assert_equal(status, 400) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...") + # Not notification: id exists + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "2.0", "id": None, "method": "getblockcount"}) + assert_equal(response["result"], 0) + assert_equal(status, 200) + # Not notification: JSON 1.1 + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 1) + # Not notification: has "id" field + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, 
 
     def test_http_status_codes(self):
-        self.log.info("Testing HTTP status codes for JSON-RPC requests...")
-
-        expect_http_status(404, -32601, self.nodes[0].invalidmethod)
-        expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
+        self.log.info("Testing HTTP status codes for JSON-RPC 1.1 requests...")
+        # OK
+        expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0])
+        # Errors
+        expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [])
+        expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42])
+        # force-send empty request
+        response, status = send_raw_rpc(self.nodes[0], b"")
+        assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}})
+        assert_equal(status, 500)
+        # force-send invalidly formatted request
+        response, status = send_raw_rpc(self.nodes[0], b"this is bad")
+        assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}})
+        assert_equal(status, 500)
+
+        self.log.info("Testing HTTP status codes for JSON-RPC 2.0 requests...")
+        # OK
+        expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0], 2, False)
+        # RPC errors but not HTTP errors
+        expect_http_rpc_status(200, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False)
+        expect_http_rpc_status(200, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False)
+        # force-send invalidly formatted requests
+        response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"})
+        assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}})
+        assert_equal(status, 400)
+        response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"})
+        assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}})
+        assert_equal(status, 400)
+
+        self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...")
+        # Not notification: id exists
+        response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "2.0", "id": None, "method": "getblockcount"})
+        assert_equal(response["result"], 0)
+        assert_equal(status, 200)
+        # Not notification: JSON 1.1
+        expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 1)
+        # Not notification: has "id" field
+        expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, False)
+        block_count = self.nodes[0].getblockcount()
+        # Notification response status code: HTTP_NO_CONTENT
+        expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq8w7hxl"], 2, True)
+        # The command worked even though there was no response
+        assert_equal(block_count + 1, self.nodes[0].getblockcount())
+        # No error response for notifications even if they are invalid
+        expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True)
+        # Sanity check: command was not executed
+        assert_equal(block_count + 1, self.nodes[0].getblockcount())
 
     def test_work_queue_exceeded(self):
         self.log.info("Testing work queue exceeded...")
@@ -94,7 +241,7 @@ def test_work_queue_exceeded(self):
 
     def run_test(self):
         self.test_getrpcinfo()
-        self.test_batch_request()
+        self.test_batch_requests()
         self.test_http_status_codes()
         self.test_work_queue_exceeded()
 
diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py
index ae9dc816ab..0ae05d4b0b 100755
--- a/test/functional/p2p_invalid_tx.py
+++ b/test/functional/p2p_invalid_tx.py
@@ -165,7 +165,7 @@ def run_test(self):
         node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False)
 
         self.log.info('Test that a peer disconnection causes erase its transactions from the orphan pool')
-        with node.assert_debug_log(['Erased 100 orphan tx from peer=25']):
+        with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=25']):
             self.reconnect_p2p(num_connections=1)
 
         self.log.info('Test that a transaction in the orphan pool is included in a new tip block causes erase this transaction from the orphan pool')
@@ -190,7 +190,7 @@ def run_test(self):
         block_A.solve()
 
         self.log.info('Send the block that includes the previous orphan ... ')
-        with node.assert_debug_log(["Erased 1 orphan tx included or conflicted by block"]):
+        with node.assert_debug_log(["Erased 1 orphan transaction(s) included or conflicted by block"]):
             node.p2ps[0].send_blocks_and_test([block_A], node, success=True)
 
         self.log.info('Test that a transaction in the orphan pool conflicts with a new tip block causes erase this transaction from the orphan pool')
@@ -219,7 +219,7 @@ def run_test(self):
         block_B.solve()
 
         self.log.info('Send the block that includes a transaction which conflicts with the previous orphan ... ')
-        with node.assert_debug_log(["Erased 1 orphan tx included or conflicted by block"]):
+        with node.assert_debug_log(["Erased 1 orphan transaction(s) included or conflicted by block"]):
             node.p2ps[0].send_blocks_and_test([block_B], node, success=True)
 
diff --git a/test/functional/p2p_orphan_handling.py b/test/functional/p2p_orphan_handling.py
index 6166c62aa2..f525d87cca 100755
--- a/test/functional/p2p_orphan_handling.py
+++ b/test/functional/p2p_orphan_handling.py
@@ -7,6 +7,7 @@
 from test_framework.messages import (
     CInv,
+    CTxInWitness,
     MSG_TX,
     MSG_WITNESS_TX,
     MSG_WTX,
@@ -21,6 +22,7 @@
     NONPREF_PEER_TX_DELAY,
     OVERLOADED_PEER_TX_DELAY,
     p2p_lock,
+    P2PInterface,
     P2PTxInvStore,
     TXID_RELAY_DELAY,
 )
@@ -127,6 +129,22 @@ def relay_transaction(self, peer, tx):
         peer.wait_for_getdata([wtxid])
         peer.send_and_ping(msg_tx(tx))
 
+    def create_malleated_version(self, tx):
+        """
+        Create a malleated version of the tx where the witness is replaced with garbage data.
+        Returns a CTransaction object.
+        """
+        tx_bad_wit = tx_from_hex(tx["hex"])
+        tx_bad_wit.wit.vtxinwit = [CTxInWitness()]
+        # Add garbage data to witness 0. We cannot simply strip the witness, as the node would
+        # classify it as a transaction in which the witness was missing rather than wrong.
+        tx_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+        assert_equal(tx["txid"], tx_bad_wit.rehash())
+        assert tx["wtxid"] != tx_bad_wit.getwtxid()
+
+        return tx_bad_wit
+
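The tests below lean on exactly the property create_malleated_version() asserts: the txid (which commits only to non-witness data) is preserved, while the wtxid changes. As a usage sketch (tx here stands for a MiniWallet transfer dict, as used elsewhere in this file):

# tx = self.wallet.create_self_transfer()
tx_bad_wit = self.create_malleated_version(tx)
assert tx_bad_wit.rehash() == tx["txid"]       # same txid: inputs/outputs untouched
assert tx_bad_wit.getwtxid() != tx["wtxid"]    # different wtxid: witness replaced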
     @cleanup
     def test_arrival_timing_orphan(self):
         self.log.info("Test missing parents that arrive during delay are not requested")
@@ -284,8 +302,8 @@ def test_orphan_multiple_parents(self):
         # doesn't give up on the orphan. Once all of the missing parents are received, it should be
         # submitted to mempool.
         peer.send_message(msg_notfound(vec=[CInv(MSG_WITNESS_TX, int(txid_conf_old, 16))]))
+        # Sync with ping to ensure orphans are reconsidered
         peer.send_and_ping(msg_tx(missing_tx["tx"]))
-        peer.sync_with_ping()
         assert_equal(node.getmempoolentry(orphan["txid"])["ancestorcount"], 3)
 
     @cleanup
@@ -394,10 +412,161 @@ def test_orphan_inherit_rejection(self):
         peer2.assert_never_requested(child["tx"].getwtxid())
 
         # The child should never be requested, even if announced again with potentially different witness.
+        # Sync with ping to ensure orphans are reconsidered
         peer3.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(child["txid"], 16))]))
         self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP)
         peer3.assert_never_requested(child["txid"])
 
+    @cleanup
+    def test_same_txid_orphan(self):
+        self.log.info("Check what happens when orphan with same txid is already in orphanage")
+        node = self.nodes[0]
+
+        tx_parent = self.wallet.create_self_transfer()
+
+        # Create the real child
+        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])
+
+        # Create a fake version of the child
+        tx_orphan_bad_wit = self.create_malleated_version(tx_child)
+
+        bad_peer = node.add_p2p_connection(P2PInterface())
+        honest_peer = node.add_p2p_connection(P2PInterface())
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+
+        # 2. Node requests the missing parent by txid.
+        parent_txid_int = int(tx_parent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([parent_txid_int])
+
+        # 3. Honest peer relays the real child, which is also missing parents and should be placed
+        # in the orphanage.
+        with node.assert_debug_log(["missingorspent", "stored orphan tx"]):
+            honest_peer.send_and_ping(msg_tx(tx_child["tx"]))
+
+        # Time out the previous request for the parent (node will not request the same transaction
+        # from multiple nodes at the same time)
+        node.bumpmocktime(GETDATA_TX_INTERVAL)
+
+        # 4. The parent is requested. Honest peer sends it.
+        honest_peer.wait_for_getdata([parent_txid_int])
+        # Sync with ping to ensure orphans are reconsidered
+        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))
+
+        # 5. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected.
+        node_mempool = node.getrawmempool()
+        assert tx_parent["txid"] in node_mempool
+        assert tx_child["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
+
+    @cleanup
+    def test_same_txid_orphan_of_orphan(self):
+        self.log.info("Check what happens when orphan's parent with same txid is already in orphanage")
+        node = self.nodes[0]
+
+        tx_grandparent = self.wallet.create_self_transfer()
+
+        # Create middle tx (both parent and child) which will be in orphanage.
+        tx_middle = self.wallet.create_self_transfer(utxo_to_spend=tx_grandparent["new_utxo"])
+
+        # Create a fake version of the middle tx
+        tx_orphan_bad_wit = self.create_malleated_version(tx_middle)
+
+        # Create grandchild spending from tx_middle (and spending from tx_orphan_bad_wit since they
+        # have the same txid).
+        tx_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_middle["new_utxo"])
+
+        bad_peer = node.add_p2p_connection(P2PInterface())
+        honest_peer = node.add_p2p_connection(P2PInterface())
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+
+        # 2. Node requests missing tx_grandparent by txid.
+        grandparent_txid_int = int(tx_grandparent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([grandparent_txid_int])
+
+        # 3. Honest peer relays the grandchild, which is missing a parent. The parent by txid already
+        # exists in orphanage, but should be re-requested because the node shouldn't assume that the
+        # witness data is the same. In this case, a same-txid-different-witness transaction exists!
+        with node.assert_debug_log(["stored orphan tx"]):
+            honest_peer.send_and_ping(msg_tx(tx_grandchild["tx"]))
+        middle_txid_int = int(tx_middle["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        honest_peer.wait_for_getdata([middle_txid_int])
+
+        # 4. Honest peer relays the real child, which is also missing parents and should be placed
+        # in the orphanage.
+        with node.assert_debug_log(["stored orphan tx"]):
+            honest_peer.send_and_ping(msg_tx(tx_middle["tx"]))
+        assert_equal(len(node.getrawmempool()), 0)
+
+        # 5. Honest peer sends tx_grandparent
+        honest_peer.send_and_ping(msg_tx(tx_grandparent["tx"]))
+
+        # 6. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected.
+        node_mempool = node.getrawmempool()
+        assert tx_grandparent["txid"] in node_mempool
+        assert tx_middle["txid"] in node_mempool
+        assert tx_grandchild["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_middle["txid"])["wtxid"], tx_middle["wtxid"])
+
+    @cleanup
+    def test_orphan_txid_inv(self):
+        self.log.info("Check node does not ignore announcement with same txid as tx in orphanage")
+        node = self.nodes[0]
+
+        tx_parent = self.wallet.create_self_transfer()
+
+        # Create the real child and fake version
+        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])
+        tx_orphan_bad_wit = self.create_malleated_version(tx_child)
+
+        bad_peer = node.add_p2p_connection(PeerTxRelayer())
+        # Must not send wtxidrelay because otherwise the inv(TX) will be ignored later
+        honest_peer = node.add_p2p_connection(P2PInterface(wtxidrelay=False))
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+
+        # 2. Node requests the missing parent by txid.
+        parent_txid_int = int(tx_parent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([parent_txid_int])
+
+        # 3. Honest peer announces the real child, by txid (this isn't common but the node should
+        # still keep track of it).
+        child_txid_int = int(tx_child["txid"], 16)
+        honest_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=child_txid_int)]))
+
+        # 4. The child is requested. Honest peer sends it.
+        node.bumpmocktime(TXREQUEST_TIME_SKIP)
+        honest_peer.wait_for_getdata([child_txid_int])
+        with node.assert_debug_log(["stored orphan tx"]):
+            honest_peer.send_and_ping(msg_tx(tx_child["tx"]))
+
+        # 5. After first parent request times out, the node sends another one for the missing parent
+        # of the real orphan child.
+        node.bumpmocktime(GETDATA_TX_INTERVAL)
+        honest_peer.wait_for_getdata([parent_txid_int])
+        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))
+
+        # 6. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected. This may happen in either
+        # order since the message-processing is randomized. If tx_orphan_bad_wit is validated first,
+        # its consensus error leads to disconnection of bad_peer. If tx_child is validated first,
+        # tx_orphan_bad_wit is rejected for txn-same-nonwitness-data-in-mempool (no punishment).
+        node_mempool = node.getrawmempool()
+        assert tx_parent["txid"] in node_mempool
+        assert tx_child["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
+
+
     def run_test(self):
         self.nodes[0].setmocktime(int(time.time()))
         self.wallet_nonsegwit = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)
@@ -410,6 +579,9 @@ def run_test(self):
         self.test_orphans_overlapping_parents()
         self.test_orphan_of_orphan()
         self.test_orphan_inherit_rejection()
+        self.test_same_txid_orphan()
+        self.test_same_txid_orphan_of_orphan()
+        self.test_orphan_txid_inv()
 
 
 if __name__ == '__main__':
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
index 03042877b2..7edf9f3679 100644
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -160,6 +160,15 @@ def _get_response(self):
             raise JSONRPCException({
                 'code': -342, 'message': 'missing HTTP response from server'})
 
+        # Check for no-content HTTP status code, which can be returned when an
+        # RPC client requests a JSON-RPC 2.0 "notification" with no response.
+        # Currently this is only possible if clients call the _request() method
+        # directly to send a raw request.
+        if http_response.status == HTTPStatus.NO_CONTENT:
+            if len(http_response.read()) != 0:
+                raise JSONRPCException({'code': -342, 'message': 'Content received with NO CONTENT status code'})
+            return None, http_response.status
+
         content_type = http_response.getheader('Content-Type')
         if content_type != 'application/json':
             raise JSONRPCException(
diff --git a/test/functional/test_framework/crypto/secp256k1.py b/test/functional/test_framework/crypto/secp256k1.py
index 2e9e419da5..50a46dce37 100644
--- a/test/functional/test_framework/crypto/secp256k1.py
+++ b/test/functional/test_framework/crypto/secp256k1.py
@@ -15,6 +15,8 @@
 * G: the secp256k1 generator point
 """
 
+import unittest
+from hashlib import sha256
 
 class FE:
     """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977).
@@ -344,3 +346,9 @@ def mul(self, a):
 
 # Precomputed table with multiples of G for fast multiplication
 FAST_G = FastGEMul(G)
+
+class TestFrameworkSecp256k1(unittest.TestCase):
+    def test_H(self):
+        H = sha256(G.to_bytes_uncompressed()).digest()
+        assert GE.lift_x(FE.from_bytes(H)) is not None
+        self.assertEqual(H.hex(), "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")
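The new test_H pins down that the SHA256 of the uncompressed generator encodes a valid x coordinate. A sketch of probing a candidate x with the same helpers, assuming GE.lift_x() behaves as test_H uses it (returning None when x**3 + 7 is not a square in the field):

from hashlib import sha256
from test_framework.crypto.secp256k1 import FE, GE, G

candidate = sha256(G.to_bytes_uncompressed()).digest()
point = GE.lift_x(FE.from_bytes(candidate))
assert point is not None  # this particular hash does land on the curve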