From f3a2b5237688e9f574444e793724664b00fb7f2a Mon Sep 17 00:00:00 2001
From: Ryan Ofsky
Date: Wed, 22 Nov 2023 14:47:00 -0500
Subject: [PATCH 01/50] serialization: Support for multiple parameters

This commit makes a minimal change to the ParamsStream class to let it
retrieve multiple parameters. Followup commits after this commit clean up
code using ParamsStream and make it easier to set multiple parameters.

Currently it is only possible to attach one serialization parameter to a
stream at a time. For example, it is not possible to set a parameter
controlling the transaction format and a parameter controlling the address
format at the same time because one parameter will override the other.

This limitation is inconvenient for multiprocess code since it is not
possible to create just one type of stream and serialize any object to it.
Instead it is necessary to create different streams for different object
types, which requires extra boilerplate and makes using the new parameter
fields a lot more awkward than the older version and type fields.

Fix this problem by allowing an unlimited number of serialization stream
parameters to be set, and allowing them to be requested by type. Later
parameters will still override earlier parameters, but only if they have
the same type. This change requires replacing the stream.GetParams() method
with a stream.GetParams<T>() method in order for serialization code to
retrieve the desired parameters. This change is more verbose, but probably
a good thing for readability because previously it could be difficult to
know what type the GetParams() method would return, and now it is more
obvious.
---
 src/netaddress.h             |  4 ++--
 src/primitives/transaction.h |  6 +++---
 src/protocol.h               |  3 ++-
 src/serialize.h              | 39 ++++++++++++++++--------------------
 src/test/serialize_tests.cpp |  7 ++++---
 5 files changed, 28 insertions(+), 31 deletions(-)

diff --git a/src/netaddress.h b/src/netaddress.h
index 08dd77c0ff..5e5c412586 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -241,7 +241,7 @@ class CNetAddr
     template <typename Stream>
     void Serialize(Stream& s) const
     {
-        if (s.GetParams().enc == Encoding::V2) {
+        if (s.template GetParams<SerParams>().enc == Encoding::V2) {
             SerializeV2Stream(s);
         } else {
             SerializeV1Stream(s);
@@ -254,7 +254,7 @@ class CNetAddr
     template <typename Stream>
     void Unserialize(Stream& s)
     {
-        if (s.GetParams().enc == Encoding::V2) {
+        if (s.template GetParams<SerParams>().enc == Encoding::V2) {
             UnserializeV2Stream(s);
         } else {
             UnserializeV1Stream(s);
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index ccbeb3ec49..d15b8005f9 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -326,7 +326,7 @@ class CTransaction
     template <typename Stream>
     inline void Serialize(Stream& s) const {
-        SerializeTransaction(*this, s, s.GetParams());
+        SerializeTransaction(*this, s, s.template GetParams<TransactionSerParams>());
     }

     /** This deserializing constructor is provided instead of an Unserialize method.
@@ -386,12 +386,12 @@ struct CMutableTransaction template inline void Serialize(Stream& s) const { - SerializeTransaction(*this, s, s.GetParams()); + SerializeTransaction(*this, s, s.template GetParams()); } template inline void Unserialize(Stream& s) { - UnserializeTransaction(*this, s, s.GetParams()); + UnserializeTransaction(*this, s, s.template GetParams()); } template diff --git a/src/protocol.h b/src/protocol.h index e405253632..58a7287d03 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -406,9 +406,10 @@ class CAddress : public CService static constexpr SerParams V1_DISK{{CNetAddr::Encoding::V1}, Format::Disk}; static constexpr SerParams V2_DISK{{CNetAddr::Encoding::V2}, Format::Disk}; - SERIALIZE_METHODS_PARAMS(CAddress, obj, SerParams, params) + SERIALIZE_METHODS(CAddress, obj) { bool use_v2; + auto& params = SER_PARAMS(SerParams); if (params.fmt == Format::Disk) { // In the disk serialization format, the encoding (v1 or v2) is determined by a flag version // that's part of the serialization itself. ADDRV2_FORMAT in the stream version only determines diff --git a/src/serialize.h b/src/serialize.h index 19585c630a..44e2db7ac7 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -181,9 +181,8 @@ const Out& AsBase(const In& x) static void SerializationOps(Type& obj, Stream& s, Operation ser_action) /** - * Variant of FORMATTER_METHODS that supports a declared parameter type. - * - * If a formatter has a declared parameter type, it must be invoked directly or + * Formatter methods can retrieve parameters attached to a stream using the + * SER_PARAMS(type) macro as long as the stream is created directly or * indirectly with a parameter of that type. This permits making serialization * depend on run-time context in a type-safe way. * @@ -191,7 +190,8 @@ const Out& AsBase(const In& x) * struct BarParameter { bool fancy; ... }; * struct Bar { ... }; * struct FooFormatter { - * FORMATTER_METHODS(Bar, obj, BarParameter, param) { + * FORMATTER_METHODS(Bar, obj) { + * auto& param = SER_PARAMS(BarParameter); * if (param.fancy) { * READWRITE(VARINT(obj.value)); * } else { @@ -213,13 +213,7 @@ const Out& AsBase(const In& x) * Compilation will fail in any context where serialization is invoked but * no parameter of a type convertible to BarParameter is provided. */ -#define FORMATTER_METHODS_PARAMS(cls, obj, paramcls, paramobj) \ - template \ - static void Ser(Stream& s, const cls& obj) { SerializationOps(obj, s, ActionSerialize{}, s.GetParams()); } \ - template \ - static void Unser(Stream& s, cls& obj) { SerializationOps(obj, s, ActionUnserialize{}, s.GetParams()); } \ - template \ - static void SerializationOps(Type& obj, Stream& s, Operation ser_action, const paramcls& paramobj) +#define SER_PARAMS(type) (s.template GetParams()) #define BASE_SERIALIZE_METHODS(cls) \ template \ @@ -246,15 +240,6 @@ const Out& AsBase(const In& x) BASE_SERIALIZE_METHODS(cls) \ FORMATTER_METHODS(cls, obj) -/** - * Variant of SERIALIZE_METHODS that supports a declared parameter type. - * - * See FORMATTER_METHODS_PARAMS for more information on parameters. - */ -#define SERIALIZE_METHODS_PARAMS(cls, obj, paramcls, paramobj) \ - BASE_SERIALIZE_METHODS(cls) \ - FORMATTER_METHODS_PARAMS(cls, obj, paramcls, paramobj) - // Templates for serializing to anything that looks like a stream, // i.e. 
anything that supports .read(Span) and .write(Span) // @@ -1134,9 +1119,19 @@ class ParamsStream void ignore(size_t num) { m_substream.ignore(num); } bool eof() const { return m_substream.eof(); } size_t size() const { return m_substream.size(); } - const Params& GetParams() const { return m_params; } int GetVersion() = delete; // Deprecated with Params usage int GetType() = delete; // Deprecated with Params usage + + //! Get reference to stream parameters. + template + const auto& GetParams() const + { + if constexpr (std::is_convertible_v) { + return m_params; + } else { + return m_substream.template GetParams
<P>
(); + } + } }; /** Wrapper that serializes objects with the specified parameters. */ @@ -1176,7 +1171,7 @@ class ParamsWrapper /** \ * Return a wrapper around t that (de)serializes it with specified parameter params. \ * \ - * See FORMATTER_METHODS_PARAMS for more information on serialization parameters. \ + * See SER_PARAMS for more information on serialization parameters. \ */ \ template \ auto operator()(T&& t) const \ diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index d75eb499b4..18356f2df0 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -289,7 +289,7 @@ class Base template void Serialize(Stream& s) const { - if (s.GetParams().m_base_format == BaseFormat::RAW) { + if (s.template GetParams().m_base_format == BaseFormat::RAW) { s << m_base_data; } else { s << Span{HexStr(Span{&m_base_data, 1})}; @@ -299,7 +299,7 @@ class Base template void Unserialize(Stream& s) { - if (s.GetParams().m_base_format == BaseFormat::RAW) { + if (s.template GetParams().m_base_format == BaseFormat::RAW) { s >> m_base_data; } else { std::string hex{"aa"}; @@ -327,8 +327,9 @@ class Derived : public Base public: std::string m_derived_data; - SERIALIZE_METHODS_PARAMS(Derived, obj, DerivedAndBaseFormat, fmt) + SERIALIZE_METHODS(Derived, obj) { + auto& fmt = SER_PARAMS(DerivedAndBaseFormat); READWRITE(fmt.m_base_format(AsBase(obj))); if (ser_action.ForRead()) { From cccddc03f0c625daeac7158eb20c1508aea5df39 Mon Sep 17 00:00:00 2001 From: Hernan Marino Date: Tue, 14 Mar 2023 14:01:17 -0300 Subject: [PATCH 02/50] Wallet encrypt on create, allow to navigate options --- src/qt/askpassphrasedialog.cpp | 35 +++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp index 246dff0069..f0efa250d7 100644 --- a/src/qt/askpassphrasedialog.cpp +++ b/src/qt/askpassphrasedialog.cpp @@ -104,10 +104,14 @@ void AskPassphraseDialog::accept() // Cannot encrypt with empty passphrase break; } - QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm wallet encryption"), - tr("Warning: If you encrypt your wallet and lose your passphrase, you will LOSE ALL OF YOUR BITCOINS!") + "
<br><br>
" + tr("Are you sure you wish to encrypt your wallet?"), - QMessageBox::Yes|QMessageBox::Cancel, - QMessageBox::Cancel); + QMessageBox msgBoxConfirm(QMessageBox::Question, + tr("Confirm wallet encryption"), + tr("Warning: If you encrypt your wallet and lose your passphrase, you will LOSE ALL OF YOUR BITCOINS!") + "
<br><br>
" + tr("Are you sure you wish to encrypt your wallet?"), + QMessageBox::Cancel | QMessageBox::Yes, this); + msgBoxConfirm.button(QMessageBox::Yes)->setText(tr("Continue")); + msgBoxConfirm.button(QMessageBox::Cancel)->setText(tr("Back")); + msgBoxConfirm.setDefaultButton(QMessageBox::Cancel); + QMessageBox::StandardButton retval = (QMessageBox::StandardButton)msgBoxConfirm.exec(); if(retval == QMessageBox::Yes) { if(newpass1 == newpass2) @@ -116,10 +120,19 @@ void AskPassphraseDialog::accept() "your bitcoins from being stolen by malware infecting your computer."); if (m_passphrase_out) { m_passphrase_out->assign(newpass1); - QMessageBox::warning(this, tr("Wallet to be encrypted"), - "" + - tr("Your wallet is about to be encrypted. ") + encryption_reminder + - ""); + QMessageBox msgBoxWarning(QMessageBox::Warning, + tr("Wallet to be encrypted"), + "" + + tr("Your wallet is about to be encrypted. ") + encryption_reminder + " " + + tr("Are you sure you wish to encrypt your wallet?") + + "", + QMessageBox::Cancel | QMessageBox::Yes, this); + msgBoxWarning.setDefaultButton(QMessageBox::Cancel); + QMessageBox::StandardButton retval = (QMessageBox::StandardButton)msgBoxWarning.exec(); + if (retval == QMessageBox::Cancel) { + QDialog::reject(); + return; + } } else { assert(model != nullptr); if (model->setWalletEncrypted(newpass1)) { @@ -145,11 +158,7 @@ void AskPassphraseDialog::accept() tr("The supplied passphrases do not match.")); } } - else - { - QDialog::reject(); // Cancelled - } - } break; + } break; case Unlock: try { if (!model->setWalletLocked(false, oldpass)) { From 84502b755bcc35413ad466047893b5edf134c53f Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 21 Feb 2024 07:07:50 -0500 Subject: [PATCH 03/50] serialization: Drop references to GetVersion/GetType Drop references to stream GetVersion()/GetType() methods which no longer exist --- src/serialize.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/serialize.h b/src/serialize.h index 44e2db7ac7..08d9a80f96 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1103,12 +1103,12 @@ size_t GetSerializeSize(const T& t) return (SizeComputer() << t).size(); } -/** Wrapper that overrides the GetParams() function of a stream (and hides GetVersion/GetType). */ +/** Wrapper that overrides the GetParams() function of a stream. */ template class ParamsStream { const Params& m_params; - SubStream& m_substream; // private to avoid leaking version/type into serialization code that shouldn't see it + SubStream& m_substream; public: ParamsStream(const Params& params LIFETIMEBOUND, SubStream& substream LIFETIMEBOUND) : m_params{params}, m_substream{substream} {} @@ -1119,8 +1119,6 @@ class ParamsStream void ignore(size_t num) { m_substream.ignore(num); } bool eof() const { return m_substream.eof(); } size_t size() const { return m_substream.size(); } - int GetVersion() = delete; // Deprecated with Params usage - int GetType() = delete; // Deprecated with Params usage //! Get reference to stream parameters. template From 83436d14f06551026bcf5529df3b63b4e8a679fb Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 22 Nov 2023 16:39:32 -0500 Subject: [PATCH 04/50] serialization: Drop unnecessary ParamsStream references Drop unnecessary ParamsStream references from CTransaction and CMutableTransaction constructors. 
This just couples these classes unnecessarily to the ParamsStream class, making the ParamsStream class harder to modify, and making the transaction classes in some cases (depending on parameter order) unable to work with stream classes that have multiple parameters set. --- src/primitives/transaction.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index d15b8005f9..976542cfae 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -334,7 +334,7 @@ class CTransaction template CTransaction(deserialize_type, const TransactionSerParams& params, Stream& s) : CTransaction(CMutableTransaction(deserialize, params, s)) {} template - CTransaction(deserialize_type, ParamsStream& s) : CTransaction(CMutableTransaction(deserialize, s)) {} + CTransaction(deserialize_type, Stream& s) : CTransaction(CMutableTransaction(deserialize, s)) {} bool IsNull() const { return vin.empty() && vout.empty(); @@ -400,7 +400,7 @@ struct CMutableTransaction } template - CMutableTransaction(deserialize_type, ParamsStream& s) { + CMutableTransaction(deserialize_type, Stream& s) { Unserialize(s); } From cb28849a88339c1e7ba03ffc7e38998339074e6e Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 22 Nov 2023 16:39:32 -0500 Subject: [PATCH 05/50] serialization: Reverse ParamsStream constructor order Move parameter argument after stream argument so will be possible to accept multiple variadic parameter arguments in the following commit. Also reverse template parameter order for consistency. --- src/addrman.cpp | 4 ++-- src/net.cpp | 2 +- src/serialize.h | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index a8206de6ee..b649950ece 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -171,7 +171,7 @@ void AddrManImpl::Serialize(Stream& s_) const */ // Always serialize in the latest version (FILE_FORMAT). - ParamsStream s{CAddress::V2_DISK, s_}; + ParamsStream s{s_, CAddress::V2_DISK}; s << static_cast(FILE_FORMAT); @@ -236,7 +236,7 @@ void AddrManImpl::Unserialize(Stream& s_) s_ >> Using>(format); const auto ser_params = (format >= Format::V3_BIP155 ? CAddress::V2_DISK : CAddress::V1_DISK); - ParamsStream s{ser_params, s_}; + ParamsStream s{s_, ser_params}; uint8_t compat; s >> compat; diff --git a/src/net.cpp b/src/net.cpp index 0cc794c2c3..ba764e28e1 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -199,7 +199,7 @@ static std::vector ConvertSeeds(const std::vector &vSeedsIn) std::vector vSeedsOut; FastRandomContext rng; DataStream underlying_stream{vSeedsIn}; - ParamsStream s{CAddress::V2_NETWORK, underlying_stream}; + ParamsStream s{underlying_stream, CAddress::V2_NETWORK}; while (!s.eof()) { CService endpoint; s >> endpoint; diff --git a/src/serialize.h b/src/serialize.h index 08d9a80f96..8ad5ad5147 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1104,14 +1104,14 @@ size_t GetSerializeSize(const T& t) } /** Wrapper that overrides the GetParams() function of a stream. 
*/ -template +template class ParamsStream { const Params& m_params; SubStream& m_substream; public: - ParamsStream(const Params& params LIFETIMEBOUND, SubStream& substream LIFETIMEBOUND) : m_params{params}, m_substream{substream} {} + ParamsStream(SubStream& substream LIFETIMEBOUND, const Params& params LIFETIMEBOUND) : m_params{params}, m_substream{substream} {} template ParamsStream& operator<<(const U& obj) { ::Serialize(*this, obj); return *this; } template ParamsStream& operator>>(U&& obj) { ::Unserialize(*this, obj); return *this; } void write(Span src) { m_substream.write(src); } @@ -1145,13 +1145,13 @@ class ParamsWrapper template void Serialize(Stream& s) const { - ParamsStream ss{m_params, s}; + ParamsStream ss{s, m_params}; ::Serialize(ss, m_object); } template void Unserialize(Stream& s) { - ParamsStream ss{m_params, s}; + ParamsStream ss{s, m_params}; ::Unserialize(ss, m_object); } }; From e6794e475c84d9edca4a2876e2342cbb1d85f804 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 22 Nov 2023 17:56:04 -0500 Subject: [PATCH 06/50] serialization: Accept multiple parameters in ParamsStream constructor Before this change it was possible but awkward to create ParamStream streams with multiple parameter objects. After this change it is straightforward. The change to support multiple parameters is implemented by letting ParamsStream contain substream instances, instead of just references to external substreams. So a side-effect of this change is that ParamStream can now accept rvalue stream arguments and be easier to use in some other cases. A test for rvalues is added in this commit, and some simplifications to non-test code are made in the next commit. --- src/serialize.h | 32 ++++++++++++++- src/test/serialize_tests.cpp | 79 ++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 2 deletions(-) diff --git a/src/serialize.h b/src/serialize.h index 8ad5ad5147..8bca4d8ada 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1108,10 +1108,22 @@ template class ParamsStream { const Params& m_params; - SubStream& m_substream; + // If ParamsStream constructor is passed an lvalue argument, Substream will + // be a reference type, and m_substream will reference that argument. + // Otherwise m_substream will be a substream instance and move from the + // argument. Letting ParamsStream contain a substream instance instead of + // just a reference is useful to make the ParamsStream object self contained + // and let it do cleanup when destroyed, for example by closing files if + // SubStream is a file stream. + SubStream m_substream; public: - ParamsStream(SubStream& substream LIFETIMEBOUND, const Params& params LIFETIMEBOUND) : m_params{params}, m_substream{substream} {} + ParamsStream(SubStream&& substream, const Params& params LIFETIMEBOUND) : m_params{params}, m_substream{std::forward(substream)} {} + + template + ParamsStream(NestedSubstream&& s, const Params1& params1 LIFETIMEBOUND, const Params2& params2 LIFETIMEBOUND, const NestedParams&... 
params LIFETIMEBOUND) + : ParamsStream{::ParamsStream{std::forward(s), params2, params...}, params1} {} + template ParamsStream& operator<<(const U& obj) { ::Serialize(*this, obj); return *this; } template ParamsStream& operator>>(U&& obj) { ::Unserialize(*this, obj); return *this; } void write(Span src) { m_substream.write(src); } @@ -1132,6 +1144,22 @@ class ParamsStream } }; +/** + * Explicit template deduction guide is required for single-parameter + * constructor so Substream&& is treated as a forwarding reference, and + * SubStream is deduced as reference type for lvalue arguments. + */ +template +ParamsStream(Substream&&, const Params&) -> ParamsStream; + +/** + * Template deduction guide for multiple params arguments that creates a nested + * ParamsStream. + */ +template +ParamsStream(Substream&& s, const Params1& params1, const Params2& params2, const Params&... params) -> + ParamsStream(s), params2, params...}), Params1>; + /** Wrapper that serializes objects with the specified parameters. */ template class ParamsWrapper diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 18356f2df0..ad4852d13d 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -15,6 +15,18 @@ BOOST_FIXTURE_TEST_SUITE(serialize_tests, BasicTestingSetup) +// For testing move-semantics, declare a version of datastream that can be moved +// but is not copyable. +class UncopyableStream : public DataStream +{ +public: + using DataStream::DataStream; + UncopyableStream(const UncopyableStream&) = delete; + UncopyableStream& operator=(const UncopyableStream&) = delete; + UncopyableStream(UncopyableStream&&) noexcept = default; + UncopyableStream& operator=(UncopyableStream&&) noexcept = default; +}; + class CSerializeMethodsTestSingle { protected: @@ -344,6 +356,73 @@ class Derived : public Base } }; +struct OtherParam { + uint8_t param; + SER_PARAMS_OPFUNC +}; + +//! Checker for value of OtherParam. When being serialized, serializes the +//! param to the stream. When being unserialized, verifies the value in the +//! stream matches the param. +class OtherParamChecker +{ +public: + template + void Serialize(Stream& s) const + { + const uint8_t param = s.template GetParams().param; + s << param; + } + + template + void Unserialize(Stream& s) const + { + const uint8_t param = s.template GetParams().param; + uint8_t value; + s >> value; + BOOST_CHECK_EQUAL(value, param); + } +}; + +//! Test creating a stream with multiple parameters and making sure +//! serialization code requiring different parameters can retrieve them. Also +//! test that earlier parameters take precedence if the same parameter type is +//! specified twice. (Choice of whether earlier or later values take precedence +//! or multiple values of the same type are allowed was arbitrary, and just +//! decided based on what would require smallest amount of ugly C++ template +//! code. Intent of the test is to just ensure there is no unexpected behavior.) 
+BOOST_AUTO_TEST_CASE(with_params_multi) +{ + const OtherParam other_param_used{.param = 0x10}; + const OtherParam other_param_ignored{.param = 0x11}; + const OtherParam other_param_override{.param = 0x12}; + const OtherParamChecker check; + DataStream stream; + ParamsStream pstream{stream, RAW, other_param_used, other_param_ignored}; + + Base base1{0x20}; + pstream << base1 << check << other_param_override(check); + BOOST_CHECK_EQUAL(stream.str(), "\x20\x10\x12"); + + Base base2; + pstream >> base2 >> check >> other_param_override(check); + BOOST_CHECK_EQUAL(base2.m_base_data, 0x20); +} + +//! Test creating a ParamsStream that moves from a stream argument. +BOOST_AUTO_TEST_CASE(with_params_move) +{ + UncopyableStream stream{}; + ParamsStream pstream{std::move(stream), RAW, HEX, RAW}; + + Base base1{0x20}; + pstream << base1; + + Base base2; + pstream >> base2; + BOOST_CHECK_EQUAL(base2.m_base_data, 0x20); +} + BOOST_AUTO_TEST_CASE(with_params_base) { Base b{0x0F}; From 951203bcc496c4415b7754cd764544659b76067f Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Tue, 20 Feb 2024 17:45:47 -0500 Subject: [PATCH 07/50] net: Simplify ParamsStream usage Simplify ParamsStream usage in ConvertSeeds now that ParamsStream supports rvalue substream arguments. --- src/net.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index ba764e28e1..8bb1558ee9 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -198,8 +198,7 @@ static std::vector ConvertSeeds(const std::vector &vSeedsIn) const auto one_week{7 * 24h}; std::vector vSeedsOut; FastRandomContext rng; - DataStream underlying_stream{vSeedsIn}; - ParamsStream s{underlying_stream, CAddress::V2_NETWORK}; + ParamsStream s{DataStream{vSeedsIn}, CAddress::V2_NETWORK}; while (!s.eof()) { CService endpoint; s >> endpoint; From 8d491ae9ecf1948ea29f67b50ca7259123f602aa Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 21 Feb 2024 07:35:38 -0500 Subject: [PATCH 08/50] serialization: Add ParamsStream GetStream() method Add GetStream() method useful for accessing underlying stream. Use to improve ParamsStream test coverage. --- src/serialize.h | 32 +++++++++++++++++++++++++++----- src/test/serialize_tests.cpp | 5 ++++- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/src/serialize.h b/src/serialize.h index 8bca4d8ada..d3eb1acb1f 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1103,6 +1103,10 @@ size_t GetSerializeSize(const T& t) return (SizeComputer() << t).size(); } +//! Check if type contains a stream by seeing if has a GetStream() method. +template +concept ContainsStream = requires(T t) { t.GetStream(); }; + /** Wrapper that overrides the GetParams() function of a stream. */ template class ParamsStream @@ -1126,11 +1130,11 @@ class ParamsStream template ParamsStream& operator<<(const U& obj) { ::Serialize(*this, obj); return *this; } template ParamsStream& operator>>(U&& obj) { ::Unserialize(*this, obj); return *this; } - void write(Span src) { m_substream.write(src); } - void read(Span dst) { m_substream.read(dst); } - void ignore(size_t num) { m_substream.ignore(num); } - bool eof() const { return m_substream.eof(); } - size_t size() const { return m_substream.size(); } + void write(Span src) { GetStream().write(src); } + void read(Span dst) { GetStream().read(dst); } + void ignore(size_t num) { GetStream().ignore(num); } + bool eof() const { return GetStream().eof(); } + size_t size() const { return GetStream().size(); } //! Get reference to stream parameters. 
template @@ -1142,6 +1146,24 @@ class ParamsStream return m_substream.template GetParams
<P>
(); } } + + //! Get reference to underlying stream. + auto& GetStream() + { + if constexpr (ContainsStream) { + return m_substream.GetStream(); + } else { + return m_substream; + } + } + const auto& GetStream() const + { + if constexpr (ContainsStream) { + return m_substream.GetStream(); + } else { + return m_substream; + } + } }; /** diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index ad4852d13d..b28e1b4196 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -412,11 +412,14 @@ BOOST_AUTO_TEST_CASE(with_params_multi) //! Test creating a ParamsStream that moves from a stream argument. BOOST_AUTO_TEST_CASE(with_params_move) { - UncopyableStream stream{}; + UncopyableStream stream{MakeByteSpan(std::string_view{"abc"})}; ParamsStream pstream{std::move(stream), RAW, HEX, RAW}; + BOOST_CHECK_EQUAL(pstream.GetStream().str(), "abc"); + pstream.GetStream().clear(); Base base1{0x20}; pstream << base1; + BOOST_CHECK_EQUAL(pstream.GetStream().str(), "\x20"); Base base2; pstream >> base2; From 4202c170da37a3203e05a9f39f303d7df19b6d81 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Mon, 27 Feb 2023 13:00:37 -0500 Subject: [PATCH 09/50] test: refactor interface_rpc.py Refactors the helper functions in the test to provide more fine-grained control over RPC requests and responses than the usual AuthProxy methods. No change in test behavior, the same RPC requests are made. --- test/functional/interface_rpc.py | 112 +++++++++++++++++++++++-------- 1 file changed, 83 insertions(+), 29 deletions(-) diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index e873e2da0b..501a841805 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -4,22 +4,72 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Tests some generic aspects of the RPC interface.""" +import json import os -from test_framework.authproxy import JSONRPCException +from dataclasses import dataclass from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_greater_than_or_equal from threading import Thread +from typing import Optional import subprocess -def expect_http_status(expected_http_status, expected_rpc_code, - fcn, *args): - try: - fcn(*args) - raise AssertionError(f"Expected RPC error {expected_rpc_code}, got none") - except JSONRPCException as exc: - assert_equal(exc.error["code"], expected_rpc_code) - assert_equal(exc.http_status, expected_http_status) +RPC_INVALID_PARAMETER = -8 +RPC_METHOD_NOT_FOUND = -32601 +RPC_INVALID_REQUEST = -32600 + + +@dataclass +class BatchOptions: + version: Optional[int] = None + notification: bool = False + request_fields: Optional[dict] = None + response_fields: Optional[dict] = None + + +def format_request(options, idx, fields): + request = {} + if options.version == 1: + request.update(version="1.1") + elif options.version == 2: + request.update(jsonrpc="2.0") + elif options.version is not None: + raise NotImplementedError(f"Unknown JSONRPC version {options.version}") + if not options.notification: + request.update(id=idx) + request.update(fields) + if options.request_fields: + request.update(options.request_fields) + return request + + +def format_response(options, idx, fields): + response = {} + response.update(id=None if options.notification else idx) + response.update(result=None, error=None) + response.update(fields) + if options.response_fields: + response.update(options.response_fields) + return response + + +def send_raw_rpc(node, raw_body: bytes) -> tuple[object, int]: + return node._request("POST", "/", raw_body) + + +def send_json_rpc(node, body: object) -> tuple[object, int]: + raw = json.dumps(body).encode("utf-8") + return send_raw_rpc(node, raw) + + +def expect_http_rpc_status(expected_http_status, expected_rpc_error_code, node, method, params, version=1, notification=False): + req = format_request(BatchOptions(version, notification), 0, {"method": method, "params": params}) + response, status = send_json_rpc(node, req) + + if expected_rpc_error_code is not None: + assert_equal(response["error"]["code"], expected_rpc_error_code) + + assert_equal(status, expected_http_status) def test_work_queue_getblock(node, got_exceeded_error): @@ -51,34 +101,38 @@ def test_getrpcinfo(self): def test_batch_request(self): self.log.info("Testing basic JSON-RPC batch request...") - results = self.nodes[0].batch([ + calls = [ # A basic request that will work fine. - {"method": "getblockcount", "id": 1}, + {"method": "getblockcount"}, # Request that will fail. The whole batch request should still # work fine. - {"method": "invalidmethod", "id": 2}, + {"method": "invalidmethod"}, # Another call that should succeed. 
- {"method": "getblockhash", "id": 3, "params": [0]}, - ]) - - result_by_id = {} - for res in results: - result_by_id[res["id"]] = res - - assert_equal(result_by_id[1]['error'], None) - assert_equal(result_by_id[1]['result'], 0) - - assert_equal(result_by_id[2]['error']['code'], -32601) - assert_equal(result_by_id[2]['result'], None) - - assert_equal(result_by_id[3]['error'], None) - assert result_by_id[3]['result'] is not None + {"method": "getblockhash", "params": [0]}, + ] + results = [ + {"result": 0}, + {"error": {"code": RPC_METHOD_NOT_FOUND, "message": "Method not found"}}, + {"result": "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"}, + {"error": {"code": RPC_INVALID_REQUEST, "message": "Missing method"}}, + ] + + request = [] + response = [] + for idx, (call, result) in enumerate(zip(calls, results), 1): + options = BatchOptions() + request.append(format_request(options, idx, call)) + response.append(format_response(options, idx, result)) + + rpc_response, http_status = send_json_rpc(self.nodes[0], request) + assert_equal(http_status, 200) + assert_equal(rpc_response, response) def test_http_status_codes(self): self.log.info("Testing HTTP status codes for JSON-RPC requests...") - expect_http_status(404, -32601, self.nodes[0].invalidmethod) - expect_http_status(500, -8, self.nodes[0].getblockhash, 42) + expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", []) + expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42]) def test_work_queue_exceeded(self): self.log.info("Testing work queue exceeded...") From 09416f9ec445e4d6bb277400758083b0b4e8b174 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Sun, 31 Dec 2023 07:55:03 -0500 Subject: [PATCH 10/50] test: cover JSONRPC 2.0 requests, batches, and notifications --- test/functional/interface_rpc.py | 97 ++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 10 deletions(-) diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index 501a841805..748d83858a 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -14,9 +14,11 @@ import subprocess -RPC_INVALID_PARAMETER = -8 -RPC_METHOD_NOT_FOUND = -32601 -RPC_INVALID_REQUEST = -32600 +RPC_INVALID_ADDRESS_OR_KEY = -5 +RPC_INVALID_PARAMETER = -8 +RPC_METHOD_NOT_FOUND = -32601 +RPC_INVALID_REQUEST = -32600 +RPC_PARSE_ERROR = -32700 @dataclass @@ -98,9 +100,7 @@ def test_getrpcinfo(self): assert_greater_than_or_equal(command['duration'], 0) assert_equal(info['logpath'], os.path.join(self.nodes[0].chain_path, 'debug.log')) - def test_batch_request(self): - self.log.info("Testing basic JSON-RPC batch request...") - + def test_batch_request(self, call_options): calls = [ # A basic request that will work fine. {"method": "getblockcount"}, @@ -109,6 +109,8 @@ def test_batch_request(self): {"method": "invalidmethod"}, # Another call that should succeed. 
{"method": "getblockhash", "params": [0]}, + # Invalid request format + {"pizza": "sausage"} ] results = [ {"result": 0}, @@ -120,7 +122,9 @@ def test_batch_request(self): request = [] response = [] for idx, (call, result) in enumerate(zip(calls, results), 1): - options = BatchOptions() + options = call_options(idx) + if options is None: + continue request.append(format_request(options, idx, call)) response.append(format_response(options, idx, result)) @@ -128,11 +132,84 @@ def test_batch_request(self): assert_equal(http_status, 200) assert_equal(rpc_response, response) - def test_http_status_codes(self): - self.log.info("Testing HTTP status codes for JSON-RPC requests...") + def test_batch_requests(self): + self.log.info("Testing empty batch request...") + self.test_batch_request(lambda idx: None) + + self.log.info("Testing basic JSON-RPC 2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2)) + + self.log.info("Testing JSON-RPC 2.0 batch with notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=idx < 2)) + + self.log.info("Testing JSON-RPC 2.0 batch of ALL notifications...") + self.test_batch_request(lambda idx: BatchOptions(version=2, notification=True)) + + # JSONRPC 1.1 does not support batch requests, but test them for backwards compatibility. + self.log.info("Testing nonstandard JSON-RPC 1.1 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=1)) + self.log.info("Testing nonstandard mixed JSON-RPC 1.1/2.0 batch request...") + self.test_batch_request(lambda idx: BatchOptions(version=2 if idx % 2 else 1)) + + self.log.info("Testing nonstandard batch request without version numbers...") + self.test_batch_request(lambda idx: BatchOptions()) + + self.log.info("Testing nonstandard batch request without version numbers or ids...") + self.test_batch_request(lambda idx: BatchOptions(notification=True)) + + self.log.info("Testing nonstandard jsonrpc 1.0 version number is accepted...") + self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "1.0"})) + + self.log.info("Testing unrecognized jsonrpc version number is accepted...") + self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "2.1"})) + + def test_http_status_codes(self): + self.log.info("Testing HTTP status codes for JSON-RPC 1.1 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0]) + # Errors expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", []) expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42]) + # force-send empty request + response, status = send_raw_rpc(self.nodes[0], b"") + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + # force-send invalidly formatted request + response, status = send_raw_rpc(self.nodes[0], b"this is bad") + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_PARSE_ERROR, "message": "Parse error"}}) + assert_equal(status, 500) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 requests...") + # OK + expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0], 2, False) + # RPC errors and HTTP errors + expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False) + expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False) + # force-send invalidly formatted 
requests + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"}) + assert_equal(response, {"error": None, "id": None, "result": 0}) + assert_equal(status, 200) + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"}) + assert_equal(response, {"error": None, "id": None, "result": 0}) + assert_equal(status, 200) + + self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...") + # Not notification: id exists + response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "2.0", "id": None, "method": "getblockcount"}) + assert_equal(response["result"], 0) + assert_equal(status, 200) + # Not notification: JSON 1.1 + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 1) + # Not notification: has "id" field + expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, False) + block_count = self.nodes[0].getblockcount() + expect_http_rpc_status(200, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqdku202"], 2, True) + # The command worked even though there was no response + assert_equal(block_count + 1, self.nodes[0].getblockcount()) + expect_http_rpc_status(500, RPC_INVALID_ADDRESS_OR_KEY, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True) + # Sanity check: command was not executed + assert_equal(block_count + 1, self.nodes[0].getblockcount()) def test_work_queue_exceeded(self): self.log.info("Testing work queue exceeded...") @@ -148,7 +225,7 @@ def test_work_queue_exceeded(self): def run_test(self): self.test_getrpcinfo() - self.test_batch_request() + self.test_batch_requests() self.test_http_status_codes() self.test_work_queue_exceeded() From df6e3756d6feaf1856e7886820b70874209fd90b Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 23 Jan 2024 10:33:26 -0500 Subject: [PATCH 11/50] rpc: Avoid copies in JSONRPCReplyObj() Change parameters from const references to values, so they can be moved into the reply instead of copied. Also update callers to move instead of copy. 
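(A minimal self-contained sketch of the idiom this patch applies, with a
hypothetical Reply type standing in for UniValue: taking arguments by
value lets callers hand over rvalues, so the payload is moved into the
reply rather than deep-copied as it would be from a const reference.)

    #include <string>
    #include <utility>

    struct Reply {
        std::string result;
        std::string id;
    };

    Reply MakeReply(std::string result, std::string id) // by value, move-friendly
    {
        Reply r;
        r.result = std::move(result); // moved into the reply, no copy
        r.id = std::move(id);
        return r;
    }

    int main()
    {
        std::string big(1 << 20, 'x');
        Reply reply = MakeReply(std::move(big), "1"); // caller opts into the move
        return reply.result.size() == (1 << 20) ? 0 : 1;
    }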
--- src/bitcoin-cli.cpp | 6 +++--- src/httprpc.cpp | 10 +++++----- src/rpc/request.cpp | 14 ++++---------- src/rpc/request.h | 3 +-- src/rpc/server.cpp | 6 +++--- 5 files changed, 16 insertions(+), 23 deletions(-) diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 45db7a9a66..3591f7248b 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -302,7 +302,7 @@ class AddrinfoRequestHandler : public BaseRequestHandler } addresses.pushKV("total", total); result.pushKV("addresses_known", addresses); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, 1); } }; @@ -371,7 +371,7 @@ class GetinfoRequestHandler: public BaseRequestHandler } result.pushKV("relayfee", batch[ID_NETWORKINFO]["result"]["relayfee"]); result.pushKV("warnings", batch[ID_NETWORKINFO]["result"]["warnings"]); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, 1); } }; @@ -709,7 +709,7 @@ class GenerateToAddressRequestHandler : public BaseRequestHandler UniValue result(UniValue::VOBJ); result.pushKV("address", address_str); result.pushKV("blocks", reply.get_obj()["result"]); - return JSONRPCReplyObj(result, NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, 1); } protected: std::string address_str; diff --git a/src/httprpc.cpp b/src/httprpc.cpp index c72dbf10bc..aef20c24fc 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -73,7 +73,7 @@ static std::vector> g_rpcauth; static std::map> g_rpc_whitelist; static bool g_rpc_whitelist_default = false; -static void JSONErrorReply(HTTPRequest* req, const UniValue& objError, const UniValue& id) +static void JSONErrorReply(HTTPRequest* req, UniValue objError, UniValue id) { // Send error reply from json-rpc error object int nStatus = HTTP_INTERNAL_SERVER_ERROR; @@ -84,7 +84,7 @@ static void JSONErrorReply(HTTPRequest* req, const UniValue& objError, const Uni else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND; - std::string strReply = JSONRPCReply(NullUniValue, objError, id); + std::string strReply = JSONRPCReplyObj(NullUniValue, std::move(objError), std::move(id)).write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(nStatus, strReply); @@ -203,7 +203,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) UniValue result = tableRPC.execute(jreq); // Send reply - strReply = JSONRPCReply(result, NullUniValue, jreq.id); + strReply = JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id).write() + "\n"; // array of requests } else if (valRequest.isArray()) { @@ -230,8 +230,8 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strReply); - } catch (const UniValue& objError) { - JSONErrorReply(req, objError, jreq.id); + } catch (UniValue& e) { + JSONErrorReply(req, std::move(e), jreq.id); return false; } catch (const std::exception& e) { JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index b7acd62ee3..12726ecc88 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -37,24 +37,18 @@ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, return request; } -UniValue JSONRPCReplyObj(const UniValue& result, const UniValue& error, const UniValue& id) +UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id) { UniValue reply(UniValue::VOBJ); if 
(!error.isNull()) reply.pushKV("result", NullUniValue); else - reply.pushKV("result", result); - reply.pushKV("error", error); - reply.pushKV("id", id); + reply.pushKV("result", std::move(result)); + reply.pushKV("error", std::move(error)); + reply.pushKV("id", std::move(id)); return reply; } -std::string JSONRPCReply(const UniValue& result, const UniValue& error, const UniValue& id) -{ - UniValue reply = JSONRPCReplyObj(result, error, id); - return reply.write() + "\n"; -} - UniValue JSONRPCError(int code, const std::string& message) { UniValue error(UniValue::VOBJ); diff --git a/src/rpc/request.h b/src/rpc/request.h index a682c58d96..116ebb8aac 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -12,8 +12,7 @@ #include UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); -UniValue JSONRPCReplyObj(const UniValue& result, const UniValue& error, const UniValue& id); -std::string JSONRPCReply(const UniValue& result, const UniValue& error, const UniValue& id); +UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id); UniValue JSONRPCError(int code, const std::string& message); /** Generate a new RPC authentication cookie and write it to disk */ diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 4883341522..fd18a7f9ec 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -366,11 +366,11 @@ static UniValue JSONRPCExecOne(JSONRPCRequest jreq, const UniValue& req) jreq.parse(req); UniValue result = tableRPC.execute(jreq); - rpc_result = JSONRPCReplyObj(result, NullUniValue, jreq.id); + rpc_result = JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id); } - catch (const UniValue& objError) + catch (UniValue& e) { - rpc_result = JSONRPCReplyObj(NullUniValue, objError, jreq.id); + rpc_result = JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id); } catch (const std::exception& e) { From a64a2b77e09bff784a2635ba19ff4aa6582bb5a5 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Mon, 27 Feb 2023 13:03:45 -0500 Subject: [PATCH 12/50] rpc: refactor single/batch requests Simplify the request handling flow so that errors and results only come from JSONRPCExec() --- src/httprpc.cpp | 24 ++++++++++++++++++------ src/rpc/server.cpp | 33 +++++---------------------------- src/rpc/server.h | 2 +- 3 files changed, 24 insertions(+), 35 deletions(-) diff --git a/src/httprpc.cpp b/src/httprpc.cpp index aef20c24fc..53f994efd7 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -185,7 +185,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Set the URI jreq.URI = req->GetURI(); - std::string strReply; + UniValue reply; bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser); if (!user_has_whitelist && g_rpc_whitelist_default) { LogPrintf("RPC User %s not allowed to call any methods\n", jreq.authUser); @@ -200,13 +200,12 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) req->WriteReply(HTTP_FORBIDDEN); return false; } - UniValue result = tableRPC.execute(jreq); - // Send reply - strReply = JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id).write() + "\n"; + reply = JSONRPCExec(jreq); // array of requests } else if (valRequest.isArray()) { + // Check authorization for each request's method if (user_has_whitelist) { for (unsigned int reqIdx = 0; reqIdx < valRequest.size(); reqIdx++) { if (!valRequest[reqIdx].isObject()) { @@ -223,13 +222,26 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) } } } - strReply = JSONRPCExecBatch(jreq, 
valRequest.get_array()); + + // Execute each request + reply = UniValue::VARR; + for (size_t i{0}; i < valRequest.size(); ++i) { + // Batches include errors in the batch response, they do not throw + try { + jreq.parse(valRequest[i]); + reply.push_back(JSONRPCExec(jreq)); + } catch (UniValue& e) { + reply.push_back(JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id)); + } catch (const std::exception& e) { + reply.push_back(JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id)); + } + } } else throw JSONRPCError(RPC_PARSE_ERROR, "Top-level object parse error"); req->WriteHeader("Content-Type", "application/json"); - req->WriteReply(HTTP_OK, strReply); + req->WriteReply(HTTP_OK, reply.write() + "\n"); } catch (UniValue& e) { JSONErrorReply(req, std::move(e), jreq.id); return false; diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index fd18a7f9ec..f199914095 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -358,36 +358,13 @@ bool IsDeprecatedRPCEnabled(const std::string& method) return find(enabled_methods.begin(), enabled_methods.end(), method) != enabled_methods.end(); } -static UniValue JSONRPCExecOne(JSONRPCRequest jreq, const UniValue& req) +UniValue JSONRPCExec(const JSONRPCRequest& jreq) { - UniValue rpc_result(UniValue::VOBJ); + // Might throw exception. Single requests will throw and send HTTP error codes + // but inside a batch, we just include the error object and return HTTP 200 + UniValue result = tableRPC.execute(jreq); - try { - jreq.parse(req); - - UniValue result = tableRPC.execute(jreq); - rpc_result = JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id); - } - catch (UniValue& e) - { - rpc_result = JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id); - } - catch (const std::exception& e) - { - rpc_result = JSONRPCReplyObj(NullUniValue, - JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); - } - - return rpc_result; -} - -std::string JSONRPCExecBatch(const JSONRPCRequest& jreq, const UniValue& vReq) -{ - UniValue ret(UniValue::VARR); - for (unsigned int reqIdx = 0; reqIdx < vReq.size(); reqIdx++) - ret.push_back(JSONRPCExecOne(jreq, vReq[reqIdx])); - - return ret.write() + "\n"; + return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id); } /** diff --git a/src/rpc/server.h b/src/rpc/server.h index 9a49d82570..2761bcd9db 100644 --- a/src/rpc/server.h +++ b/src/rpc/server.h @@ -181,7 +181,7 @@ extern CRPCTable tableRPC; void StartRPC(); void InterruptRPC(); void StopRPC(); -std::string JSONRPCExecBatch(const JSONRPCRequest& jreq, const UniValue& vReq); +UniValue JSONRPCExec(const JSONRPCRequest& jreq); // Drop witness when serializing for RPC? bool RPCSerializationWithoutWitness(); From 4a6d1d1e3bc3d559cd13d134b4b827f22635ac4b Mon Sep 17 00:00:00 2001 From: Andrew Toth Date: Thu, 17 Aug 2023 16:17:18 -0400 Subject: [PATCH 13/50] validation: don't clear cache on periodic flush --- src/validation.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index 5c585438d1..94c470ce2c 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2745,8 +2745,10 @@ bool Chainstate::FlushStateToDisk( return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!")); } // Flush the chainstate (which may refer to block index entries). - if (!CoinsTip().Flush()) + const auto empty_cache{(mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fFlushForPrune}; + if (empty_cache ? 
!CoinsTip().Flush() : !CoinsTip().Sync()) { return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to coin database.")); + } m_last_flush = nNow; full_flush_completed = true; TRACE5(utxocache, flush, From 95897ff181c0757e445f0e066a2a590a0a0120d2 Mon Sep 17 00:00:00 2001 From: kevkevin Date: Mon, 29 Apr 2024 08:57:58 -0500 Subject: [PATCH 14/50] doc: removed help text saying that peers may not connect automatically This help text was introduced about two years ago and now a significant portion of users are using version 23.0 and higher --- src/init.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index c19d596c7f..3a071bce07 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -537,9 +537,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); - // TODO: remove the sentence "Nodes not using ... incoming connections." once the changes from - // https://github.com/bitcoin/bitcoin/pull/23542 have become widespread. - argsman.AddArg("-port=", strprintf("Listen for connections on . Nodes not using the default ports (default: %u, testnet: %u, signet: %u, regtest: %u) are unlikely to get incoming connections. Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-port=", strprintf("Listen for connections on (default: %u, testnet: %u, signet: %u, regtest: %u). Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); #if HAVE_SOCKADDR_UN argsman.AddArg("-proxy=", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled). May be a local file path prefixed with 'unix:' if the proxy supports it.", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION); #else From 58594c7040241f2312b0b8735a8baf0412674613 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 26 Apr 2024 14:28:53 -0400 Subject: [PATCH 15/50] fuzz: txorphan tests fixups Adds the following fixups in txorphan fuzz tests: - Don't bond the output count of the created orphans based on the number of available coins - Allow duplicate inputs, when applicable, but don't store duplicate outpoints Rationale --------- The way the test is currently written, duplicate inputs are allowed based on a random flag (`duplicate_input`). If the flag is unset, upon selecting an outpoint as input for a new transaction, the input is popped to prevent re-selection, and later re-added to the collection (once all inputs have been picked). 
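(A standalone sketch of the pop-and-restore pattern just described, with
hypothetical names rather than the fuzz target's code. The point of the
fix: if the pop is made conditional on a flag but the restore runs
unconditionally, every picked element ends up duplicated in the pool.)

    #include <cstddef>
    #include <random>
    #include <vector>

    std::vector<int> PickUnique(std::vector<int>& pool, size_t n, std::mt19937& rng)
    {
        std::vector<int> picked;
        for (size_t i = 0; i < n && !pool.empty(); ++i) {
            std::uniform_int_distribution<size_t> dist(0, pool.size() - 1);
            const size_t idx = dist(rng);
            picked.push_back(pool[idx]);
            std::swap(pool[idx], pool.back()); // pop the pick to prevent re-selection
            pool.pop_back();
        }
        pool.insert(pool.end(), picked.begin(), picked.end()); // restore afterwards
        return picked;
    }

    int main()
    {
        std::mt19937 rng{42};
        std::vector<int> pool{1, 2, 3, 4};
        const auto picked = PickUnique(pool, 2, rng);
        return (picked.size() == 2 && pool.size() == 4) ? 0 : 1;
    }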
However, the re-addition to the collection is performed independently of whether the flag was set or not. This means that, if the flag is set, the selected inputs are duplicated which in turn makes these inputs more likely to be re-picked in the following iteration of the loop. Additionally, both the input and output count of the transaction and bonded to the number of available outpoints. This makes sense for the former, but the latter shouldn't be. --- src/test/fuzz/txorphan.cpp | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index a44f47b00d..037c7af77f 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -37,13 +37,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) SetMockTime(ConsumeTime(fuzzed_data_provider)); TxOrphanage orphanage; - std::vector outpoints; + std::set outpoints; + // initial outpoints used to construct transactions later for (uint8_t i = 0; i < 4; i++) { - outpoints.emplace_back(Txid::FromUint256(uint256{i}), 0); + outpoints.emplace(Txid::FromUint256(uint256{i}), 0); } - // if true, allow duplicate input when constructing tx - const bool duplicate_input = fuzzed_data_provider.ConsumeBool(); CTransactionRef ptx_potential_parent = nullptr; @@ -53,29 +52,22 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) const CTransactionRef tx = [&] { CMutableTransaction tx_mut; const auto num_in = fuzzed_data_provider.ConsumeIntegralInRange(1, outpoints.size()); - const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange(1, outpoints.size()); - // pick unique outpoints from outpoints as input + const auto num_out = fuzzed_data_provider.ConsumeIntegralInRange(1, 256); + // pick outpoints from outpoints as input. 
We allow input duplicates on purpose, given we are not + // running any transaction validation logic before adding transactions to the orphanage for (uint32_t i = 0; i < num_in; i++) { auto& prevout = PickValue(fuzzed_data_provider, outpoints); - tx_mut.vin.emplace_back(prevout); - // pop the picked outpoint if duplicate input is not allowed - if (!duplicate_input) { - std::swap(prevout, outpoints.back()); - outpoints.pop_back(); - } + // try making transactions unique by setting a random nSequence, but allow duplicate transactions if they happen + tx_mut.vin.emplace_back(prevout, CScript{}, fuzzed_data_provider.ConsumeIntegralInRange(0, CTxIn::SEQUENCE_FINAL)); } // output amount will not affect txorphanage for (uint32_t i = 0; i < num_out; i++) { tx_mut.vout.emplace_back(CAmount{0}, CScript{}); } - // restore previously popped outpoints - for (auto& in : tx_mut.vin) { - outpoints.push_back(in.prevout); - } auto new_tx = MakeTransactionRef(tx_mut); - // add newly constructed transaction to outpoints + // add newly constructed outpoints to the coin pool for (uint32_t i = 0; i < num_out; i++) { - outpoints.emplace_back(new_tx->GetHash(), i); + outpoints.emplace(new_tx->GetHash(), i); } return new_tx; }(); From cc67d33fdac45357b593b1faff3d1735e5fe91ba Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 15 Dec 2023 03:28:34 +0000 Subject: [PATCH 16/50] refactor: Simply include CTxMemPool::Options in CTxMemPool directly rather than duplicating definition --- src/kernel/mempool_persist.cpp | 6 ++-- src/net_processing.cpp | 2 +- src/node/interfaces.cpp | 10 +++--- src/rpc/fees.cpp | 4 +-- src/rpc/mempool.cpp | 10 +++--- src/rpc/net.cpp | 4 +-- src/test/txpackage_tests.cpp | 4 +-- src/test/util/setup_common.cpp | 6 ++-- src/txmempool.cpp | 56 +++++++++++++--------------------- src/txmempool.h | 18 ++--------- src/validation.cpp | 38 +++++++++++------------ 11 files changed, 66 insertions(+), 92 deletions(-) diff --git a/src/kernel/mempool_persist.cpp b/src/kernel/mempool_persist.cpp index f06f609379..53028a45ae 100644 --- a/src/kernel/mempool_persist.cpp +++ b/src/kernel/mempool_persist.cpp @@ -96,7 +96,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active if (amountdelta && opts.apply_fee_delta_priority) { pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } - if (nTime > TicksSinceEpoch(now - pool.m_expiry)) { + if (nTime > TicksSinceEpoch(now - pool.m_opts.expiry)) { LOCK(cs_main); const auto& accepted = AcceptToMemoryPool(active_chainstate, tx, nTime, /*bypass_limits=*/false, /*test_accept=*/false); if (accepted.m_result_type == MempoolAcceptResult::ResultType::VALID) { @@ -174,11 +174,11 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock } try { - const uint64_t version{pool.m_persist_v1_dat ? MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; + const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; std::vector xor_key(8); - if (!pool.m_persist_v1_dat) { + if (!pool.m_opts.persist_v1_dat) { FastRandomContext{}.fillrand(xor_key); file << xor_key; } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 9b5983b9d0..4cea6b3cbe 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -5742,7 +5742,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi if (current_time > peer.m_next_send_feefilter) { CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); // We always have a fee filter of at least the min relay fee - filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK()); + filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); if (filterToSend != peer.m_fee_filter_sent) { MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); peer.m_fee_filter_sent = filterToSend; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index bb13e53738..6a314c9a89 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -318,7 +318,7 @@ class NodeImpl : public Node CFeeRate getDustRelayFee() override { if (!m_context->mempool) return CFeeRate{DUST_RELAY_TX_FEE}; - return m_context->mempool->m_dust_relay_feerate; + return m_context->mempool->m_opts.dust_relay_feerate; } UniValue executeRpc(const std::string& command, const UniValue& params, const std::string& uri) override { @@ -701,7 +701,7 @@ class ChainImpl : public Chain { const CTxMemPool::Limits default_limits{}; - const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_limits : default_limits}; + const CTxMemPool::Limits& limits{m_node.mempool ? m_node.mempool->m_opts.limits : default_limits}; limit_ancestor_count = limits.ancestor_count; limit_descendant_count = limits.descendant_count; @@ -732,17 +732,17 @@ class ChainImpl : public Chain CFeeRate relayMinFee() override { if (!m_node.mempool) return CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}; - return m_node.mempool->m_min_relay_feerate; + return m_node.mempool->m_opts.min_relay_feerate; } CFeeRate relayIncrementalFee() override { if (!m_node.mempool) return CFeeRate{DEFAULT_INCREMENTAL_RELAY_FEE}; - return m_node.mempool->m_incremental_relay_feerate; + return m_node.mempool->m_opts.incremental_relay_feerate; } CFeeRate relayDustFee() override { if (!m_node.mempool) return CFeeRate{DUST_RELAY_TX_FEE}; - return m_node.mempool->m_dust_relay_feerate; + return m_node.mempool->m_opts.dust_relay_feerate; } bool havePruned() override { diff --git a/src/rpc/fees.cpp b/src/rpc/fees.cpp index f3345b4f1c..b933d8c647 100644 --- a/src/rpc/fees.cpp +++ b/src/rpc/fees.cpp @@ -65,7 +65,7 @@ static RPCHelpMan estimatesmartfee() const NodeContext& node = EnsureAnyNodeContext(request.context); const CTxMemPool& mempool = EnsureMemPool(node); - CHECK_NONFATAL(mempool.m_signals)->SyncWithValidationInterfaceQueue(); + CHECK_NONFATAL(mempool.m_opts.signals)->SyncWithValidationInterfaceQueue(); unsigned int max_target = fee_estimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE); unsigned int conf_target = ParseConfirmTarget(request.params[0], max_target); bool conservative = true; @@ -83,7 +83,7 @@ static RPCHelpMan estimatesmartfee() CFeeRate feeRate{fee_estimator.estimateSmartFee(conf_target, &feeCalc, conservative)}; if (feeRate != CFeeRate(0)) { CFeeRate min_mempool_feerate{mempool.GetMinFee()}; - CFeeRate min_relay_feerate{mempool.m_min_relay_feerate}; + CFeeRate 
min_relay_feerate{mempool.m_opts.min_relay_feerate}; feeRate = std::max({feeRate, min_mempool_feerate, min_relay_feerate}); result.pushKV("feerate", ValueFromAmount(feeRate.GetFeePerK())); } else { diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index e6fe3c7ebd..028594cb7b 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -671,12 +671,12 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool) ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize()); ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage()); ret.pushKV("total_fee", ValueFromAmount(pool.GetTotalFee())); - ret.pushKV("maxmempool", pool.m_max_size_bytes); - ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(), pool.m_min_relay_feerate).GetFeePerK())); - ret.pushKV("minrelaytxfee", ValueFromAmount(pool.m_min_relay_feerate.GetFeePerK())); - ret.pushKV("incrementalrelayfee", ValueFromAmount(pool.m_incremental_relay_feerate.GetFeePerK())); + ret.pushKV("maxmempool", pool.m_opts.max_size_bytes); + ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(), pool.m_opts.min_relay_feerate).GetFeePerK())); + ret.pushKV("minrelaytxfee", ValueFromAmount(pool.m_opts.min_relay_feerate.GetFeePerK())); + ret.pushKV("incrementalrelayfee", ValueFromAmount(pool.m_opts.incremental_relay_feerate.GetFeePerK())); ret.pushKV("unbroadcastcount", uint64_t{pool.GetUnbroadcastTxs().size()}); - ret.pushKV("fullrbf", pool.m_full_rbf); + ret.pushKV("fullrbf", pool.m_opts.full_rbf); return ret; } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 04f9410b32..59397aa84d 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -697,8 +697,8 @@ static RPCHelpMan getnetworkinfo() obj.pushKV("networks", GetNetworksInfo()); if (node.mempool) { // Those fields can be deprecated, to be replaced by the getmempoolinfo fields - obj.pushKV("relayfee", ValueFromAmount(node.mempool->m_min_relay_feerate.GetFeePerK())); - obj.pushKV("incrementalfee", ValueFromAmount(node.mempool->m_incremental_relay_feerate.GetFeePerK())); + obj.pushKV("relayfee", ValueFromAmount(node.mempool->m_opts.min_relay_feerate.GetFeePerK())); + obj.pushKV("incrementalfee", ValueFromAmount(node.mempool->m_opts.incremental_relay_feerate.GetFeePerK())); } UniValue localAddresses(UniValue::VARR); { diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 8112f5f685..55e0c5f285 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -679,7 +679,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) CTransactionRef ptx_parent3 = MakeTransactionRef(mtx_parent3); package_mixed.push_back(ptx_parent3); BOOST_CHECK(m_node.mempool->GetMinFee().GetFee(GetVirtualTransactionSize(*ptx_parent3)) > low_fee_amt); - BOOST_CHECK(m_node.mempool->m_min_relay_feerate.GetFee(GetVirtualTransactionSize(*ptx_parent3)) <= low_fee_amt); + BOOST_CHECK(m_node.mempool->m_opts.min_relay_feerate.GetFee(GetVirtualTransactionSize(*ptx_parent3)) <= low_fee_amt); // child spends parent1, parent2, and parent3 CKey mixed_grandchild_key = GenerateRandomKey(); @@ -825,7 +825,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) CTransactionRef tx_parent_cheap = MakeTransactionRef(mtx_parent_cheap); package_still_too_low.push_back(tx_parent_cheap); BOOST_CHECK(m_node.mempool->GetMinFee().GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) > parent_fee); - BOOST_CHECK(m_node.mempool->m_min_relay_feerate.GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) <= parent_fee); + 
BOOST_CHECK(m_node.mempool->m_opts.min_relay_feerate.GetFee(GetVirtualTransactionSize(*tx_parent_cheap)) <= parent_fee); auto mtx_child_cheap = CreateValidMempoolTransaction(/*input_transaction=*/tx_parent_cheap, /*input_vout=*/0, /*input_height=*/101, /*input_signing_key=*/child_key, diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 38350b33cc..34b1496f34 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -552,9 +552,9 @@ void TestChain100Setup::MockMempoolMinFee(const CFeeRate& target_feerate) assert(m_node.mempool->size() == 0); // The target feerate cannot be too low... // ...otherwise the transaction's feerate will need to be negative. - assert(target_feerate > m_node.mempool->m_incremental_relay_feerate); + assert(target_feerate > m_node.mempool->m_opts.incremental_relay_feerate); // ...otherwise this is not meaningful. The feerate policy uses the maximum of both feerates. - assert(target_feerate > m_node.mempool->m_min_relay_feerate); + assert(target_feerate > m_node.mempool->m_opts.min_relay_feerate); // Manually create an invalid transaction. Manually set the fee in the CTxMemPoolEntry to // achieve the exact target feerate. @@ -565,7 +565,7 @@ void TestChain100Setup::MockMempoolMinFee(const CFeeRate& target_feerate) LockPoints lp; // The new mempool min feerate is equal to the removed package's feerate + incremental feerate. const auto tx_fee = target_feerate.GetFee(GetVirtualTransactionSize(*tx)) - - m_node.mempool->m_incremental_relay_feerate.GetFee(GetVirtualTransactionSize(*tx)); + m_node.mempool->m_opts.incremental_relay_feerate.GetFee(GetVirtualTransactionSize(*tx)); m_node.mempool->addUnchecked(CTxMemPoolEntry(tx, /*fee=*/tx_fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/true, /*sigops_cost=*/1, lp)); diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 06066e38b2..c845944d32 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -92,7 +92,7 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendan // Don't directly remove the transaction here -- doing so would // invalidate iterators in cachedDescendants. Mark it for removal // by inserting into descendants_to_remove. 
- if (descendant.GetCountWithAncestors() > uint64_t(m_limits.ancestor_count) || descendant.GetSizeWithAncestors() > m_limits.ancestor_size_vbytes) { + if (descendant.GetCountWithAncestors() > uint64_t(m_opts.limits.ancestor_count) || descendant.GetSizeWithAncestors() > m_opts.limits.ancestor_size_vbytes) { descendants_to_remove.insert(descendant.GetTx().GetHash()); } } @@ -203,14 +203,14 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, size_t pack_count = package.size(); // Package itself is busting mempool limits; should be rejected even if no staged_ancestors exist - if (pack_count > static_cast(m_limits.ancestor_count)) { - return util::Error{Untranslated(strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_limits.ancestor_count))}; - } else if (pack_count > static_cast(m_limits.descendant_count)) { - return util::Error{Untranslated(strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_limits.descendant_count))}; - } else if (total_vsize > m_limits.ancestor_size_vbytes) { - return util::Error{Untranslated(strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_limits.ancestor_size_vbytes))}; - } else if (total_vsize > m_limits.descendant_size_vbytes) { - return util::Error{Untranslated(strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_limits.descendant_size_vbytes))}; + if (pack_count > static_cast(m_opts.limits.ancestor_count)) { + return util::Error{Untranslated(strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_opts.limits.ancestor_count))}; + } else if (pack_count > static_cast(m_opts.limits.descendant_count)) { + return util::Error{Untranslated(strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_opts.limits.descendant_count))}; + } else if (total_vsize > m_opts.limits.ancestor_size_vbytes) { + return util::Error{Untranslated(strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_opts.limits.ancestor_size_vbytes))}; + } else if (total_vsize > m_opts.limits.descendant_size_vbytes) { + return util::Error{Untranslated(strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_opts.limits.descendant_size_vbytes))}; } CTxMemPoolEntry::Parents staged_ancestors; @@ -219,8 +219,8 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, std::optional piter = GetIter(input.prevout.hash); if (piter) { staged_ancestors.insert(**piter); - if (staged_ancestors.size() + package.size() > static_cast(m_limits.ancestor_count)) { - return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", m_limits.ancestor_count))}; + if (staged_ancestors.size() + package.size() > static_cast(m_opts.limits.ancestor_count)) { + return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", m_opts.limits.ancestor_count))}; } } } @@ -229,7 +229,7 @@ util::Result CTxMemPool::CheckPackageLimits(const Package& package, // considered together must be within limits even if they are not interdependent. This may be // stricter than the limits for each individual transaction. const auto ancestors{CalculateAncestorsAndCheckLimits(total_vsize, package.size(), - staged_ancestors, m_limits)}; + staged_ancestors, m_opts.limits)}; // It's possible to overestimate the ancestor/descendant totals. 
if (!ancestors.has_value()) return util::Error{Untranslated("possibly " + util::ErrorString(ancestors).original)}; return {}; @@ -396,19 +396,7 @@ void CTxMemPoolEntry::UpdateAncestorState(int32_t modifySize, CAmount modifyFee, } CTxMemPool::CTxMemPool(const Options& opts) - : m_check_ratio{opts.check_ratio}, - m_max_size_bytes{opts.max_size_bytes}, - m_expiry{opts.expiry}, - m_incremental_relay_feerate{opts.incremental_relay_feerate}, - m_min_relay_feerate{opts.min_relay_feerate}, - m_dust_relay_feerate{opts.dust_relay_feerate}, - m_permit_bare_multisig{opts.permit_bare_multisig}, - m_max_datacarrier_bytes{opts.max_datacarrier_bytes}, - m_require_standard{opts.require_standard}, - m_full_rbf{opts.full_rbf}, - m_persist_v1_dat{opts.persist_v1_dat}, - m_limits{opts.limits}, - m_signals{opts.signals} + : m_opts{opts} { } @@ -489,12 +477,12 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) // even if not directly reported below. uint64_t mempool_sequence = GetAndIncrementSequence(); - if (reason != MemPoolRemovalReason::BLOCK && m_signals) { + if (reason != MemPoolRemovalReason::BLOCK && m_opts.signals) { // Notify clients that a transaction has been removed from the mempool // for any reason except being included in a block. Clients interested // in transactions included in blocks can subscribe to the BlockConnected // notification. - m_signals->TransactionRemovedFromMempool(it->GetSharedTx(), reason, mempool_sequence); + m_opts.signals->TransactionRemovedFromMempool(it->GetSharedTx(), reason, mempool_sequence); } TRACE5(mempool, removed, it->GetTx().GetHash().data(), @@ -645,8 +633,8 @@ void CTxMemPool::removeForBlock(const std::vector& vtx, unsigne removeConflicts(*tx); ClearPrioritisation(tx->GetHash()); } - if (m_signals) { - m_signals->MempoolTransactionsRemovedForBlock(txs_removed_for_block, nBlockHeight); + if (m_opts.signals) { + m_opts.signals->MempoolTransactionsRemovedForBlock(txs_removed_for_block, nBlockHeight); } lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; @@ -654,9 +642,9 @@ void CTxMemPool::removeForBlock(const std::vector& vtx, unsigne void CTxMemPool::check(const CCoinsViewCache& active_coins_tip, int64_t spendheight) const { - if (m_check_ratio == 0) return; + if (m_opts.check_ratio == 0) return; - if (GetRand(m_check_ratio) >= 1) return; + if (GetRand(m_opts.check_ratio) >= 1) return; AssertLockHeld(::cs_main); LOCK(cs); @@ -1108,12 +1096,12 @@ CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; - if (rollingMinimumFeeRate < (double)m_incremental_relay_feerate.GetFeePerK() / 2) { + if (rollingMinimumFeeRate < (double)m_opts.incremental_relay_feerate.GetFeePerK() / 2) { rollingMinimumFeeRate = 0; return CFeeRate(0); } } - return std::max(CFeeRate(llround(rollingMinimumFeeRate)), m_incremental_relay_feerate); + return std::max(CFeeRate(llround(rollingMinimumFeeRate)), m_opts.incremental_relay_feerate); } void CTxMemPool::trackPackageRemoved(const CFeeRate& rate) { @@ -1137,7 +1125,7 @@ void CTxMemPool::TrimToSize(size_t sizelimit, std::vector* pvNoSpends // to have 0 fee). This way, we don't allow txn to enter mempool with feerate // equal to txn which were removed with no block in between. 
CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); - removed += m_incremental_relay_feerate; + removed += m_opts.incremental_relay_feerate; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); diff --git a/src/txmempool.h b/src/txmempool.h index 52a3dc2d7d..c9f6cdbfac 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -301,7 +301,6 @@ struct TxMempoolInfo class CTxMemPool { protected: - const int m_check_ratio; //!< Value n means that 1 times in n we check. std::atomic nTransactionsUpdated{0}; //!< Used by getblocktemplate to trigger CreateNewBlock() invocation uint64_t totalTxSize GUARDED_BY(cs){0}; //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. @@ -436,20 +435,7 @@ class CTxMemPool using Options = kernel::MemPoolOptions; - const int64_t m_max_size_bytes; - const std::chrono::seconds m_expiry; - const CFeeRate m_incremental_relay_feerate; - const CFeeRate m_min_relay_feerate; - const CFeeRate m_dust_relay_feerate; - const bool m_permit_bare_multisig; - const std::optional m_max_datacarrier_bytes; - const bool m_require_standard; - const bool m_full_rbf; - const bool m_persist_v1_dat; - - const Limits m_limits; - - ValidationSignals* const m_signals; + const Options m_opts; /** Create a new CTxMemPool. * Sanity checks will be off by default for performance, because otherwise @@ -625,7 +611,7 @@ class CTxMemPool * would otherwise be half of this, it is set to 0 instead. */ CFeeRate GetMinFee() const { - return GetMinFee(m_max_size_bytes); + return GetMinFee(m_opts.max_size_bytes); } /** Remove transactions from the mempool until its dynamic size is <= sizelimit. diff --git a/src/validation.cpp b/src/validation.cpp index f57851b4f7..880fd042d9 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -268,13 +268,13 @@ static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache) { AssertLockHeld(::cs_main); AssertLockHeld(pool.cs); - int expired = pool.Expire(GetTime() - pool.m_expiry); + int expired = pool.Expire(GetTime() - pool.m_opts.expiry); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); } std::vector vNoSpendsRemaining; - pool.TrimToSize(pool.m_max_size_bytes, &vNoSpendsRemaining); + pool.TrimToSize(pool.m_opts.max_size_bytes, &vNoSpendsRemaining); for (const COutPoint& removed : vNoSpendsRemaining) coins_cache.Uncache(removed); } @@ -695,9 +695,9 @@ class MemPoolAccept return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee)); } - if (package_fee < m_pool.m_min_relay_feerate.GetFee(package_size)) { + if (package_fee < m_pool.m_opts.min_relay_feerate.GetFee(package_size)) { return state.Invalid(TxValidationResult::TX_RECONSIDERABLE, "min relay fee not met", - strprintf("%d < %d", package_fee, m_pool.m_min_relay_feerate.GetFee(package_size))); + strprintf("%d < %d", package_fee, m_pool.m_opts.min_relay_feerate.GetFee(package_size))); } return true; } @@ -742,7 +742,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Rather not work on nonstandard transactions (unless -testnet/-regtest) std::string reason; - if (m_pool.m_require_standard && !IsStandardTx(tx, m_pool.m_max_datacarrier_bytes, m_pool.m_permit_bare_multisig, m_pool.m_dust_relay_feerate, reason)) { + if (m_pool.m_opts.require_standard && !IsStandardTx(tx, m_pool.m_opts.max_datacarrier_bytes, 
m_pool.m_opts.permit_bare_multisig, m_pool.m_opts.dust_relay_feerate, reason)) { return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason); } @@ -789,7 +789,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // // Replaceability signaling of the original transactions may be // ignored due to node setting. - const bool allow_rbf{m_pool.m_full_rbf || SignalsOptInRBF(*ptxConflicting) || ptxConflicting->nVersion == 3}; + const bool allow_rbf{m_pool.m_opts.full_rbf || SignalsOptInRBF(*ptxConflicting) || ptxConflicting->nVersion == 3}; if (!allow_rbf) { return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict"); } @@ -850,12 +850,12 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return false; // state filled in by CheckTxInputs } - if (m_pool.m_require_standard && !AreInputsStandard(tx, m_view)) { + if (m_pool.m_opts.require_standard && !AreInputsStandard(tx, m_view)) { return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); } // Check for non-standard witnesses. - if (tx.HasWitness() && m_pool.m_require_standard && !IsWitnessStandard(tx, m_view)) { + if (tx.HasWitness() && m_pool.m_opts.require_standard && !IsWitnessStandard(tx, m_view)) { return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard"); } @@ -893,11 +893,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // method of ensuring the tx remains bumped. For example, the fee-bumping child could disappear // due to a replacement. // The only exception is v3 transactions. - if (!bypass_limits && ws.m_ptx->nVersion != 3 && ws.m_modified_fees < m_pool.m_min_relay_feerate.GetFee(ws.m_vsize)) { + if (!bypass_limits && ws.m_ptx->nVersion != 3 && ws.m_modified_fees < m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize)) { // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", - strprintf("%d < %d", ws.m_modified_fees, m_pool.m_min_relay_feerate.GetFee(ws.m_vsize))); + strprintf("%d < %d", ws.m_modified_fees, m_pool.m_opts.min_relay_feerate.GetFee(ws.m_vsize))); } // No individual transactions are allowed below the mempool min feerate except from disconnected // blocks and transactions in a package. Package transactions will be checked using package @@ -908,7 +908,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Note that these modifications are only applicable to single transaction scenarios; // carve-outs and package RBF are disabled for multi-transaction evaluations. - CTxMemPool::Limits maybe_rbf_limits = m_pool.m_limits; + CTxMemPool::Limits maybe_rbf_limits = m_pool.m_opts.limits; // Calculate in-mempool ancestors, up to a limit. if (ws.m_conflicts.size() == 1) { @@ -1061,7 +1061,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws) ws.m_conflicting_size += it->GetTxSize(); } if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize, - m_pool.m_incremental_relay_feerate, hash)}) { + m_pool.m_opts.incremental_relay_feerate, hash)}) { // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not // TX_RECONSIDERABLE, because it cannot be bypassed using package validation. // This must be changed if package RBF is enabled. 
@@ -1230,7 +1230,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector& // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the // last calculation done in PreChecks, since package ancestors have already been submitted. { - auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_opts.limits)}; if(!ancestors) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail. @@ -1271,14 +1271,14 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector& results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees, effective_feerate, effective_feerate_wtxids)); - if (!m_pool.m_signals) continue; + if (!m_pool.m_opts.signals) continue; const CTransaction& tx = *ws.m_ptx; const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees, ws.m_vsize, ws.m_entry->GetHeight(), args.m_bypass_limits, args.m_package_submission, IsCurrentForFeeEstimation(m_active_chainstate), m_pool.HasNoInputsOf(tx)); - m_pool.m_signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); + m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); } return all_submitted; } @@ -1286,7 +1286,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector& MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) { AssertLockHeld(cs_main); - LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_signals->TransactionAddedToMempool()) + LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_opts.signals->TransactionAddedToMempool()) Workspace ws(ptx); const std::vector single_wtxid{ws.m_ptx->GetWitnessHash()}; @@ -1327,14 +1327,14 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), {ws.m_ptx->GetWitnessHash()}); } - if (m_pool.m_signals) { + if (m_pool.m_opts.signals) { const CTransaction& tx = *ws.m_ptx; const auto tx_info = NewMempoolTransactionInfo(ws.m_ptx, ws.m_base_fees, ws.m_vsize, ws.m_entry->GetHeight(), args.m_bypass_limits, args.m_package_submission, IsCurrentForFeeEstimation(m_active_chainstate), m_pool.HasNoInputsOf(tx)); - m_pool.m_signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); + m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence()); } return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees, @@ -2597,7 +2597,7 @@ CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState() AssertLockHeld(::cs_main); return this->GetCoinsCacheSizeState( m_coinstip_cache_size_bytes, - m_mempool ? m_mempool->m_max_size_bytes : 0); + m_mempool ? m_mempool->m_opts.max_size_bytes : 0); } CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState( From 0d114e3cb20cb9e03fc9ba8daf3d03436b491742 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Wed, 20 Mar 2024 15:08:40 -0400 Subject: [PATCH 17/50] blockstorage: Add Assume for fKnown / snapshot chainstate fKnown is true during reindex (and only then), which deletes any existing snapshot chainstate. 
As a result, this function can never be called with fKnown set and a snapshot chainstate. Add an Assume for this, and make the code initializing a blockfile cursor for the snapshot conditional on !fKnown. This is a preparation for splitting the reindex logic out of FindBlockPos in the following commits. --- src/node/blockstorage.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 576c07a833..9b8467c2f4 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -853,8 +853,16 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne LOCK(cs_LastBlockFile); const BlockfileType chain_type = BlockfileTypeForHeight(nHeight); + // Check that chain type is NORMAL if fKnown is true, because fKnown is only + // true during reindexing, and reindexing deletes snapshot chainstates, so + // chain_type will not be SNAPSHOT. Also check that cursor exists, because + // the normal cursor should never be null. + if (fKnown) { + Assume(chain_type == BlockfileType::NORMAL); + Assume(m_blockfile_cursors[chain_type]); + } - if (!m_blockfile_cursors[chain_type]) { + if (!fKnown && !m_blockfile_cursors[chain_type]) { // If a snapshot is loaded during runtime, we may not have initialized this cursor yet. assert(chain_type == BlockfileType::ASSUMED); const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1}; From e6aba463adeb88fc707342a12fef658f68b0a0ea Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 9 May 2024 23:44:31 +0800 Subject: [PATCH 18/50] contrib: use env_flags in get_arch Otherwise we fail to link when trying to use lld. --- contrib/devtools/test-security-check.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 48823c7e45..51bca4627e 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -27,22 +27,24 @@ def clean_files(source, executable): os.remove(source) os.remove(executable) -def call_security_check(cc: str, source: str, executable: str, options) -> tuple: +def env_flags() -> list[str]: # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. # # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference.
- env_flags: list[str] = [] + flags: list[str] = [] for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: - env_flags += filter(None, os.environ.get(var, '').split(' ')) + flags += filter(None, os.environ.get(var, '').split(' ')) + return flags - subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) +def call_security_check(cc: str, source: str, executable: str, options) -> tuple: + subprocess.run([*cc,source,'-o',executable] + env_flags() + options, check=True) p = subprocess.run([os.path.join(os.path.dirname(__file__), 'security-check.py'), executable], stdout=subprocess.PIPE, text=True) return (p.returncode, p.stdout.rstrip()) def get_arch(cc, source, executable): - subprocess.run([*cc, source, '-o', executable], check=True) + subprocess.run([*cc, source, '-o', executable] + env_flags(), check=True) binary = lief.parse(executable) arch = binary.abstract.header.architecture os.remove(executable) From b59a027d957a4cffd225a681e6c85f9ae7fd77f3 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 10 May 2024 00:13:50 +0800 Subject: [PATCH 19/50] contrib: drop dead get_machine from test sym check --- contrib/devtools/test-symbol-check.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index 0140decb25..b00004586c 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -27,10 +27,6 @@ def call_symbol_check(cc: list[str], source, executable, options): os.remove(executable) return (p.returncode, p.stdout.rstrip()) -def get_machine(cc: list[str]): - p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, text=True) - return p.stdout.rstrip() - class TestSymbolChecks(unittest.TestCase): def test_ELF(self): source = 'test1.c' From 684da9707040ce25d95b2954eda50b811136d92c Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Fri, 11 Aug 2023 16:12:42 -0600 Subject: [PATCH 20/50] p2p, bugfix: detect addnode cjdns peers in GetAddedNodeInfo() Addnode (manual) peers connected to us via the cjdns network are currently not detected by CConnman::GetAddedNodeInfo(), i.e. fConnected is always false. 
This causes the following issues: - RPC `getaddednodeinfo` incorrectly shows them as not connected - CConnman::ThreadOpenAddedConnections() continually retries to connect them --- src/net.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net.cpp b/src/net.cpp index ad1e464667..7a64d3ce04 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2832,7 +2832,7 @@ std::vector CConnman::GetAddedNodeInfo(bool include_connected) co } for (const auto& addr : lAddresses) { - CService service(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node))); + CService service{MaybeFlipIPv6toCJDNS(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node)))}; AddedNodeInfo addedNode{addr, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port From d0b047494c28381942c09d0cca45baa323bfcffc Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Tue, 14 Nov 2023 18:12:45 -0600 Subject: [PATCH 21/50] test: add GetAddedNodeInfo() CJDNS regression unit test --- src/test/net_peer_connection_tests.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/test/net_peer_connection_tests.cpp b/src/test/net_peer_connection_tests.cpp index 58cbe9eb72..00bc1fdb6a 100644 --- a/src/test/net_peer_connection_tests.cpp +++ b/src/test/net_peer_connection_tests.cpp @@ -108,6 +108,12 @@ BOOST_AUTO_TEST_CASE(test_addnode_getaddednodeinfo_and_connection_detection) AddPeer(id, nodes, *peerman, *connman, ConnectionType::BLOCK_RELAY, /*onion_peer=*/true); AddPeer(id, nodes, *peerman, *connman, ConnectionType::INBOUND); + // Add a CJDNS peer connection. + AddPeer(id, nodes, *peerman, *connman, ConnectionType::INBOUND, /*onion_peer=*/false, + /*address=*/"[fc00:3344:5566:7788:9900:aabb:ccdd:eeff]:1234"); + BOOST_CHECK(nodes.back()->IsInboundConn()); + BOOST_CHECK_EQUAL(nodes.back()->ConnectedThroughNetwork(), Network::NET_CJDNS); + BOOST_TEST_MESSAGE("Call AddNode() for all the peers"); for (auto node : connman->TestNodes()) { BOOST_CHECK(connman->AddNode({/*m_added_node=*/node->addr.ToStringAddrPort(), /*m_use_v2transport=*/true})); From 1e4412b317f74dd64069309544fe73c95e2c10e7 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 13 May 2024 20:01:06 +0800 Subject: [PATCH 22/50] depends: set AR for CMake --- depends/funcs.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/depends/funcs.mk b/depends/funcs.mk index d8a72421b4..05abf7bf7e 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -180,6 +180,7 @@ $(1)_cmake=env CC="$$($(1)_cc)" \ CXXFLAGS="$$($(1)_cppflags) $$($(1)_cxxflags)" \ LDFLAGS="$$($(1)_ldflags)" \ cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" \ + -DCMAKE_AR=`which $$($(1)_ar)` \ -DCMAKE_INSTALL_LIBDIR=lib/ \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=$(V) \ From 43cfb428cba04b8db98d4d0d56ffe28ad686e58c Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 13 May 2024 20:01:37 +0800 Subject: [PATCH 23/50] depends: set NM for CMake --- depends/funcs.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/depends/funcs.mk b/depends/funcs.mk index 05abf7bf7e..3127c09875 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -181,6 +181,7 @@ $(1)_cmake=env CC="$$($(1)_cc)" \ LDFLAGS="$$($(1)_ldflags)" \ cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" \ -DCMAKE_AR=`which $$($(1)_ar)` \ + -DCMAKE_NM=`which $$($(1)_nm)` \ -DCMAKE_INSTALL_LIBDIR=lib/ \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=$(V) \ From 019ad7327c397094d7648b55503bf5373b108a57 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 
13 May 2024 20:01:45 +0800 Subject: [PATCH 24/50] depends: set RANLIB for CMake --- depends/funcs.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/depends/funcs.mk b/depends/funcs.mk index 3127c09875..7f79478cbd 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -182,6 +182,7 @@ $(1)_cmake=env CC="$$($(1)_cc)" \ cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)" \ -DCMAKE_AR=`which $$($(1)_ar)` \ -DCMAKE_NM=`which $$($(1)_nm)` \ + -DCMAKE_RANLIB=`which $$($(1)_ranlib)` \ -DCMAKE_INSTALL_LIBDIR=lib/ \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=$(V) \ From b77bad309e92f176f340598eec056eb7bff86f5f Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 10 May 2024 20:01:05 +0100 Subject: [PATCH 25/50] rpc: move UniValue in blockToJSON Without explicitly declaring the move, these UniValues get copied, causing increased memory usage. Fix this by explicitly moving the UniValue objects. Used by `rest_block` and `getblock` RPC. --- src/rpc/blockchain.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index c9997ae063..1abaeafb2a 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -188,12 +188,12 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn const CTxUndo* txundo = (have_undo && i > 0) ? &blockUndo.vtxundo.at(i - 1) : nullptr; UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, /*block_hash=*/uint256(), /*entry=*/objTx, /*include_hex=*/true, txundo, verbosity); - txs.push_back(objTx); + txs.push_back(std::move(objTx)); } break; } - result.pushKV("tx", txs); + result.pushKV("tx", std::move(txs)); return result; } From 12d82817bf32396b58c8c65645012def606680b6 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Mon, 13 May 2024 23:25:10 +0200 Subject: [PATCH 26/50] refactor: simplify `FormatSubVersion` using strprintf/Join Rather than using std::ostringstream and manually joining the comments, use strprintf and our own `Join` helper. 
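For illustration, a minimal self-contained sketch of the resulting pattern (the local join() below is a stand-in assumption for our `Join` helper, and the version string is hard-coded where the real function calls FormatVersion):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for Join(): concatenate elements with a separator.
    static std::string join(const std::vector<std::string>& parts, const std::string& sep)
    {
        std::string out;
        for (std::size_t i = 0; i < parts.size(); ++i) {
            if (i > 0) out += sep;
            out += parts[i];
        }
        return out;
    }

    int main()
    {
        const std::vector<std::string> comments{"comment1", "comment2"};
        std::string comments_str;
        if (!comments.empty()) comments_str = "(" + join(comments, "; ") + ")";
        // Prints the BIP 14 style string: /Satoshi:26.99.0(comment1; comment2)/
        std::printf("/%s:%s%s/\n", "Satoshi", "26.99.0", comments_str.c_str());
    }

The join-then-format shape makes the "(a; b)" comment grouping visible at a glance instead of being spread over an iterator loop.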
--- src/clientversion.cpp | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/src/clientversion.cpp b/src/clientversion.cpp index 3a5e060a39..6b9727a158 100644 --- a/src/clientversion.cpp +++ b/src/clientversion.cpp @@ -5,11 +5,11 @@ #include // IWYU pragma: keep #include +#include #include #include -#include #include #include @@ -64,19 +64,9 @@ std::string FormatFullVersion() */ std::string FormatSubVersion(const std::string& name, int nClientVersion, const std::vector& comments) { - std::ostringstream ss; - ss << "/"; - ss << name << ":" << FormatVersion(nClientVersion); - if (!comments.empty()) - { - std::vector::const_iterator it(comments.begin()); - ss << "(" << *it; - for(++it; it != comments.end(); ++it) - ss << "; " << *it; - ss << ")"; - } - ss << "/"; - return ss.str(); + std::string comments_str; + if (!comments.empty()) comments_str = strprintf("(%s)", Join(comments, "; ")); + return strprintf("/%s:%s%s/", name, FormatVersion(nClientVersion), comments_str); } std::string CopyrightHolders(const std::string& strPrefix) From b946f8a4c51be42e52d63a6d578158c0b2a6b7ed Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 13 Feb 2024 17:41:59 +0100 Subject: [PATCH 27/50] crypto: add NUMS_H const --- src/pubkey.cpp | 12 ++++++++++++ src/pubkey.h | 5 +++++ test/functional/feature_framework_unit_tests.py | 1 + test/functional/test_framework/crypto/secp256k1.py | 8 ++++++++ 4 files changed, 26 insertions(+) diff --git a/src/pubkey.cpp b/src/pubkey.cpp index 11e1b4abb5..13e3c2dbe0 100644 --- a/src/pubkey.cpp +++ b/src/pubkey.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -181,6 +182,17 @@ int ecdsa_signature_parse_der_lax(secp256k1_ecdsa_signature* sig, const unsigned return 1; } +/** Nothing Up My Sleeve (NUMS) point + * + * NUMS_H is a point with an unknown discrete logarithm, constructed by taking the sha256 of 'g' + * (uncompressed encoding), which happens to be a point on the curve. + * + * For an example script for calculating H, refer to the unit tests in + * ./test/functional/test_framework/crypto/secp256k1.py + */ +static const std::vector NUMS_H_DATA{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; +const XOnlyPubKey XOnlyPubKey::NUMS_H{NUMS_H_DATA}; + XOnlyPubKey::XOnlyPubKey(Span bytes) { assert(bytes.size() == 32); diff --git a/src/pubkey.h b/src/pubkey.h index 15d7e7bc07..ae34ddd0af 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -233,6 +233,11 @@ class XOnlyPubKey uint256 m_keydata; public: + /** Nothing Up My Sleeve point H + * Used as an internal key for provably disabling the key path spend + * see BIP341 for more details */ + static const XOnlyPubKey NUMS_H; + /** Construct an empty x-only pubkey. 
*/ XOnlyPubKey() = default; diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py index c9754e083c..f03f084bed 100755 --- a/test/functional/feature_framework_unit_tests.py +++ b/test/functional/feature_framework_unit_tests.py @@ -25,6 +25,7 @@ "crypto.muhash", "crypto.poly1305", "crypto.ripemd160", + "crypto.secp256k1", "script", "segwit_addr", "wallet_util", diff --git a/test/functional/test_framework/crypto/secp256k1.py b/test/functional/test_framework/crypto/secp256k1.py index 2e9e419da5..50a46dce37 100644 --- a/test/functional/test_framework/crypto/secp256k1.py +++ b/test/functional/test_framework/crypto/secp256k1.py @@ -15,6 +15,8 @@ * G: the secp256k1 generator point """ +import unittest +from hashlib import sha256 class FE: """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977). @@ -344,3 +346,9 @@ def mul(self, a): # Precomputed table with multiples of G for fast multiplication FAST_G = FastGEMul(G) + +class TestFrameworkSecp256k1(unittest.TestCase): + def test_H(self): + H = sha256(G.to_bytes_uncompressed()).digest() + assert GE.lift_x(FE.from_bytes(H)) is not None + self.assertEqual(H.hex(), "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0") From d4b17c7d46ad8e2833ade99d5b4c9741c913e84d Mon Sep 17 00:00:00 2001 From: TheCharlatan Date: Fri, 10 May 2024 22:10:34 +0200 Subject: [PATCH 28/50] kernel: Remove batchpriority from kernel library The current usage of ScheduleBatchPriority is not transparent. Once the thread scheduling is changed, it remains unchanged for the remainder of the thread's lifetime. So move the call from `ImportBlocks` to the init code where it is clearer that its effect lasts for the entire lifetime of the thread. Users of the kernel library might not expect `ImportBlocks` to have an influence on the thread it is called in. Particularly since it is only a compile time option and cannot be controlled at runtime. With this patch users of the kernel library can now choose their own scheduling policy. 
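As a sketch of what this enables for a kernel-library user (mirroring the init.cpp hunk below; the direct pthread calls are one possible policy choice, not a kernel API, and the ImportBlocks call is elided as a comment):

    #include <pthread.h>
    #include <sched.h>
    #include <thread>

    int main()
    {
        std::thread import_thread([] {
            // Roughly what util::ScheduleBatchPriority() does on Linux:
            // SCHED_BATCH with the mandatory priority of 0. A library user
            // may equally skip this or pick a different policy.
            const sched_param param{};
            pthread_setschedparam(pthread_self(), SCHED_BATCH, &param);
            // ... ImportBlocks(chainman, import_files); ...
        });
        import_thread.join();
    }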
--- src/Makefile.am | 1 - src/init.cpp | 2 + src/node/blockstorage.cpp | 109 ++++++++++++++++++-------------------- 3 files changed, 55 insertions(+), 57 deletions(-) diff --git a/src/Makefile.am b/src/Makefile.am index 639aecf3b3..226ee9321f 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -975,7 +975,6 @@ libbitcoinkernel_la_SOURCES = \ txdb.cpp \ txmempool.cpp \ uint256.cpp \ - util/batchpriority.cpp \ util/chaintype.cpp \ util/check.cpp \ util/feefrac.cpp \ diff --git a/src/init.cpp b/src/init.cpp index fbf25a0341..d2e0a80780 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -69,6 +69,7 @@ #include #include #include +#include #include #include #include @@ -1735,6 +1736,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } chainman.m_thread_load = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] { + ScheduleBatchPriority(); // Import blocks ImportBlocks(chainman, vImportFiles); if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 576c07a833..76837f2a27 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1175,69 +1175,66 @@ class ImportingNow void ImportBlocks(ChainstateManager& chainman, std::vector vImportFiles) { - ScheduleBatchPriority(); - - { - ImportingNow imp{chainman.m_blockman.m_importing}; - - // -reindex - if (fReindex) { - int nFile = 0; - // Map of disk positions for blocks with unknown parent (only used for reindex); - // parent hash -> child disk position, multiple children can have the same parent. - std::multimap blocks_with_unknown_parent; - while (true) { - FlatFilePos pos(nFile, 0); - if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { - break; // No block files left to reindex - } - AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)}; - if (file.IsNull()) { - break; // This error is logged in OpenBlockFile - } - LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); - chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); - if (chainman.m_interrupt) { - LogPrintf("Interrupt requested. Exit %s\n", __func__); - return; - } - nFile++; + ImportingNow imp{chainman.m_blockman.m_importing}; + + // -reindex + if (fReindex) { + int nFile = 0; + // Map of disk positions for blocks with unknown parent (only used for reindex); + // parent hash -> child disk position, multiple children can have the same parent. + std::multimap blocks_with_unknown_parent; + while (true) { + FlatFilePos pos(nFile, 0); + if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) { + break; // No block files left to reindex } - WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); - fReindex = false; - LogPrintf("Reindexing finished\n"); - // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): - chainman.ActiveChainstate().LoadGenesisBlock(); + AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)}; + if (file.IsNull()) { + break; // This error is logged in OpenBlockFile + } + LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); + chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent); + if (chainman.m_interrupt) { + LogPrintf("Interrupt requested. 
Exit %s\n", __func__); + return; + } + nFile++; } - - // -loadblock= - for (const fs::path& path : vImportFiles) { - AutoFile file{fsbridge::fopen(path, "rb")}; - if (!file.IsNull()) { - LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); - chainman.LoadExternalBlockFile(file); - if (chainman.m_interrupt) { - LogPrintf("Interrupt requested. Exit %s\n", __func__); - return; - } - } else { - LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); + WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); + fReindex = false; + LogPrintf("Reindexing finished\n"); + // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): + chainman.ActiveChainstate().LoadGenesisBlock(); + } + + // -loadblock= + for (const fs::path& path : vImportFiles) { + AutoFile file{fsbridge::fopen(path, "rb")}; + if (!file.IsNull()) { + LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); + chainman.LoadExternalBlockFile(file); + if (chainman.m_interrupt) { + LogPrintf("Interrupt requested. Exit %s\n", __func__); + return; } + } else { + LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); } + } - // scan for better chains in the block chain database, that are not yet connected in the active best chain + // scan for better chains in the block chain database, that are not yet connected in the active best chain - // We can't hold cs_main during ActivateBestChain even though we're accessing - // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve - // the relevant pointers before the ABC call. - for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { - BlockValidationState state; - if (!chainstate->ActivateBestChain(state, nullptr)) { - chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); - return; - } + // We can't hold cs_main during ActivateBestChain even though we're accessing + // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve + // the relevant pointers before the ABC call. + for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { + BlockValidationState state; + if (!chainstate->ActivateBestChain(state, nullptr)) { + chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); + return; } - } // End scope of ImportingNow + } + // End scope of ImportingNow } std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { From 7e475b9648bbee04f5825b922ba0399373eaa5a9 Mon Sep 17 00:00:00 2001 From: glozow Date: Fri, 10 May 2024 09:23:18 +0100 Subject: [PATCH 29/50] [p2p] don't query orphanage by txid --- src/net_processing.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index bdbf077ab5..985b41b160 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2295,7 +2295,20 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside const uint256& hash = gtxid.GetHash(); - if (m_orphanage.HaveTx(gtxid)) return true; + if (gtxid.IsWtxid()) { + // Normal query by wtxid. + if (m_orphanage.HaveTx(gtxid)) return true; + } else { + // Never query by txid: it is possible that the transaction in the orphanage has the same + // txid but a different witness, which would give us a false positive result. 
If we decided + // not to request the transaction based on this result, an attacker could prevent us from + // downloading a transaction by intentionally creating a malleated version of it. + // + // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. + // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will + // help us find non-segwit transactions, saving bandwidth, and should have no false positives. + if (m_orphanage.HaveTx(GenTxid::Wtxid(hash))) return true; + } if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true; From efcc5930175f31b685adb4627a038d9f0848eb1f Mon Sep 17 00:00:00 2001 From: glozow Date: Fri, 10 May 2024 10:55:39 +0100 Subject: [PATCH 30/50] [refactor] TxOrphanage::HaveTx only by wtxid --- src/net_processing.cpp | 4 ++-- src/test/fuzz/txorphan.cpp | 11 +++++------ src/txorphanage.cpp | 8 ++------ src/txorphanage.h | 4 ++-- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 985b41b160..0b8b896987 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2297,7 +2297,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside if (gtxid.IsWtxid()) { // Normal query by wtxid. - if (m_orphanage.HaveTx(gtxid)) return true; + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; } else { // Never query by txid: it is possible that the transaction in the orphanage has the same // txid but a different witness, which would give us a false positive result. If we decided @@ -2307,7 +2307,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will // help us find non-segwit transactions, saving bandwidth, and should have no false positives. 
- if (m_orphanage.HaveTx(GenTxid::Wtxid(hash))) return true; + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; } if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true; diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index a44f47b00d..5e9034e7f7 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -112,13 +112,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) { CTransactionRef ref = orphanage.GetTxToReconsider(peer_id); if (ref) { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(ref->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(ref->GetWitnessHash())); - Assert(have_tx); + Assert(orphanage.HaveTx(ref->GetWitnessHash())); } } }, [&] { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + bool have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // AddTx should return false if tx is too big or already have it // tx weight is unknown, we only check when tx is already in orphanage { @@ -126,7 +125,7 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) // have_tx == true -> add_tx == false Assert(!have_tx || !add_tx); } - have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + have_tx = orphanage.HaveTx(tx->GetWitnessHash()); { bool add_tx = orphanage.AddTx(tx, peer_id); // if have_tx is still false, it must be too big @@ -135,12 +134,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) } }, [&] { - bool have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + bool have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // EraseTx should return 0 if m_orphans doesn't have the tx { Assert(have_tx == orphanage.EraseTx(tx->GetHash())); } - have_tx = orphanage.HaveTx(GenTxid::Txid(tx->GetHash())) || orphanage.HaveTx(GenTxid::Wtxid(tx->GetWitnessHash())); + have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // have_tx should be false and EraseTx should fail { Assert(!have_tx && !orphanage.EraseTx(tx->GetHash())); diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp index 82206e56c3..e10f01c693 100644 --- a/src/txorphanage.cpp +++ b/src/txorphanage.cpp @@ -169,14 +169,10 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx) } } -bool TxOrphanage::HaveTx(const GenTxid& gtxid) const +bool TxOrphanage::HaveTx(const Wtxid& wtxid) const { LOCK(m_mutex); - if (gtxid.IsWtxid()) { - return m_wtxid_to_orphan_it.count(Wtxid::FromUint256(gtxid.GetHash())); - } else { - return m_orphans.count(Txid::FromUint256(gtxid.GetHash())); - } + return m_wtxid_to_orphan_it.count(wtxid); } CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) diff --git a/src/txorphanage.h b/src/txorphanage.h index a3c8edaa2a..33e41ff5b7 100644 --- a/src/txorphanage.h +++ b/src/txorphanage.h @@ -23,8 +23,8 @@ class TxOrphanage { /** Add a new orphan transaction */ bool AddTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); - /** Check if we already have an orphan transaction (by txid or wtxid) */ - bool HaveTx(const GenTxid& gtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); + /** Check if we already have an orphan transaction (by wtxid only) */ + bool HaveTx(const Wtxid& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); /** Extract a transaction from a peer's work set * Returns nullptr if there are no transactions to work on. 
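Condensing the lookup rule this commit and the previous one establish (types and names as in the diffs above; the helper itself is illustrative, not part of the patch):

    // A txid-based announcement is answered by "casting" the txid to a wtxid:
    // for non-segwit transactions txid == wtxid, so this finds exactly those,
    // and it can never return a false positive for a segwit transaction.
    bool OrphanageHas(const TxOrphanage& orphanage, const GenTxid& gtxid)
    {
        // After this refactor both the wtxid branch and the txid branch of
        // AlreadyHaveTx() reduce to the same single wtxid lookup:
        return orphanage.HaveTx(Wtxid::FromUint256(gtxid.GetHash()));
    }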
From c31f148166f01a9167d82501a77823785d28a841 Mon Sep 17 00:00:00 2001 From: glozow Date: Fri, 10 May 2024 12:04:50 +0100 Subject: [PATCH 31/50] [refactor] TxOrphanage::EraseTx by wtxid No behavior change right now, as transactions in the orphanage are unique by txid. This makes the next commit easier to review. --- src/net_processing.cpp | 4 ++-- src/test/fuzz/txorphan.cpp | 4 ++-- src/txorphanage.cpp | 26 ++++++++++++++------------ src/txorphanage.h | 8 ++++---- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0b8b896987..5ae76e01bf 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3232,7 +3232,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0. - if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetHash()) > 0) { + if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) { LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); } } @@ -3250,7 +3250,7 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c m_orphanage.AddChildrenToWorkSet(*tx); // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage. - m_orphanage.EraseTx(tx->GetHash()); + m_orphanage.EraseTx(tx->GetWitnessHash()); LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", nodeid, diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index 5e9034e7f7..64f6ccfcda 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -137,12 +137,12 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) bool have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // EraseTx should return 0 if m_orphans doesn't have the tx { - Assert(have_tx == orphanage.EraseTx(tx->GetHash())); + Assert(have_tx == orphanage.EraseTx(tx->GetWitnessHash())); } have_tx = orphanage.HaveTx(tx->GetWitnessHash()); // have_tx should be false and EraseTx should fail { - Assert(!have_tx && !orphanage.EraseTx(tx->GetHash())); + Assert(!have_tx && !orphanage.EraseTx(tx->GetWitnessHash())); } }, [&] { diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp index e10f01c693..ca8a0e3a92 100644 --- a/src/txorphanage.cpp +++ b/src/txorphanage.cpp @@ -54,16 +54,19 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) return true; } -int TxOrphanage::EraseTx(const Txid& txid) +int TxOrphanage::EraseTx(const Wtxid& wtxid) { LOCK(m_mutex); - return EraseTxNoLock(txid); + return EraseTxNoLock(wtxid); } -int TxOrphanage::EraseTxNoLock(const Txid& txid) +int TxOrphanage::EraseTxNoLock(const Wtxid& wtxid) { AssertLockHeld(m_mutex); - std::map::iterator it = m_orphans.find(txid); + auto it_by_wtxid = m_wtxid_to_orphan_it.find(wtxid); + if (it_by_wtxid == m_wtxid_to_orphan_it.end()) return 0; + + std::map::iterator it = it_by_wtxid->second; if (it == m_orphans.end()) return 0; for (const CTxIn& txin : it->second.tx->vin) @@ -85,10 +88,10 @@ int TxOrphanage::EraseTxNoLock(const Txid& txid) m_orphan_list[old_pos] = it_last; it_last->second.list_pos = old_pos; } - const auto& wtxid = it->second.tx->GetWitnessHash(); + const auto& txid = 
it->second.tx->GetHash(); LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", txid.ToString(), wtxid.ToString()); m_orphan_list.pop_back(); - m_wtxid_to_orphan_it.erase(it->second.tx->GetWitnessHash()); + m_wtxid_to_orphan_it.erase(wtxid); m_orphans.erase(it); return 1; @@ -107,7 +110,7 @@ void TxOrphanage::EraseForPeer(NodeId peer) std::map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid if (maybeErase->second.fromPeer == peer) { - nErased += EraseTxNoLock(maybeErase->second.tx->GetHash()); + nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash()); } } if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx from peer=%d\n", nErased, peer); @@ -129,7 +132,7 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) { std::map::iterator maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { - nErased += EraseTxNoLock(maybeErase->second.tx->GetHash()); + nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } @@ -142,7 +145,7 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) { // Evict a random orphan: size_t randompos = rng.randrange(m_orphan_list.size()); - EraseTxNoLock(m_orphan_list[randompos]->first); + EraseTxNoLock(m_orphan_list[randompos]->second.tx->GetWitnessHash()); ++nEvicted; } if (nEvicted > 0) LogPrint(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted); @@ -211,7 +214,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block) { LOCK(m_mutex); - std::vector vOrphanErase; + std::vector vOrphanErase; for (const CTransactionRef& ptx : block.vtx) { const CTransaction& tx = *ptx; @@ -222,8 +225,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block) if (itByPrev == m_outpoint_to_orphan_it.end()) continue; for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransaction& orphanTx = *(*mi)->second.tx; - const auto& orphanHash = orphanTx.GetHash(); - vOrphanErase.push_back(orphanHash); + vOrphanErase.push_back(orphanTx.GetWitnessHash()); } } } diff --git a/src/txorphanage.h b/src/txorphanage.h index 33e41ff5b7..a549fc7f9b 100644 --- a/src/txorphanage.h +++ b/src/txorphanage.h @@ -33,8 +33,8 @@ class TxOrphanage { */ CTransactionRef GetTxToReconsider(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); - /** Erase an orphan by txid */ - int EraseTx(const Txid& txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); + /** Erase an orphan by wtxid */ + int EraseTx(const Wtxid& wtxid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); /** Erase all orphans announced by a peer (eg, after that peer disconnects) */ void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex); @@ -106,8 +106,8 @@ class TxOrphanage { * transactions using their witness ids. */ std::map m_wtxid_to_orphan_it GUARDED_BY(m_mutex); - /** Erase an orphan by txid */ - int EraseTxNoLock(const Txid& txid) EXCLUSIVE_LOCKS_REQUIRED(m_mutex); + /** Erase an orphan by wtxid */ + int EraseTxNoLock(const Wtxid& wtxid) EXCLUSIVE_LOCKS_REQUIRED(m_mutex); }; #endif // BITCOIN_TXORPHANAGE_H From 8923edfc1f12ebc6a074651c084ba7d249074799 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 29 Apr 2024 16:40:28 +0100 Subject: [PATCH 32/50] [p2p] allow entries with the same txid in TxOrphanage Index by wtxid instead of txid to allow entries with the same txid but different witnesses in orphanage. 
This prevents an attacker from blocking a transaction from entering the orphanage by sending a mutated version of it. --- src/net_processing.cpp | 5 ++++- src/test/orphanage_tests.cpp | 4 ++-- src/txorphanage.cpp | 34 ++++++++++++++-------------------- src/txorphanage.h | 10 +++------- 4 files changed, 23 insertions(+), 30 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 5ae76e01bf..73c15d76b7 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2302,7 +2302,10 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside // Never query by txid: it is possible that the transaction in the orphanage has the same // txid but a different witness, which would give us a false positive result. If we decided // not to request the transaction based on this result, an attacker could prevent us from - // downloading a transaction by intentionally creating a malleated version of it. + // downloading a transaction by intentionally creating a malleated version of it. While + // only one (or none!) of these transactions can ultimately be confirmed, we have no way of + // discerning which one that is, so the orphanage can store multiple transactions with the + // same txid. // // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp index b2643cf678..a93f7a4ef4 100644 --- a/src/test/orphanage_tests.cpp +++ b/src/test/orphanage_tests.cpp @@ -30,8 +30,8 @@ class TxOrphanageTest : public TxOrphanage CTransactionRef RandomOrphan() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) { LOCK(m_mutex); - std::map::iterator it; - it = m_orphans.lower_bound(Txid::FromUint256(InsecureRand256())); + std::map::iterator it; + it = m_orphans.lower_bound(Wtxid::FromUint256(InsecureRand256())); if (it == m_orphans.end()) it = m_orphans.begin(); return it->second.tx; diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp index ca8a0e3a92..4a1d48a466 100644 --- a/src/txorphanage.cpp +++ b/src/txorphanage.cpp @@ -23,7 +23,7 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) const Txid& hash = tx->GetHash(); const Wtxid& wtxid = tx->GetWitnessHash(); - if (m_orphans.count(hash)) + if (m_orphans.count(wtxid)) return false; // Ignore big transactions, to avoid a @@ -40,11 +40,9 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) return false; } - auto ret = m_orphans.emplace(hash, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()}); + auto ret = m_orphans.emplace(wtxid, OrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()}); assert(ret.second); m_orphan_list.push_back(ret.first); - // Allow for lookups in the orphan pool by wtxid, as well as txid - m_wtxid_to_orphan_it.emplace(tx->GetWitnessHash(), ret.first); for (const CTxIn& txin : tx->vin) { m_outpoint_to_orphan_it[txin.prevout].insert(ret.first); } @@ -63,10 +61,7 @@ int TxOrphanage::EraseTx(const Wtxid& wtxid) int TxOrphanage::EraseTxNoLock(const Wtxid& wtxid) { AssertLockHeld(m_mutex); - auto it_by_wtxid = m_wtxid_to_orphan_it.find(wtxid); - if (it_by_wtxid == m_wtxid_to_orphan_it.end()) return 0; - - std::map::iterator it = it_by_wtxid->second; + std::map::iterator it = m_orphans.find(wtxid); if (it == m_orphans.end()) return 0; for (const CTxIn& txin : it->second.tx->vin) @@ -91,7 +86,6 @@ int TxOrphanage::EraseTxNoLock(const Wtxid& wtxid) const 
auto& txid = it->second.tx->GetHash(); LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", txid.ToString(), wtxid.ToString()); m_orphan_list.pop_back(); - m_wtxid_to_orphan_it.erase(wtxid); m_orphans.erase(it); return 1; @@ -104,13 +98,13 @@ void TxOrphanage::EraseForPeer(NodeId peer) m_peer_work_set.erase(peer); int nErased = 0; - std::map::iterator iter = m_orphans.begin(); + std::map::iterator iter = m_orphans.begin(); while (iter != m_orphans.end()) { - std::map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid - if (maybeErase->second.fromPeer == peer) - { - nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash()); + // increment to avoid iterator becoming invalid after erasure + const auto& [wtxid, orphan] = *iter++; + if (orphan.fromPeer == peer) { + nErased += EraseTxNoLock(wtxid); } } if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx from peer=%d\n", nErased, peer); @@ -127,10 +121,10 @@ void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; - std::map::iterator iter = m_orphans.begin(); + std::map::iterator iter = m_orphans.begin(); while (iter != m_orphans.end()) { - std::map::iterator maybeErase = iter++; + std::map::iterator maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { nErased += EraseTxNoLock(maybeErase->second.tx->GetWitnessHash()); } else { @@ -162,7 +156,7 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx) for (const auto& elem : it_by_prev->second) { // Get this source peer's work set, emplacing an empty set if it didn't exist // (note: if this peer wasn't still connected, we would have removed the orphan tx already) - std::set& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second; + std::set& orphan_work_set = m_peer_work_set.try_emplace(elem->second.fromPeer).first->second; // Add this tx to the work set orphan_work_set.insert(elem->first); LogPrint(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n", @@ -175,7 +169,7 @@ void TxOrphanage::AddChildrenToWorkSet(const CTransaction& tx) bool TxOrphanage::HaveTx(const Wtxid& wtxid) const { LOCK(m_mutex); - return m_wtxid_to_orphan_it.count(wtxid); + return m_orphans.count(wtxid); } CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) @@ -186,10 +180,10 @@ CTransactionRef TxOrphanage::GetTxToReconsider(NodeId peer) if (work_set_it != m_peer_work_set.end()) { auto& work_set = work_set_it->second; while (!work_set.empty()) { - Txid txid = *work_set.begin(); + Wtxid wtxid = *work_set.begin(); work_set.erase(work_set.begin()); - const auto orphan_it = m_orphans.find(txid); + const auto orphan_it = m_orphans.find(wtxid); if (orphan_it != m_orphans.end()) { return orphan_it->second.tx; } diff --git a/src/txorphanage.h b/src/txorphanage.h index a549fc7f9b..5f9888c969 100644 --- a/src/txorphanage.h +++ b/src/txorphanage.h @@ -77,12 +77,12 @@ class TxOrphanage { size_t list_pos; }; - /** Map from txid to orphan transaction record. Limited by + /** Map from wtxid to orphan transaction record. 
Limited by * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */ - std::map m_orphans GUARDED_BY(m_mutex); + std::map m_orphans GUARDED_BY(m_mutex); /** Which peer provided the orphans that need to be reconsidered */ - std::map> m_peer_work_set GUARDED_BY(m_mutex); + std::map> m_peer_work_set GUARDED_BY(m_mutex); using OrphanMap = decltype(m_orphans); @@ -102,10 +102,6 @@ class TxOrphanage { /** Orphan transactions in vector for quick random eviction */ std::vector m_orphan_list GUARDED_BY(m_mutex); - /** Index from wtxid into the m_orphans to lookup orphan - * transactions using their witness ids. */ - std::map m_wtxid_to_orphan_it GUARDED_BY(m_mutex); - /** Erase an orphan by wtxid */ int EraseTxNoLock(const Wtxid& wtxid) EXCLUSIVE_LOCKS_REQUIRED(m_mutex); }; From 6675f6428d653bf7a53537bd773114f4fb5ba53f Mon Sep 17 00:00:00 2001 From: glozow Date: Fri, 10 May 2024 12:38:03 +0100 Subject: [PATCH 33/50] [unit test] TxOrphanage handling of same-txid-different-witness txns --- src/test/orphanage_tests.cpp | 53 ++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp index a93f7a4ef4..450bf6a4fc 100644 --- a/src/test/orphanage_tests.cpp +++ b/src/test/orphanage_tests.cpp @@ -70,6 +70,16 @@ static CTransactionRef MakeTransactionSpending(const std::vector& out return MakeTransactionRef(tx); } +// Make another (not necessarily valid) tx with the same txid but different wtxid. +static CTransactionRef MakeMutation(const CTransactionRef& ptx) +{ + CMutableTransaction tx(*ptx); + tx.vin[0].scriptWitness.stack.push_back({5}); + auto mutated_tx = MakeTransactionRef(tx); + assert(ptx->GetHash() == mutated_tx->GetHash()); + return mutated_tx; +} + static bool EqualTxns(const std::set& set_txns, const std::vector& vec_txns) { if (vec_txns.size() != set_txns.size()) return false; @@ -180,6 +190,49 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans) BOOST_CHECK(orphanage.CountOrphans() == 0); } +BOOST_AUTO_TEST_CASE(same_txid_diff_witness) +{ + FastRandomContext det_rand{true}; + TxOrphanage orphanage; + NodeId peer{0}; + + std::vector empty_outpoints; + auto parent = MakeTransactionSpending(empty_outpoints, det_rand); + + // Create children to go into orphanage. + auto child_normal = MakeTransactionSpending({{parent->GetHash(), 0}}, det_rand); + auto child_mutated = MakeMutation(child_normal); + + const auto& normal_wtxid = child_normal->GetWitnessHash(); + const auto& mutated_wtxid = child_mutated->GetWitnessHash(); + BOOST_CHECK(normal_wtxid != mutated_wtxid); + + BOOST_CHECK(orphanage.AddTx(child_normal, peer)); + // EraseTx fails as transaction by this wtxid doesn't exist. + BOOST_CHECK_EQUAL(orphanage.EraseTx(mutated_wtxid), 0); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); + + // Must succeed. Both transactions should be present in orphanage. + BOOST_CHECK(orphanage.AddTx(child_mutated, peer)); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(orphanage.HaveTx(mutated_wtxid)); + + // Outpoints map should track all entries: check that both are returned as children of the parent. 
+ std::set expected_children{child_normal, child_mutated}; + BOOST_CHECK(EqualTxns(expected_children, orphanage.GetChildrenFromSamePeer(parent, peer))); + + // Erase by wtxid: mutated first + BOOST_CHECK_EQUAL(orphanage.EraseTx(mutated_wtxid), 1); + BOOST_CHECK(orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); + + BOOST_CHECK_EQUAL(orphanage.EraseTx(normal_wtxid), 1); + BOOST_CHECK(!orphanage.HaveTx(normal_wtxid)); + BOOST_CHECK(!orphanage.HaveTx(mutated_wtxid)); +} + + BOOST_AUTO_TEST_CASE(get_children) { FastRandomContext det_rand{true}; From b16da7eda76944719713be68b61f03d4acdd3e16 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 29 Apr 2024 16:06:17 +0100 Subject: [PATCH 34/50] [functional test] attackers sending mutated orphans --- test/functional/p2p_orphan_handling.py | 174 ++++++++++++++++++++++++- 1 file changed, 173 insertions(+), 1 deletion(-) diff --git a/test/functional/p2p_orphan_handling.py b/test/functional/p2p_orphan_handling.py index 6166c62aa2..f525d87cca 100755 --- a/test/functional/p2p_orphan_handling.py +++ b/test/functional/p2p_orphan_handling.py @@ -7,6 +7,7 @@ from test_framework.messages import ( CInv, + CTxInWitness, MSG_TX, MSG_WITNESS_TX, MSG_WTX, @@ -21,6 +22,7 @@ NONPREF_PEER_TX_DELAY, OVERLOADED_PEER_TX_DELAY, p2p_lock, + P2PInterface, P2PTxInvStore, TXID_RELAY_DELAY, ) @@ -127,6 +129,22 @@ def relay_transaction(self, peer, tx): peer.wait_for_getdata([wtxid]) peer.send_and_ping(msg_tx(tx)) + def create_malleated_version(self, tx): + """ + Create a malleated version of the tx where the witness is replaced with garbage data. + Returns a CTransaction object. + """ + tx_bad_wit = tx_from_hex(tx["hex"]) + tx_bad_wit.wit.vtxinwit = [CTxInWitness()] + # Add garbage data to witness 0. We cannot simply strip the witness, as the node would + # classify it as a transaction in which the witness was missing rather than wrong. + tx_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage'] + + assert_equal(tx["txid"], tx_bad_wit.rehash()) + assert tx["wtxid"] != tx_bad_wit.getwtxid() + + return tx_bad_wit + @cleanup def test_arrival_timing_orphan(self): self.log.info("Test missing parents that arrive during delay are not requested") @@ -284,8 +302,8 @@ def test_orphan_multiple_parents(self): # doesn't give up on the orphan. Once all of the missing parents are received, it should be # submitted to mempool. peer.send_message(msg_notfound(vec=[CInv(MSG_WITNESS_TX, int(txid_conf_old, 16))])) + # Sync with ping to ensure orphans are reconsidered peer.send_and_ping(msg_tx(missing_tx["tx"])) - peer.sync_with_ping() assert_equal(node.getmempoolentry(orphan["txid"])["ancestorcount"], 3) @cleanup @@ -394,10 +412,161 @@ def test_orphan_inherit_rejection(self): peer2.assert_never_requested(child["tx"].getwtxid()) # The child should never be requested, even if announced again with potentially different witness. 
+ # Sync with ping to ensure orphans are reconsidered peer3.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(child["txid"], 16))])) self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP) peer3.assert_never_requested(child["txid"]) + @cleanup + def test_same_txid_orphan(self): + self.log.info("Check what happens when orphan with same txid is already in orphanage") + node = self.nodes[0] + + tx_parent = self.wallet.create_self_transfer() + + # Create the real child + tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"]) + + # Create a fake version of the child + tx_orphan_bad_wit = self.create_malleated_version(tx_child) + + bad_peer = node.add_p2p_connection(P2PInterface()) + honest_peer = node.add_p2p_connection(P2PInterface()) + + # 1. Fake orphan is received first. It is missing an input. + bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) + + # 2. Node requests the missing parent by txid. + parent_txid_int = int(tx_parent["txid"], 16) + node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) + bad_peer.wait_for_getdata([parent_txid_int]) + + # 3. Honest peer relays the real child, which is also missing parents and should be placed + # in the orphanage. + with node.assert_debug_log(["missingorspent", "stored orphan tx"]): + honest_peer.send_and_ping(msg_tx(tx_child["tx"])) + + # Time out the previous request for the parent (node will not request the same transaction + # from multiple nodes at the same time) + node.bumpmocktime(GETDATA_TX_INTERVAL) + + # 4. The parent is requested. Honest peer sends it. + honest_peer.wait_for_getdata([parent_txid_int]) + # Sync with ping to ensure orphans are reconsidered + honest_peer.send_and_ping(msg_tx(tx_parent["tx"])) + + # 5. After parent is accepted, orphans should be reconsidered. + # The real child should be accepted and the fake one rejected. + node_mempool = node.getrawmempool() + assert tx_parent["txid"] in node_mempool + assert tx_child["txid"] in node_mempool + assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"]) + + @cleanup + def test_same_txid_orphan_of_orphan(self): + self.log.info("Check what happens when orphan's parent with same txid is already in orphanage") + node = self.nodes[0] + + tx_grandparent = self.wallet.create_self_transfer() + + # Create middle tx (both parent and child) which will be in orphanage. + tx_middle = self.wallet.create_self_transfer(utxo_to_spend=tx_grandparent["new_utxo"]) + + # Create a fake version of the middle tx + tx_orphan_bad_wit = self.create_malleated_version(tx_middle) + + # Create grandchild spending from tx_middle (and spending from tx_orphan_bad_wit since they + # have the same txid). + tx_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_middle["new_utxo"]) + + bad_peer = node.add_p2p_connection(P2PInterface()) + honest_peer = node.add_p2p_connection(P2PInterface()) + + # 1. Fake orphan is received first. It is missing an input. + bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) + + # 2. Node requests missing tx_grandparent by txid. + grandparent_txid_int = int(tx_grandparent["txid"], 16) + node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) + bad_peer.wait_for_getdata([grandparent_txid_int]) + + # 3. Honest peer relays the grandchild, which is missing a parent. The parent by txid already + # exists in orphanage, but should be re-requested because the node shouldn't assume that the + # witness data is the same. In this case, a same-txid-different-witness transaction exists! 
+ with node.assert_debug_log(["stored orphan tx"]): + honest_peer.send_and_ping(msg_tx(tx_grandchild["tx"])) + middle_txid_int = int(tx_middle["txid"], 16) + node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) + honest_peer.wait_for_getdata([middle_txid_int]) + + # 4. Honest peer relays the real child, which is also missing parents and should be placed + # in the orphanage. + with node.assert_debug_log(["stored orphan tx"]): + honest_peer.send_and_ping(msg_tx(tx_middle["tx"])) + assert_equal(len(node.getrawmempool()), 0) + + # 5. Honest peer sends tx_grandparent + honest_peer.send_and_ping(msg_tx(tx_grandparent["tx"])) + + # 6. After parent is accepted, orphans should be reconsidered. + # The real child should be accepted and the fake one rejected. + node_mempool = node.getrawmempool() + assert tx_grandparent["txid"] in node_mempool + assert tx_middle["txid"] in node_mempool + assert tx_grandchild["txid"] in node_mempool + assert_equal(node.getmempoolentry(tx_middle["txid"])["wtxid"], tx_middle["wtxid"]) + + @cleanup + def test_orphan_txid_inv(self): + self.log.info("Check node does not ignore announcement with same txid as tx in orphanage") + node = self.nodes[0] + + tx_parent = self.wallet.create_self_transfer() + + # Create the real child and fake version + tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"]) + tx_orphan_bad_wit = self.create_malleated_version(tx_child) + + bad_peer = node.add_p2p_connection(PeerTxRelayer()) + # Must not send wtxidrelay because otherwise the inv(TX) will be ignored later + honest_peer = node.add_p2p_connection(P2PInterface(wtxidrelay=False)) + + # 1. Fake orphan is received first. It is missing an input. + bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) + + # 2. Node requests the missing parent by txid. + parent_txid_int = int(tx_parent["txid"], 16) + node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) + bad_peer.wait_for_getdata([parent_txid_int]) + + # 3. Honest peer announces the real child, by txid (this isn't common but the node should + # still keep track of it). + child_txid_int = int(tx_child["txid"], 16) + honest_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=child_txid_int)])) + + # 4. The child is requested. Honest peer sends it. + node.bumpmocktime(TXREQUEST_TIME_SKIP) + honest_peer.wait_for_getdata([child_txid_int]) + with node.assert_debug_log(["stored orphan tx"]): + honest_peer.send_and_ping(msg_tx(tx_child["tx"])) + + # 5. After first parent request times out, the node sends another one for the missing parent + # of the real orphan child. + node.bumpmocktime(GETDATA_TX_INTERVAL) + honest_peer.wait_for_getdata([parent_txid_int]) + honest_peer.send_and_ping(msg_tx(tx_parent["tx"])) + + # 6. After parent is accepted, orphans should be reconsidered. + # The real child should be accepted and the fake one rejected. This may happen in either + # order since the message-processing is randomized. If tx_orphan_bad_wit is validated first, + # its consensus error leads to disconnection of bad_peer. If tx_child is validated first, + # tx_orphan_bad_wit is rejected for txn-same-nonwitness-data-in-mempool (no punishment). 
+ node_mempool = node.getrawmempool() + assert tx_parent["txid"] in node_mempool + assert tx_child["txid"] in node_mempool + assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"]) + + def run_test(self): self.nodes[0].setmocktime(int(time.time())) self.wallet_nonsegwit = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK) @@ -410,6 +579,9 @@ def run_test(self): self.test_orphans_overlapping_parents() self.test_orphan_of_orphan() self.test_orphan_inherit_rejection() + self.test_same_txid_orphan() + self.test_same_txid_orphan_of_orphan() + self.test_orphan_txid_inv() if __name__ == '__main__': From 0fb17bf61a40b73a2b81a18e70b3de180c917f22 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 13 May 2024 14:54:13 +0100 Subject: [PATCH 35/50] [log] updates in TxOrphanage - Add elapsed time in "remove orphan" log - Add size in "stored orphan" log - grammar edit --- src/txorphanage.cpp | 11 +++++++---- test/functional/p2p_invalid_tx.py | 6 +++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/txorphanage.cpp b/src/txorphanage.cpp index 4a1d48a466..692dc0461f 100644 --- a/src/txorphanage.cpp +++ b/src/txorphanage.cpp @@ -47,7 +47,7 @@ bool TxOrphanage::AddTx(const CTransactionRef& tx, NodeId peer) m_outpoint_to_orphan_it[txin.prevout].insert(ret.first); } - LogPrint(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s) (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), + LogPrint(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_it.size()); return true; } @@ -84,7 +84,10 @@ int TxOrphanage::EraseTxNoLock(const Wtxid& wtxid) it_last->second.list_pos = old_pos; } const auto& txid = it->second.tx->GetHash(); - LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", txid.ToString(), wtxid.ToString()); + // Time spent in orphanage = difference between current and entry time. + // Entry time is equal to ORPHAN_TX_EXPIRE_TIME earlier than entry's expiry. 
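+    // (Worked example: an orphan stored at time T has nTimeExpire == T + ORPHAN_TX_EXPIRE_TIME,
+    //  so GetTime() + ORPHAN_TX_EXPIRE_TIME - nTimeExpire == GetTime() - T, the time spent here.)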
+ LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s) after %ds\n", txid.ToString(), wtxid.ToString(), + GetTime() + ORPHAN_TX_EXPIRE_TIME - it->second.nTimeExpire); m_orphan_list.pop_back(); m_orphans.erase(it); @@ -107,7 +110,7 @@ void TxOrphanage::EraseForPeer(NodeId peer) nErased += EraseTxNoLock(wtxid); } } - if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx from peer=%d\n", nErased, peer); + if (nErased > 0) LogPrint(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", nErased, peer); } void TxOrphanage::LimitOrphans(unsigned int max_orphans, FastRandomContext& rng) @@ -230,7 +233,7 @@ void TxOrphanage::EraseForBlock(const CBlock& block) for (const auto& orphanHash : vOrphanErase) { nErased += EraseTxNoLock(orphanHash); } - LogPrint(BCLog::TXPACKAGES, "Erased %d orphan tx included or conflicted by block\n", nErased); + LogPrint(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", nErased); } } diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index ae9dc816ab..0ae05d4b0b 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -165,7 +165,7 @@ def run_test(self): node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False) self.log.info('Test that a peer disconnection causes erase its transactions from the orphan pool') - with node.assert_debug_log(['Erased 100 orphan tx from peer=25']): + with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=25']): self.reconnect_p2p(num_connections=1) self.log.info('Test that a transaction in the orphan pool is included in a new tip block causes erase this transaction from the orphan pool') @@ -190,7 +190,7 @@ def run_test(self): block_A.solve() self.log.info('Send the block that includes the previous orphan ... ') - with node.assert_debug_log(["Erased 1 orphan tx included or conflicted by block"]): + with node.assert_debug_log(["Erased 1 orphan transaction(s) included or conflicted by block"]): node.p2ps[0].send_blocks_and_test([block_A], node, success=True) self.log.info('Test that a transaction in the orphan pool conflicts with a new tip block causes erase this transaction from the orphan pool') @@ -219,7 +219,7 @@ def run_test(self): block_B.solve() self.log.info('Send the block that includes a transaction which conflicts with the previous orphan ... ') - with node.assert_debug_log(["Erased 1 orphan tx included or conflicted by block"]): + with node.assert_debug_log(["Erased 1 orphan transaction(s) included or conflicted by block"]): node.p2ps[0].send_blocks_and_test([block_B], node, success=True) From 9408a04e424cee0d226bde79171bd4954f9caeb0 Mon Sep 17 00:00:00 2001 From: josibake Date: Tue, 7 May 2024 11:16:45 +0200 Subject: [PATCH 36/50] tests, fuzz: use new NUMS_H const --- src/test/fuzz/miniscript.cpp | 5 +---- src/test/key_tests.cpp | 10 ++++++++++ src/test/miniscript_tests.cpp | 5 +---- src/test/script_tests.cpp | 3 +-- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index f10007222c..7e71af7c44 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -309,9 +309,6 @@ const struct KeyComparator { // A dummy scriptsig to pass to VerifyScript (we always use Segwit v0). const CScript DUMMY_SCRIPTSIG; -//! Public key to be used as internal key for dummy Taproot spends. 
-const std::vector NUMS_PK{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - //! Construct a miniscript node as a shared_ptr. template NodeRef MakeNodeRef(Args&&... args) { return miniscript::MakeNodeRef(miniscript::internal::NoDupCheck{}, std::forward(args)...); @@ -1018,7 +1015,7 @@ CScript ScriptPubKey(MsCtx ctx, const CScript& script, TaprootBuilder& builder) // For Taproot outputs we always use a tree with a single script and a dummy internal key. builder.Add(0, script, TAPROOT_LEAF_TAPSCRIPT); - builder.Finalize(XOnlyPubKey{NUMS_PK}); + builder.Finalize(XOnlyPubKey::NUMS_H); return GetScriptForDestination(builder.GetOutput()); } diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index 86a8d17a76..aaf4ca4977 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -364,4 +365,13 @@ BOOST_AUTO_TEST_CASE(key_ellswift) } } +BOOST_AUTO_TEST_CASE(bip341_test_h) +{ + std::vector G_uncompressed = ParseHex("0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"); + HashWriter hw; + hw.write(MakeByteSpan(G_uncompressed)); + XOnlyPubKey H{hw.GetSHA256()}; + BOOST_CHECK(XOnlyPubKey::NUMS_H == H); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index a3699f84b6..7e39e9e4de 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -288,9 +288,6 @@ class TestSignatureChecker : public BaseSignatureChecker { } }; -//! Public key to be used as internal key for dummy Taproot spends. -const std::vector NUMS_PK{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - using Fragment = miniscript::Fragment; using NodeRef = miniscript::NodeRef; using miniscript::operator"" _mst; @@ -330,7 +327,7 @@ CScript ScriptPubKey(miniscript::MiniscriptContext ctx, const CScript& script, T // For Taproot outputs we always use a tree with a single script and a dummy internal key. 
builder.Add(0, script, TAPROOT_LEAF_TAPSCRIPT); - builder.Finalize(XOnlyPubKey{NUMS_PK}); + builder.Finalize(XOnlyPubKey::NUMS_H); return GetScriptForDestination(builder.GetOutput()); } diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index e4142e203c..314b26609c 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1268,8 +1268,7 @@ BOOST_AUTO_TEST_CASE(sign_invalid_miniscript) const auto invalid_pubkey{ParseHex("173d36c8c9c9c9ffffffffffff0200000000021e1e37373721361818181818181e1e1e1e19000000000000000000b19292929292926b006c9b9b9292")}; TaprootBuilder builder; builder.Add(0, {invalid_pubkey}, 0xc0); - XOnlyPubKey nums{ParseHex("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")}; - builder.Finalize(nums); + builder.Finalize(XOnlyPubKey::NUMS_H); prev.vout.emplace_back(0, GetScriptForDestination(builder.GetOutput())); curr.vin.emplace_back(COutPoint{prev.GetHash(), 0}); sig_data.tr_spenddata = builder.GetSpendData(); From 2ca1460ae3a7217eaa8c5972515bf622bedadfce Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 7 Jul 2023 14:31:18 -0400 Subject: [PATCH 37/50] rpc: identify JSON-RPC 2.0 requests --- src/rpc/request.cpp | 19 +++++++++++++++++++ src/rpc/request.h | 6 ++++++ test/functional/interface_rpc.py | 14 ++++++++------ 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index 12726ecc88..08e0658561 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -167,6 +167,25 @@ void JSONRPCRequest::parse(const UniValue& valRequest) // Parse id now so errors from here on will have the id id = request.find_value("id"); + // Check for JSON-RPC 2.0 (default 1.1) + m_json_version = JSONRPCVersion::V1_LEGACY; + const UniValue& jsonrpc_version = request.find_value("jsonrpc"); + if (!jsonrpc_version.isNull()) { + if (!jsonrpc_version.isStr()) { + throw JSONRPCError(RPC_INVALID_REQUEST, "jsonrpc field must be a string"); + } + // The "jsonrpc" key was added in the 2.0 spec, but some older documentation + // incorrectly included {"jsonrpc":"1.0"} in a request object, so we + // maintain that for backwards compatibility. 
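+        // In short: "1.0" keeps the legacy 1.1 behavior, "2.0" selects the new
+        // behavior, and any other version string is rejected as unsupported below.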
+ if (jsonrpc_version.get_str() == "1.0") { + m_json_version = JSONRPCVersion::V1_LEGACY; + } else if (jsonrpc_version.get_str() == "2.0") { + m_json_version = JSONRPCVersion::V2; + } else { + throw JSONRPCError(RPC_INVALID_REQUEST, "JSON-RPC version not supported"); + } + } + // Parse method const UniValue& valMethod{request.find_value("method")}; if (valMethod.isNull()) diff --git a/src/rpc/request.h b/src/rpc/request.h index 116ebb8aac..8b72172695 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -11,6 +11,11 @@ #include +enum class JSONRPCVersion { + V1_LEGACY, + V2 +}; + UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id); UniValue JSONRPCError(int code, const std::string& message); @@ -35,6 +40,7 @@ class JSONRPCRequest std::string authUser; std::string peerAddr; std::any context; + JSONRPCVersion m_json_version = JSONRPCVersion::V1_LEGACY; void parse(const UniValue& valRequest); }; diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index 748d83858a..075dd1c2bc 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -161,8 +161,10 @@ def test_batch_requests(self): self.log.info("Testing nonstandard jsonrpc 1.0 version number is accepted...") self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "1.0"})) - self.log.info("Testing unrecognized jsonrpc version number is accepted...") - self.test_batch_request(lambda idx: BatchOptions(request_fields={"jsonrpc": "2.1"})) + self.log.info("Testing unrecognized jsonrpc version number is rejected...") + self.test_batch_request(lambda idx: BatchOptions( + request_fields={"jsonrpc": "2.1"}, + response_fields={"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}})) def test_http_status_codes(self): self.log.info("Testing HTTP status codes for JSON-RPC 1.1 requests...") @@ -188,11 +190,11 @@ def test_http_status_codes(self): expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False) # force-send invalidly formatted requests response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"}) - assert_equal(response, {"error": None, "id": None, "result": 0}) - assert_equal(status, 200) + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}}) + assert_equal(status, 400) response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"}) - assert_equal(response, {"error": None, "id": None, "result": 0}) - assert_equal(status, 200) + assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}}) + assert_equal(status, 400) self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...") # Not notification: id exists From 466b90562f4785de74b548f7c4a256069e2aaf43 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 7 Jul 2023 14:41:23 -0400 Subject: [PATCH 38/50] rpc: Add "jsonrpc" field and drop null "result"/"error" fields Only for JSON-RPC 2.0 requests. 
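For illustration (the response shapes follow from the JSONRPCReplyObj change
below; the result value is a placeholder), a successful reply to a legacy 1.1
request keeps both fields, one of them null:

    {"result": 123456, "error": null, "id": 1}

while the same reply to a request that carried "jsonrpc": "2.0" adds the
version marker and omits the null member:

    {"jsonrpc": "2.0", "result": 123456, "id": 1}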
--- src/bitcoin-cli.cpp | 8 ++++---- src/httprpc.cpp | 12 ++++++------ src/rpc/request.cpp | 17 ++++++++++++----- src/rpc/request.h | 2 +- src/rpc/server.cpp | 2 +- test/functional/interface_rpc.py | 5 ++++- 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 3591f7248b..0bafb4f645 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -302,7 +302,7 @@ class AddrinfoRequestHandler : public BaseRequestHandler } addresses.pushKV("total", total); result.pushKV("addresses_known", addresses); - return JSONRPCReplyObj(std::move(result), NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } }; @@ -371,7 +371,7 @@ class GetinfoRequestHandler: public BaseRequestHandler } result.pushKV("relayfee", batch[ID_NETWORKINFO]["result"]["relayfee"]); result.pushKV("warnings", batch[ID_NETWORKINFO]["result"]["warnings"]); - return JSONRPCReplyObj(std::move(result), NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } }; @@ -623,7 +623,7 @@ class NetinfoRequestHandler : public BaseRequestHandler } } - return JSONRPCReplyObj(UniValue{result}, NullUniValue, 1); + return JSONRPCReplyObj(UniValue{result}, NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } const std::string m_help_doc{ @@ -709,7 +709,7 @@ class GenerateToAddressRequestHandler : public BaseRequestHandler UniValue result(UniValue::VOBJ); result.pushKV("address", address_str); result.pushKV("blocks", reply.get_obj()["result"]); - return JSONRPCReplyObj(std::move(result), NullUniValue, 1); + return JSONRPCReplyObj(std::move(result), NullUniValue, /*id=*/1, JSONRPCVersion::V1_LEGACY); } protected: std::string address_str; diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 53f994efd7..817c75f3b5 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -73,7 +73,7 @@ static std::vector> g_rpcauth; static std::map> g_rpc_whitelist; static bool g_rpc_whitelist_default = false; -static void JSONErrorReply(HTTPRequest* req, UniValue objError, UniValue id) +static void JSONErrorReply(HTTPRequest* req, UniValue objError, const JSONRPCRequest& jreq) { // Send error reply from json-rpc error object int nStatus = HTTP_INTERNAL_SERVER_ERROR; @@ -84,7 +84,7 @@ static void JSONErrorReply(HTTPRequest* req, UniValue objError, UniValue id) else if (code == RPC_METHOD_NOT_FOUND) nStatus = HTTP_NOT_FOUND; - std::string strReply = JSONRPCReplyObj(NullUniValue, std::move(objError), std::move(id)).write() + "\n"; + std::string strReply = JSONRPCReplyObj(NullUniValue, std::move(objError), jreq.id, jreq.m_json_version).write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(nStatus, strReply); @@ -231,9 +231,9 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) jreq.parse(valRequest[i]); reply.push_back(JSONRPCExec(jreq)); } catch (UniValue& e) { - reply.push_back(JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id)); + reply.push_back(JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version)); } catch (const std::exception& e) { - reply.push_back(JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id)); + reply.push_back(JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id, jreq.m_json_version)); } } } @@ -243,10 +243,10 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) req->WriteHeader("Content-Type", "application/json"); 
req->WriteReply(HTTP_OK, reply.write() + "\n"); } catch (UniValue& e) { - JSONErrorReply(req, std::move(e), jreq.id); + JSONErrorReply(req, std::move(e), jreq); return false; } catch (const std::exception& e) { - JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id); + JSONErrorReply(req, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq); return false; } return true; diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index 08e0658561..b5bf1e5ffa 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -37,14 +37,21 @@ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, return request; } -UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id) +UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id, JSONRPCVersion jsonrpc_version) { UniValue reply(UniValue::VOBJ); - if (!error.isNull()) - reply.pushKV("result", NullUniValue); - else + // Add JSON-RPC version number field in v2 only. + if (jsonrpc_version == JSONRPCVersion::V2) reply.pushKV("jsonrpc", "2.0"); + + // Add both result and error fields in v1, even though one will be null. + // Omit the null field in v2. + if (error.isNull()) { reply.pushKV("result", std::move(result)); - reply.pushKV("error", std::move(error)); + if (jsonrpc_version == JSONRPCVersion::V1_LEGACY) reply.pushKV("error", NullUniValue); + } else { + if (jsonrpc_version == JSONRPCVersion::V1_LEGACY) reply.pushKV("result", NullUniValue); + reply.pushKV("error", std::move(error)); + } reply.pushKV("id", std::move(id)); return reply; } diff --git a/src/rpc/request.h b/src/rpc/request.h index 8b72172695..a7c9af2216 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -17,7 +17,7 @@ enum class JSONRPCVersion { }; UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); -UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id); +UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id, JSONRPCVersion jsonrpc_version); UniValue JSONRPCError(int code, const std::string& message); /** Generate a new RPC authentication cookie and write it to disk */ diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index f199914095..8e150ef2b7 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -364,7 +364,7 @@ UniValue JSONRPCExec(const JSONRPCRequest& jreq) // but inside a batch, we just include the error object and return HTTP 200 UniValue result = tableRPC.execute(jreq); - return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id); + return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id, jreq.m_json_version); } /** diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index 075dd1c2bc..dc9fb5d40f 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -48,7 +48,10 @@ def format_request(options, idx, fields): def format_response(options, idx, fields): response = {} response.update(id=None if options.notification else idx) - response.update(result=None, error=None) + if options.version == 2: + response.update(jsonrpc="2.0") + else: + response.update(result=None, error=None) response.update(fields) if options.response_fields: response.update(options.response_fields) From bf1a1f1662427fbf1a43bb951364eface469bdb7 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 7 Jul 2023 14:41:23 -0400 Subject: [PATCH 39/50] rpc: Avoid returning HTTP errors for JSON-RPC 2.0 requests Avoid returning HTTP status errors for non-batch JSON-RPC 2.0 requests if the RPC 
method failed but the HTTP request was otherwise valid. Batch requests already did not return HTTP errors previously. --- src/httprpc.cpp | 15 ++++++++++++--- src/rpc/server.cpp | 19 ++++++++++++++----- src/rpc/server.h | 2 +- test/functional/interface_rpc.py | 8 ++++---- 4 files changed, 31 insertions(+), 13 deletions(-) diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 817c75f3b5..777ad32bbe 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -75,6 +75,9 @@ static bool g_rpc_whitelist_default = false; static void JSONErrorReply(HTTPRequest* req, UniValue objError, const JSONRPCRequest& jreq) { + // Sending HTTP errors is a legacy JSON-RPC behavior. + Assume(jreq.m_json_version != JSONRPCVersion::V2); + // Send error reply from json-rpc error object int nStatus = HTTP_INTERNAL_SERVER_ERROR; int code = objError.find_value("code").getInt(); @@ -201,7 +204,12 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) return false; } - reply = JSONRPCExec(jreq); + // Legacy 1.0/1.1 behavior is for failed requests to throw + // exceptions which return HTTP errors and RPC errors to the client. + // 2.0 behavior is to catch exceptions and return HTTP success with + // RPC errors, as long as there is not an actual HTTP server error. + const bool catch_errors{jreq.m_json_version == JSONRPCVersion::V2}; + reply = JSONRPCExec(jreq, catch_errors); // array of requests } else if (valRequest.isArray()) { @@ -226,10 +234,11 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Execute each request reply = UniValue::VARR; for (size_t i{0}; i < valRequest.size(); ++i) { - // Batches include errors in the batch response, they do not throw + // Batches never throw HTTP errors, they are always just included + // in "HTTP OK" responses. try { jreq.parse(valRequest[i]); - reply.push_back(JSONRPCExec(jreq)); + reply.push_back(JSONRPCExec(jreq, /*catch_errors=*/true)); } catch (UniValue& e) { reply.push_back(JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version)); } catch (const std::exception& e) { diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 8e150ef2b7..a30fd3b7d0 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -358,11 +358,20 @@ bool IsDeprecatedRPCEnabled(const std::string& method) return find(enabled_methods.begin(), enabled_methods.end(), method) != enabled_methods.end(); } -UniValue JSONRPCExec(const JSONRPCRequest& jreq) -{ - // Might throw exception. 
Single requests will throw and send HTTP error codes
-    // but inside a batch, we just include the error object and return HTTP 200
-    UniValue result = tableRPC.execute(jreq);
+UniValue JSONRPCExec(const JSONRPCRequest& jreq, bool catch_errors)
+{
+    UniValue result;
+    if (catch_errors) {
+        try {
+            result = tableRPC.execute(jreq);
+        } catch (UniValue& e) {
+            return JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version);
+        } catch (const std::exception& e) {
+            return JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_MISC_ERROR, e.what()), jreq.id, jreq.m_json_version);
+        }
+    } else {
+        result = tableRPC.execute(jreq);
+    }

    return JSONRPCReplyObj(std::move(result), NullUniValue, jreq.id, jreq.m_json_version);
}
diff --git a/src/rpc/server.h b/src/rpc/server.h
index 2761bcd9db..c0e7bc04ad 100644
--- a/src/rpc/server.h
+++ b/src/rpc/server.h
@@ -181,7 +181,7 @@ extern CRPCTable tableRPC;
void StartRPC();
void InterruptRPC();
void StopRPC();
-UniValue JSONRPCExec(const JSONRPCRequest& jreq);
+UniValue JSONRPCExec(const JSONRPCRequest& jreq, bool catch_errors);

// Drop witness when serializing for RPC?
bool RPCSerializationWithoutWitness();

diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py
index dc9fb5d40f..824a7200cd 100755
--- a/test/functional/interface_rpc.py
+++ b/test/functional/interface_rpc.py
@@ -188,9 +188,9 @@ def test_http_status_codes(self):
        self.log.info("Testing HTTP status codes for JSON-RPC 2.0 requests...")
        # OK
        expect_http_rpc_status(200, None, self.nodes[0], "getblockhash", [0], 2, False)
-        # RPC errors and HTTP errors
-        expect_http_rpc_status(404, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False)
-        expect_http_rpc_status(500, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False)
+        # RPC errors but not HTTP errors
+        expect_http_rpc_status(200, RPC_METHOD_NOT_FOUND, self.nodes[0], "invalidmethod", [], 2, False)
+        expect_http_rpc_status(200, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False)
        # force-send invalidly formatted requests
        response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"})
        assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}})
        assert_equal(status, 400)
        response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"})
        assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}})
        assert_equal(status, 400)

        self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...")
        # Not notification: id exists
        expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, False)
        block_count = self.nodes[0].getblockcount()
        expect_http_rpc_status(200, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqdku202"], 2, True)
        # The command worked even though there was no response
        assert_equal(block_count + 1, self.nodes[0].getblockcount())
        expect_http_rpc_status(500, RPC_INVALID_ADDRESS_OR_KEY, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True)
        # Sanity check: command was not executed
        assert_equal(block_count + 1, self.nodes[0].getblockcount())

From e7ee80dcf2b68684eae96070875ea13a60e3e7b0 Mon Sep 17 00:00:00 2001
From: Matthew Zipkin
Date: Fri, 7 Jul 2023 15:06:35 -0400
Subject: [PATCH 40/50] rpc: JSON-RPC 2.0 should not respond to "notifications"

For JSON-RPC 2.0 requests we need to distinguish between a missing "id"
field and "id":null. This is accomplished by making the JSONRPCRequest id
property a std::optional<UniValue> with a default value of UniValue::VNULL.
A side-effect of this change for non-2.0 requests is that requests which
do not specify an "id" field will no longer return "id": null in the
response.
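For illustration (a sketch; the result value is a placeholder), a 2.0
request that includes an "id" still receives a full reply:

    --> {"jsonrpc": "2.0", "id": 1, "method": "getblockcount"}
    <-- HTTP 200, {"jsonrpc": "2.0", "result": 123456, "id": 1}

whereas the same request without an "id" is a notification: the method is
still executed, but the server answers with HTTP 204 No Content and an
empty body.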
--- src/httprpc.cpp | 31 ++++++++++++++++++--- src/rpc/protocol.h | 1 + src/rpc/request.cpp | 10 +++++-- src/rpc/request.h | 6 ++-- test/functional/interface_rpc.py | 27 ++++++++++++------ test/functional/test_framework/authproxy.py | 9 ++++++ 6 files changed, 67 insertions(+), 17 deletions(-) diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 777ad32bbe..3eb34dbe6a 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -211,6 +211,12 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) const bool catch_errors{jreq.m_json_version == JSONRPCVersion::V2}; reply = JSONRPCExec(jreq, catch_errors); + if (jreq.IsNotification()) { + // Even though we do execute notifications, we do not respond to them + req->WriteReply(HTTP_NO_CONTENT); + return true; + } + // array of requests } else if (valRequest.isArray()) { // Check authorization for each request's method @@ -235,15 +241,32 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) reply = UniValue::VARR; for (size_t i{0}; i < valRequest.size(); ++i) { // Batches never throw HTTP errors, they are always just included - // in "HTTP OK" responses. + // in "HTTP OK" responses. Notifications never get any response. + UniValue response; try { jreq.parse(valRequest[i]); - reply.push_back(JSONRPCExec(jreq, /*catch_errors=*/true)); + response = JSONRPCExec(jreq, /*catch_errors=*/true); } catch (UniValue& e) { - reply.push_back(JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version)); + response = JSONRPCReplyObj(NullUniValue, std::move(e), jreq.id, jreq.m_json_version); } catch (const std::exception& e) { - reply.push_back(JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id, jreq.m_json_version)); + response = JSONRPCReplyObj(NullUniValue, JSONRPCError(RPC_PARSE_ERROR, e.what()), jreq.id, jreq.m_json_version); } + if (!jreq.IsNotification()) { + reply.push_back(std::move(response)); + } + } + // Return no response for an all-notification batch, but only if the + // batch request is non-empty. Technically according to the JSON-RPC + // 2.0 spec, an empty batch request should also return no response, + // However, if the batch request is empty, it means the request did + // not contain any JSON-RPC version numbers, so returning an empty + // response could break backwards compatibility with old RPC clients + // relying on previous behavior. Return an empty array instead of an + // empty response in this case to favor being backwards compatible + // over complying with the JSON-RPC 2.0 spec in this case. + if (reply.size() == 0 && valRequest.size() > 0) { + req->WriteReply(HTTP_NO_CONTENT); + return true; } } else diff --git a/src/rpc/protocol.h b/src/rpc/protocol.h index 75e42e4c88..83a9010681 100644 --- a/src/rpc/protocol.h +++ b/src/rpc/protocol.h @@ -10,6 +10,7 @@ enum HTTPStatusCode { HTTP_OK = 200, + HTTP_NO_CONTENT = 204, HTTP_BAD_REQUEST = 400, HTTP_UNAUTHORIZED = 401, HTTP_FORBIDDEN = 403, diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index b5bf1e5ffa..ca424b4953 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -37,7 +37,7 @@ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, return request; } -UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id, JSONRPCVersion jsonrpc_version) +UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional id, JSONRPCVersion jsonrpc_version) { UniValue reply(UniValue::VOBJ); // Add JSON-RPC version number field in v2 only. 
@@ -52,7 +52,7 @@ UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id, JSONRPCVe if (jsonrpc_version == JSONRPCVersion::V1_LEGACY) reply.pushKV("result", NullUniValue); reply.pushKV("error", std::move(error)); } - reply.pushKV("id", std::move(id)); + if (id.has_value()) reply.pushKV("id", std::move(id.value())); return reply; } @@ -172,7 +172,11 @@ void JSONRPCRequest::parse(const UniValue& valRequest) const UniValue& request = valRequest.get_obj(); // Parse id now so errors from here on will have the id - id = request.find_value("id"); + if (request.exists("id")) { + id = request.find_value("id"); + } else { + id = std::nullopt; + } // Check for JSON-RPC 2.0 (default 1.1) m_json_version = JSONRPCVersion::V1_LEGACY; diff --git a/src/rpc/request.h b/src/rpc/request.h index a7c9af2216..e47f90af86 100644 --- a/src/rpc/request.h +++ b/src/rpc/request.h @@ -7,6 +7,7 @@ #define BITCOIN_RPC_REQUEST_H #include +#include #include #include @@ -17,7 +18,7 @@ enum class JSONRPCVersion { }; UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id); -UniValue JSONRPCReplyObj(UniValue result, UniValue error, UniValue id, JSONRPCVersion jsonrpc_version); +UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional id, JSONRPCVersion jsonrpc_version); UniValue JSONRPCError(int code, const std::string& message); /** Generate a new RPC authentication cookie and write it to disk */ @@ -32,7 +33,7 @@ std::vector JSONRPCProcessBatchReply(const UniValue& in); class JSONRPCRequest { public: - UniValue id; + std::optional id = UniValue::VNULL; std::string strMethod; UniValue params; enum Mode { EXECUTE, GET_HELP, GET_ARGS } mode = EXECUTE; @@ -43,6 +44,7 @@ class JSONRPCRequest JSONRPCVersion m_json_version = JSONRPCVersion::V1_LEGACY; void parse(const UniValue& valRequest); + [[nodiscard]] bool IsNotification() const { return !id.has_value() && m_json_version == JSONRPCVersion::V2; }; }; #endif // BITCOIN_RPC_REQUEST_H diff --git a/test/functional/interface_rpc.py b/test/functional/interface_rpc.py index 824a7200cd..b08ca42796 100755 --- a/test/functional/interface_rpc.py +++ b/test/functional/interface_rpc.py @@ -46,8 +46,11 @@ def format_request(options, idx, fields): def format_response(options, idx, fields): + if options.version == 2 and options.notification: + return None response = {} - response.update(id=None if options.notification else idx) + if not options.notification: + response.update(id=idx) if options.version == 2: response.update(jsonrpc="2.0") else: @@ -129,11 +132,17 @@ def test_batch_request(self, call_options): if options is None: continue request.append(format_request(options, idx, call)) - response.append(format_response(options, idx, result)) + r = format_response(options, idx, result) + if r is not None: + response.append(r) rpc_response, http_status = send_json_rpc(self.nodes[0], request) - assert_equal(http_status, 200) - assert_equal(rpc_response, response) + if len(response) == 0 and len(request) > 0: + assert_equal(http_status, 204) + assert_equal(rpc_response, None) + else: + assert_equal(http_status, 200) + assert_equal(rpc_response, response) def test_batch_requests(self): self.log.info("Testing empty batch request...") @@ -193,10 +202,10 @@ def test_http_status_codes(self): expect_http_rpc_status(200, RPC_INVALID_PARAMETER, self.nodes[0], "getblockhash", [42], 2, False) # force-send invalidly formatted requests response, status = send_json_rpc(self.nodes[0], {"jsonrpc": 2, "method": "getblockcount"}) - 
assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "jsonrpc field must be a string"}}) assert_equal(status, 400) response, status = send_json_rpc(self.nodes[0], {"jsonrpc": "3.0", "method": "getblockcount"}) - assert_equal(response, {"id": None, "result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}}) + assert_equal(response, {"result": None, "error": {"code": RPC_INVALID_REQUEST, "message": "JSON-RPC version not supported"}}) assert_equal(status, 400) self.log.info("Testing HTTP status codes for JSON-RPC 2.0 notifications...") @@ -209,10 +218,12 @@ def test_http_status_codes(self): # Not notification: has "id" field expect_http_rpc_status(200, None, self.nodes[0], "getblockcount", [], 2, False) block_count = self.nodes[0].getblockcount() - expect_http_rpc_status(200, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqdku202"], 2, True) + # Notification response status code: HTTP_NO_CONTENT + expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqdku202"], 2, True) # The command worked even though there was no response assert_equal(block_count + 1, self.nodes[0].getblockcount()) - expect_http_rpc_status(200, RPC_INVALID_ADDRESS_OR_KEY, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True) + # No error response for notifications even if they are invalid + expect_http_rpc_status(204, None, self.nodes[0], "generatetoaddress", [1, "invalid_address"], 2, True) # Sanity check: command was not executed assert_equal(block_count + 1, self.nodes[0].getblockcount()) diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 03042877b2..7edf9f3679 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -160,6 +160,15 @@ def _get_response(self): raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) + # Check for no-content HTTP status code, which can be returned when an + # RPC client requests a JSON-RPC 2.0 "notification" with no response. + # Currently this is only possible if clients call the _request() method + # directly to send a raw request. + if http_response.status == HTTPStatus.NO_CONTENT: + if len(http_response.read()) != 0: + raise JSONRPCException({'code': -342, 'message': 'Content received with NO CONTENT status code'}) + return None, http_response.status + content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException( From cbc6c440e3811d342fa570713702900b3e3e75b9 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 1 Mar 2023 14:03:26 -0500 Subject: [PATCH 41/50] doc: add comments and release-notes for JSON-RPC 2.0 --- doc/JSON-RPC-interface.md | 16 ++++++++++++++++ doc/release-notes-27101.md | 9 +++++++++ src/rpc/request.cpp | 11 +++++++++++ src/rpc/util.cpp | 4 ++-- src/test/rpc_tests.cpp | 6 +++--- 5 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 doc/release-notes-27101.md diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index ec332d23eb..7640102172 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -74,6 +74,22 @@ major version via the `-deprecatedrpc=` command line option. 
 The release notes of a new major release come with detailed instructions
 on what RPC features were deprecated and how to re-enable them temporarily.
 
+## JSON-RPC 1.1 vs 2.0
+
+The server recognizes [JSON-RPC v2.0](https://www.jsonrpc.org/specification) requests
+and responds accordingly. A 2.0 request is identified by the presence of the
+`"jsonrpc": "2.0"` key/value pair in the request body. If that pair is not present in a request,
+the legacy JSON-RPC v1.1 protocol is followed instead, which was the only
+protocol available in previous releases.
+
+|| 1.1 | 2.0 |
+|-|-|-|
+| Request marker | `"version": "1.1"` (or none) | `"jsonrpc": "2.0"` |
+| Response marker | (none) | `"jsonrpc": "2.0"` |
+| `"error"` and `"result"` fields in response | both present | only one is present |
+| HTTP codes in response | `200` unless there is any kind of RPC error (invalid parameters, method not found, etc) | Always `200` unless there is an actual HTTP server error (request parsing error, endpoint not found, etc) |
+| Notifications: requests that get no reply | (not supported) | Supported for requests that exclude the "id" field |
+
 ## Security
 
 The RPC interface allows other programs to control Bitcoin Core,
diff --git a/doc/release-notes-27101.md b/doc/release-notes-27101.md
new file mode 100644
index 0000000000..8775b59c00
--- /dev/null
+++ b/doc/release-notes-27101.md
@@ -0,0 +1,9 @@
+JSON-RPC
+--------
+
+The JSON-RPC server now recognizes JSON-RPC 2.0 requests and responds with
+strict adherence to the specification (https://www.jsonrpc.org/specification):
+
+- Returning HTTP "204 No Content" responses to JSON-RPC 2.0 notifications instead of full responses.
+- Returning HTTP "200 OK" responses in all other cases, rather than 404 responses for unknown methods, 500 responses for invalid parameters, etc.
+- Returning either "result" fields or "error" fields in JSON-RPC responses, rather than returning both fields with one field set to null.
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index ca424b4953..d35782189e 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -26,6 +26,17 @@
  *
  * 1.0 spec: http://json-rpc.org/wiki/specification
  * 1.2 spec: http://jsonrpc.org/historical/json-rpc-over-http.html
+ *
+ * If the server receives a request with the JSON-RPC 2.0 marker `{"jsonrpc": "2.0"}`,
+ * then Bitcoin will respond with a strictly specified response.
+ * It will only return an HTTP error code if an actual HTTP error is encountered,
+ * such as the endpoint not being found (404) or the request not being formatted
+ * correctly (500). Otherwise the HTTP code is always OK (200) and RPC errors will
+ * be included in the response body.
+ * + * 2.0 spec: https://www.jsonrpc.org/specification + * + * Also see http://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0 */ UniValue JSONRPCRequestObj(const std::string& strMethod, const UniValue& params, const UniValue& id) diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index cf48ee11e7..6bad1ffc55 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -161,7 +161,7 @@ std::string HelpExampleCliNamed(const std::string& methodname, const RPCArgList& std::string HelpExampleRpc(const std::string& methodname, const std::string& args) { - return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", " + return "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", " "\"method\": \"" + methodname + "\", \"params\": [" + args + "]}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"; } @@ -172,7 +172,7 @@ std::string HelpExampleRpcNamed(const std::string& methodname, const RPCArgList& params.pushKV(param.first, param.second); } - return "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", " + return "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", " "\"method\": \"" + methodname + "\", \"params\": " + params.write() + "}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"; } diff --git a/src/test/rpc_tests.cpp b/src/test/rpc_tests.cpp index 0d2460c606..41d52d8881 100644 --- a/src/test/rpc_tests.cpp +++ b/src/test/rpc_tests.cpp @@ -551,7 +551,7 @@ BOOST_AUTO_TEST_CASE(help_example) // test different argument types const RPCArgList& args = {{"foo", "bar"}, {"b", true}, {"n", 1}}; BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", args), "> bitcoin-cli -named test foo=bar b=true n=1\n"); - BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", args), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"foo\":\"bar\",\"b\":true,\"n\":1}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); + BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", args), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"foo\":\"bar\",\"b\":true,\"n\":1}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); // test shell escape BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"foo", "b'ar"}}), "> bitcoin-cli -named test foo='b'''ar'\n"); @@ -564,7 +564,7 @@ BOOST_AUTO_TEST_CASE(help_example) obj_value.pushKV("b", false); obj_value.pushKV("n", 1); BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"name", obj_value}}), "> bitcoin-cli -named test name='{\"foo\":\"bar\",\"b\":false,\"n\":1}'\n"); - BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", obj_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":{\"foo\":\"bar\",\"b\":false,\"n\":1}}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); + BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", obj_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":{\"foo\":\"bar\",\"b\":false,\"n\":1}}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n"); // test array params UniValue arr_value(UniValue::VARR); @@ -572,7 +572,7 @@ BOOST_AUTO_TEST_CASE(help_example) arr_value.push_back(false); arr_value.push_back(1); BOOST_CHECK_EQUAL(HelpExampleCliNamed("test", {{"name", arr_value}}), "> 
bitcoin-cli -named test name='[\"bar\",false,1]'\n");
-    BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", arr_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"1.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":[\"bar\",false,1]}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n");
+    BOOST_CHECK_EQUAL(HelpExampleRpcNamed("test", {{"name", arr_value}}), "> curl --user myusername --data-binary '{\"jsonrpc\": \"2.0\", \"id\": \"curltest\", \"method\": \"test\", \"params\": {\"name\":[\"bar\",false,1]}}' -H 'content-type: text/plain;' http://127.0.0.1:8332/\n");
 
     // test types don't matter for shell
     BOOST_CHECK_EQUAL(HelpExampleCliNamed("foo", {{"arg", true}}), HelpExampleCliNamed("foo", {{"arg", "true"}}));

From fdae638e83522c28a1222e65c43d1cbca3e34cba Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Fri, 10 May 2024 16:46:35 -0400
Subject: [PATCH 42/50] doc: Improve doc for functions involved in saving
 blocks to disk

In particular, better document the flat file positions that these
functions expect and return.

Co-authored-by: Ryan Ofsky
---
 src/node/blockstorage.h | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index ce514cc645..27b7737051 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -155,6 +155,16 @@ class BlockManager
     /** Return false if undo file flushing fails. */
     [[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false);
 
+    /**
+     * Helper function performing various preparations before a block can be saved to disk:
+     * returns the correct position for the block to be saved, which may be in the current or a new
+     * block file depending on nAddSize. May flush the previous blockfile to disk if it is full, update the
+     * blockfile info, and check that there is enough disk space to save the block.
+     *
+     * If fKnown is false, the nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of
+     * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE).
+     * If fKnown is true, nAddSize should be just the size of the serialized CBlock.
+     */
     [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown);
     [[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
     bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize);
@@ -164,6 +174,12 @@ class BlockManager
 
     AutoFile OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const;
 
+    /**
+     * Write a block to disk. The pos argument passed to this function is modified by this call. Before this call, it should
+     * point to an unused file location where separator fields will be written, followed by the serialized CBlock data.
+     * After this call, it will point to the beginning of the serialized CBlock data, after the separator fields
+     * (BLOCK_SERIALIZATION_HEADER_SIZE).
+     */
     bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const;
     bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const;
 
@@ -312,7 +328,16 @@ class BlockManager
     bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
-    /** Store block on disk. If dbp is not nullptr, then it provides the known position of the block within a block file on disk. */
+    /** Store block on disk and update block file statistics.
+     * If dbp is non-null, it means the block data is already stored, and dbp contains the file position.
+     * In this case, the block data will not be written; only the block file statistics will be updated. This case should only happen during reindexing.
+     *
+     * @param[in] block       the block to be stored
+     * @param[in] nHeight     the height of the block
+     *
+     * @returns in case of success, the position at which the block was written;
+     *          in case of an error, an empty FlatFilePos
+     */
     FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp);
 
     /** Whether running in -prune mode. */

From 064859bbad6984a6ec85c744064abdf757807c58 Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Wed, 20 Mar 2024 15:05:08 -0400
Subject: [PATCH 43/50] blockstorage: split up FindBlockPos function

FindBlockPos does different things depending on whether the block is
known or not, as shown by the fact that much of the existing code is
conditional on whether fKnown is set. If the block position is known
(during reindex), the function only updates the block info statistics;
it doesn't actually find a block position in this case.

This commit removes fKnown and splits up these two code paths by
introducing a separate function for the reindex case when the block
position is known. It doesn't change behavior.
---
 src/node/blockstorage.cpp | 163 ++++++++++++++++++++------------------
 src/node/blockstorage.h   |  14 +++-
 2 files changed, 97 insertions(+), 80 deletions(-)

diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 9b8467c2f4..0fe989d722 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -848,21 +848,13 @@ fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
     return BlockFileSeq().FileName(pos);
 }
 
-bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown)
+bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
 {
     LOCK(cs_LastBlockFile);
 
     const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
 
-    // Check that chain type is NORMAL if fKnown is true, because fKnown is only
-    // true during reindexing, and reindexing deletes snapshot chainstates, so
-    // chain_type will not be SNAPSHOT. Also check that cursor exists, because
-    // the normal cursor should never be null.
-    if (fKnown) {
-        Assume(chain_type == BlockfileType::NORMAL);
-        Assume(m_blockfile_cursors[chain_type]);
-    }
-    if (!fKnown && !m_blockfile_cursors[chain_type]) {
+    if (!m_blockfile_cursors[chain_type]) {
         // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
         assert(chain_type == BlockfileType::ASSUMED);
         const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
@@ -871,90 +863,107 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
     }
 
     const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
-    int nFile = fKnown ? pos.nFile : last_blockfile;
+    int nFile = last_blockfile;
     if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
         m_blockfile_info.resize(nFile + 1);
     }
 
     bool finalize_undo = false;
-    if (!fKnown) {
-        unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
-        // Use smaller blockfiles in test-only -fastprune mode - but avoid
-        // the possibility of having a block not fit into the block file.
-        if (m_opts.fast_prune) {
-            max_blockfile_size = 0x10000; // 64kiB
-            if (nAddSize >= max_blockfile_size) {
-                // dynamically adjust the blockfile size to be larger than the added size
-                max_blockfile_size = nAddSize + 1;
-            }
+    unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
+    // Use smaller blockfiles in test-only -fastprune mode - but avoid
+    // the possibility of having a block not fit into the block file.
+    if (m_opts.fast_prune) {
+        max_blockfile_size = 0x10000; // 64kiB
+        if (nAddSize >= max_blockfile_size) {
+            // dynamically adjust the blockfile size to be larger than the added size
+            max_blockfile_size = nAddSize + 1;
         }
-        assert(nAddSize < max_blockfile_size);
-
-        while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
-            // when the undo file is keeping up with the block file, we want to flush it explicitly
-            // when it is lagging behind (more blocks arrive than are being connected), we let the
-            // undo block write case handle it
-            finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
-                             Assert(m_blockfile_cursors[chain_type])->undo_height);
-
-            // Try the next unclaimed blockfile number
-            nFile = this->MaxBlockfileNum() + 1;
-            // Set to increment MaxBlockfileNum() for next iteration
-            m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
-
-            if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
-                m_blockfile_info.resize(nFile + 1);
-            }
+    }
+    assert(nAddSize < max_blockfile_size);
+
+    while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
+        // when the undo file is keeping up with the block file, we want to flush it explicitly
+        // when it is lagging behind (more blocks arrive than are being connected), we let the
+        // undo block write case handle it
+        finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
+                         Assert(m_blockfile_cursors[chain_type])->undo_height);
+
+        // Try the next unclaimed blockfile number
+        nFile = this->MaxBlockfileNum() + 1;
+        // Set to increment MaxBlockfileNum() for next iteration
+        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+
+        if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
+            m_blockfile_info.resize(nFile + 1);
         }
-        pos.nFile = nFile;
-        pos.nPos = m_blockfile_info[nFile].nSize;
     }
+    pos.nFile = nFile;
+    pos.nPos = m_blockfile_info[nFile].nSize;
 
     if (nFile != last_blockfile) {
-        if (!fKnown) {
-            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
-                     last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
-
-            // Do not propagate the return code. The flush concerns a previous block
-            // and undo file that has already been written to. If a flush fails
-            // here, and we crash, there is no expected additional block data
-            // inconsistency arising from the flush failure here. However, the undo
-            // data may be inconsistent after a crash if the flush is called during
-            // a reindex. A flush error might also leave some of the data files
-            // untrimmed.
-            if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
-                LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
-                              "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
-                              last_blockfile, !fKnown, finalize_undo, nFile);
-            }
+        LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
+                 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
+
+        // Do not propagate the return code. The flush concerns a previous block
+        // and undo file that has already been written to. If a flush fails
+        // here, and we crash, there is no expected additional block data
+        // inconsistency arising from the flush failure here. However, the undo
+        // data may be inconsistent after a crash if the flush is called during
+        // a reindex. A flush error might also leave some of the data files
+        // untrimmed.
+        if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
+            LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
+                          "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
+                          last_blockfile, finalize_undo, nFile);
         }
         // No undo data yet in the new file, so reset our undo-height tracking.
         m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
     }
 
     m_blockfile_info[nFile].AddBlock(nHeight, nTime);
-    if (fKnown) {
-        m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
-    } else {
-        m_blockfile_info[nFile].nSize += nAddSize;
-    }
+    m_blockfile_info[nFile].nSize += nAddSize;
 
-    if (!fKnown) {
-        bool out_of_space;
-        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
-        if (out_of_space) {
-            m_opts.notifications.fatalError(_("Disk space is too low!"));
-            return false;
-        }
-        if (bytes_allocated != 0 && IsPruneMode()) {
-            m_check_for_pruning = true;
-        }
+    bool out_of_space;
+    size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+    if (out_of_space) {
+        m_opts.notifications.fatalError(_("Disk space is too low!"));
+        return false;
+    }
+    if (bytes_allocated != 0 && IsPruneMode()) {
+        m_check_for_pruning = true;
     }
 
     m_dirty_fileinfo.insert(nFile);
     return true;
 }
 
+void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
+{
+    LOCK(cs_LastBlockFile);
+
+    const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
+    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
+    // Check that chain type is NORMAL, because this function is only
+    // called during reindexing, and reindexing deletes snapshot chainstates, so
+    // chain_type will not be SNAPSHOT. Also check that cursor exists, because
+    // the normal cursor should never be null.
+    Assume(chain_type == BlockfileType::NORMAL);
+    Assume(m_blockfile_cursors[chain_type]);
+    const int nFile = pos.nFile;
+    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
+        m_blockfile_info.resize(nFile + 1);
+    }
+
+    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
+    if (nFile != last_blockfile) {
+        // No undo data yet in the new file, so reset our undo-height tracking.
+        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+    }
+    m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
+    m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
+    m_dirty_fileinfo.insert(nFile);
+}
+
 bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
 {
     pos.nFile = nFile;
@@ -1145,17 +1154,17 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp)
     const auto position_known {dbp != nullptr};
     if (position_known) {
         blockPos = *dbp;
+        // position_known is set iff performing a reindex. In this case, no blocks need to be written, only the blockfile info database needs to be rebuilt.
+        UpdateBlockInfo(block, nHeight, *dbp);
     } else {
         // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for
         // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE).
         // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk.
         nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
-    }
-    if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) {
-        LogError("%s: FindBlockPos failed\n", __func__);
-        return FlatFilePos();
-    }
-    if (!position_known) {
+        if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime())) {
+            LogError("%s: FindBlockPos failed\n", __func__);
+            return FlatFilePos();
+        }
         if (!WriteBlockToDisk(block, blockPos)) {
             m_opts.notifications.fatalError(_("Failed to write block."));
             return FlatFilePos();
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 27b7737051..ff77a66b3b 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -161,11 +161,10 @@ class BlockManager
      * block file depending on nAddSize. May flush the previous blockfile to disk if it is full, update the
      * blockfile info, and check that there is enough disk space to save the block.
      *
-     * If fKnown is false, the nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of
+     * The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of
      * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE).
-     * If fKnown is true, nAddSize should be just the size of the serialized CBlock.
      */
-    [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown);
+    [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime);
     [[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
     bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize);
 
@@ -340,6 +339,15 @@ class BlockManager
      */
     FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp);
 
+    /** Update blockfile info while processing a block during reindex. The block must be available on disk.
+     *
+     * @param[in] block    the block being processed
+     * @param[in] nHeight  the height of the block
+     * @param[in] pos      the position of the serialized CBlock on disk. This is the position returned
+     *                     by WriteBlockToDisk pointing at the CBlock, not the separator fields before it
+     */
+    void UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos);
+
     /** Whether running in -prune mode. */
     [[nodiscard]] bool IsPruneMode() const { return m_prune_mode; }
 

From d9e477c4dc39d9623ed66c35c06e28f94ae62ad5 Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Fri, 26 Apr 2024 15:06:55 -0400
Subject: [PATCH 44/50] validation, blockstorage: Separate code paths for
 reindex and saving new blocks

By calling SaveBlockToDisk only when we actually want to save a new
block to disk. In the reindex case, we now call UpdateBlockInfo
directly from validation. This commit doesn't change behavior.
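
As an illustration (a minimal sketch of the resulting caller-side logic
in AcceptBlock, mirroring the diff below rather than adding new code):

    FlatFilePos blockPos{};
    if (dbp) {
        // Reindex: the block data is already on disk at *dbp, so only
        // the blockfile info statistics need to be rebuilt.
        blockPos = *dbp;
        m_blockman.UpdateBlockInfo(block, pindex->nHeight, blockPos);
    } else {
        // New block: find a free position and write the block there.
        blockPos = m_blockman.SaveBlockToDisk(block, pindex->nHeight);
        if (blockPos.IsNull()) { /* report the error to the caller */ }
    }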
---
 src/bench/readblock.cpp         |  2 +-
 src/node/blockstorage.cpp       | 30 +++++++++++-------------------
 src/node/blockstorage.h         |  4 +---
 src/test/blockmanager_tests.cpp | 24 ++++++++++--------------
 src/validation.cpp              | 16 +++++++++++-----
 5 files changed, 34 insertions(+), 42 deletions(-)

diff --git a/src/bench/readblock.cpp b/src/bench/readblock.cpp
index 0545c6b017..2b2bfe069e 100644
--- a/src/bench/readblock.cpp
+++ b/src/bench/readblock.cpp
@@ -18,7 +18,7 @@ static FlatFilePos WriteBlockToDisk(ChainstateManager& chainman)
     CBlock block;
     stream >> TX_WITH_WITNESS(block);
 
-    return chainman.m_blockman.SaveBlockToDisk(block, 0, nullptr);
+    return chainman.m_blockman.SaveBlockToDisk(block, 0);
 }
 
 static void ReadBlockFromDiskTest(benchmark::Bench& bench)
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 0fe989d722..9d8c4701ef 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -1147,28 +1147,20 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
     return true;
 }
 
-FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp)
+FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight)
 {
     unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
     FlatFilePos blockPos;
-    const auto position_known {dbp != nullptr};
-    if (position_known) {
-        blockPos = *dbp;
-        // position_known is set iff performing a reindex. In this case, no blocks need to be written, only the blockfile info database needs to be rebuilt.
-        UpdateBlockInfo(block, nHeight, *dbp);
-    } else {
-        // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for
-        // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE).
-        // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk.
-        nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
-        if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime())) {
-            LogError("%s: FindBlockPos failed\n", __func__);
-            return FlatFilePos();
-        }
-        if (!WriteBlockToDisk(block, blockPos)) {
-            m_opts.notifications.fatalError(_("Failed to write block."));
-            return FlatFilePos();
-        }
+    // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total,
+    // defined as BLOCK_SERIALIZATION_HEADER_SIZE)
+    nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
+    if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime())) {
+        LogError("%s: FindBlockPos failed\n", __func__);
+        return FlatFilePos();
+    }
+    if (!WriteBlockToDisk(block, blockPos)) {
+        m_opts.notifications.fatalError(_("Failed to write block."));
+        return FlatFilePos();
     }
     return blockPos;
 }
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index ff77a66b3b..ae47478779 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -328,8 +328,6 @@ class BlockManager
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /** Store block on disk and update block file statistics.
-     * If dbp is non-null, it means the block data is already stored, and dbp contains the file position.
-     * In this case, the block data will not be written; only the block file statistics will be updated. This case should only happen during reindexing.
      *
      * @param[in] block       the block to be stored
     * @param[in] nHeight     the height of the block
@@ -337,7 +335,7 @@ class BlockManager
      * @returns in case of success, the position at which the block was written;
      *          in case of an error, an empty FlatFilePos
      */
-    FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp);
+    FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight);
 
     /** Update blockfile info while processing a block during reindex. The block must be available on disk.
      *
diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp
index d7ac0bf823..efe0983698 100644
--- a/src/test/blockmanager_tests.cpp
+++ b/src/test/blockmanager_tests.cpp
@@ -35,20 +35,20 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos)
     };
     BlockManager blockman{*Assert(m_node.shutdown), blockman_opts};
     // simulate adding a genesis block normally
-    BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0, nullptr).nPos, BLOCK_SERIALIZATION_HEADER_SIZE);
+    BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0).nPos, BLOCK_SERIALIZATION_HEADER_SIZE);
     // simulate what happens during reindex
     // simulate a well-formed genesis block being found at offset 8 in the blk00000.dat file
     // the block is found at offset 8 because there is an 8 byte serialization header
     // consisting of 4 magic bytes + 4 length bytes before each block in a well-formed blk file.
-    FlatFilePos pos{0, BLOCK_SERIALIZATION_HEADER_SIZE};
-    BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0, &pos).nPos, BLOCK_SERIALIZATION_HEADER_SIZE);
+    const FlatFilePos pos{0, BLOCK_SERIALIZATION_HEADER_SIZE};
+    blockman.UpdateBlockInfo(params->GenesisBlock(), 0, pos);
     // now simulate what happens after reindex for the first new block processed
     // the actual block contents don't matter, just that it's a block.
     // verify that the write position is at offset 0x12d.
     // this is a check to make sure that https://github.com/bitcoin/bitcoin/issues/21379 does not recur
     // 8 bytes (for serialization header) + 285 (for serialized genesis block) = 293
     // add another 8 bytes for the second block's serialization header and we get 293 + 8 = 301
-    FlatFilePos actual{blockman.SaveBlockToDisk(params->GenesisBlock(), 1, nullptr)};
+    FlatFilePos actual{blockman.SaveBlockToDisk(params->GenesisBlock(), 1)};
     BOOST_CHECK_EQUAL(actual.nPos, BLOCK_SERIALIZATION_HEADER_SIZE + ::GetSerializeSize(TX_WITH_WITNESS(params->GenesisBlock())) + BLOCK_SERIALIZATION_HEADER_SIZE);
 }
 
@@ -156,12 +156,11 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
     // Blockstore is empty
     BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), 0);
 
-    // Write the first block; dbp=nullptr means this block doesn't already have a disk
-    // location, so allocate a free location and write it there.
-    FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1, /*dbp=*/nullptr)};
+    // Write the first block to a new location.
+    FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1)};
 
     // Write second block
-    FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2, /*dbp=*/nullptr)};
+    FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2)};
 
     // Two blocks in the file
     BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2);
@@ -181,22 +180,19 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
         BOOST_CHECK_EQUAL(read_block.nVersion, 2);
     }
 
-    // When FlatFilePos* dbp is given, SaveBlockToDisk() will not write or
-    // overwrite anything to the flat file block storage. It will, however,
-    // update the blockfile metadata. This is to facilitate reindexing
-    // when the user has the blocks on disk but the metadata is being rebuilt.
+    // During reindex, the flat file block storage will not be written to.
+    // UpdateBlockInfo will, however, update the blockfile metadata.
     // Verify this behavior by attempting (and failing) to write block 3 data
     // to block 2 location.
     CBlockFileInfo* block_data = blockman.GetBlockFileInfo(0);
     BOOST_CHECK_EQUAL(block_data->nBlocks, 2);
-    BOOST_CHECK(blockman.SaveBlockToDisk(block3, /*nHeight=*/3, /*dbp=*/&pos2) == pos2);
+    blockman.UpdateBlockInfo(block3, /*nHeight=*/3, /*pos=*/pos2);
     // Metadata is updated...
     BOOST_CHECK_EQUAL(block_data->nBlocks, 3);
     // ...but there are still only two blocks in the file
     BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2);
 
     // Block 2 was not overwritten:
-    // SaveBlockToDisk() did not call WriteBlockToDisk() because `FlatFilePos* dbp` was non-null
     blockman.ReadBlockFromDisk(read_block, pos2);
     BOOST_CHECK_EQUAL(read_block.nVersion, 2);
 }
diff --git a/src/validation.cpp b/src/validation.cpp
index 903f9caf13..2bdffdb1b0 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -4342,10 +4342,16 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr<const CBlock>& pblock,
     // Write block to history file
     if (fNewBlock) *fNewBlock = true;
     try {
-        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, dbp)};
-        if (blockPos.IsNull()) {
-            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
-            return false;
+        FlatFilePos blockPos{};
+        if (dbp) {
+            blockPos = *dbp;
+            m_blockman.UpdateBlockInfo(block, pindex->nHeight, blockPos);
+        } else {
+            blockPos = m_blockman.SaveBlockToDisk(block, pindex->nHeight);
+            if (blockPos.IsNull()) {
+                state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
+                return false;
+            }
         }
         ReceivedBlockTransactions(block, pindex, blockPos);
     } catch (const std::runtime_error& e) {
@@ -4845,7 +4851,7 @@ bool Chainstate::LoadGenesisBlock()
 
     try {
         const CBlock& block = params.GenesisBlock();
-        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, nullptr)};
+        FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0)};
         if (blockPos.IsNull()) {
             LogError("%s: writing genesis block to disk failed\n", __func__);
             return false;

From 17103637c6fa2dfcf5374ebb0cd715e540dd4ce1 Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Fri, 10 May 2024 15:08:55 -0400
Subject: [PATCH 45/50] blockstorage: Rename FindBlockPos and have it return a
 FlatFilePos

The new name reflects that it is no longer called with existing blocks
for which the position is already known. Returning a FlatFilePos
directly simplifies the interface.
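
To illustrate the interface change (a sketch of the before/after call
pattern, not additional code), a caller that previously wrote

    FlatFilePos pos;
    if (!FindBlockPos(pos, nAddSize, nHeight, nTime)) {
        // handle failure
    }

now writes

    FlatFilePos pos{FindNextBlockPos(nAddSize, nHeight, nTime)};
    if (pos.IsNull()) {
        // handle failure
    }

with a null position signalling failure instead of a separate bool
return value and an out-parameter.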
--- src/node/blockstorage.cpp | 15 ++++++++------- src/node/blockstorage.h | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 9d8c4701ef..aab6a9e7fb 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -848,7 +848,7 @@ fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const return BlockFileSeq().FileName(pos); } -bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime) +FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime) { LOCK(cs_LastBlockFile); @@ -897,6 +897,7 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne m_blockfile_info.resize(nFile + 1); } } + FlatFilePos pos; pos.nFile = nFile; pos.nPos = m_blockfile_info[nFile].nSize; @@ -927,14 +928,14 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); if (out_of_space) { m_opts.notifications.fatalError(_("Disk space is too low!")); - return false; + return {}; } if (bytes_allocated != 0 && IsPruneMode()) { m_check_for_pruning = true; } m_dirty_fileinfo.insert(nFile); - return true; + return pos; } void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos) @@ -1031,7 +1032,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in - // the FindBlockPos function + // the FindNextBlockPos function if (_pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { // Do not propagate the return code, a failed flush here should not // be an indication for a failed write. If it were propagated here, @@ -1150,12 +1151,12 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatF FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) { unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block)); - FlatFilePos blockPos; // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total, // defined as BLOCK_SERIALIZATION_HEADER_SIZE) nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); - if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime())) { - LogError("%s: FindBlockPos failed\n", __func__); + FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; + if (blockPos.IsNull()) { + LogError("%s: FindNextBlockPos failed\n", __func__); return FlatFilePos(); } if (!WriteBlockToDisk(block, blockPos)) { diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index ae47478779..eb2ed14747 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -164,7 +164,7 @@ class BlockManager * The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE). 
     */
-    [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime);
+    [[nodiscard]] FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime);
     [[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
     bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize);
 
@@ -221,7 +221,7 @@ class BlockManager
     //! effectively.
     //!
     //! This data structure maintains separate blockfile number cursors for each
-    //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos().
+    //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindNextBlockPos().
     //!
     //! The first element is the NORMAL cursor, second is ASSUMED.
     std::array<std::optional<BlockfileCursor>, BlockfileType::NUM_TYPES>

From e41667b720372dae8438ea86e9819027e62b54e0 Mon Sep 17 00:00:00 2001
From: Ryan Ofsky
Date: Thu, 9 May 2024 18:34:14 -0400
Subject: [PATCH 46/50] blockstorage: Don't move cursor backwards in
 UpdateBlockInfo

Previously, it was possible to move the cursor back to an older file if
blocks were encountered out of order during a reindex. This would mean
that MaxBlockfileNum() would be incorrect, and a wrong DB_LAST_BLOCK
could be written to disk. This improves the logic by only ever moving
the cursor forward (if possible), never backwards.

Co-authored-by: Martin Zumsande
---
 src/node/blockstorage.cpp | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index aab6a9e7fb..2c0cbe7163 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -942,24 +942,19 @@ void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
 {
     LOCK(cs_LastBlockFile);
 
+    // Update the cursor so it points to the last file.
+    const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
+    auto& cursor{m_blockfile_cursors[chain_type]};
+    if (!cursor || cursor->file_num < pos.nFile) {
+        m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
+    }
+
+    // Update the file information with the current block.
     const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
-    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
-    // Check that chain type is NORMAL, because this function is only
-    // called during reindexing, and reindexing deletes snapshot chainstates, so
-    // chain_type will not be SNAPSHOT. Also check that cursor exists, because
-    // the normal cursor should never be null.
-    Assume(chain_type == BlockfileType::NORMAL);
-    Assume(m_blockfile_cursors[chain_type]);
     const int nFile = pos.nFile;
     if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
         m_blockfile_info.resize(nFile + 1);
     }
-
-    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
-    if (nFile != last_blockfile) {
-        // No undo data yet in the new file, so reset our undo-height tracking.
- m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; - } m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime()); m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize); m_dirty_fileinfo.insert(nFile); From fa6c82dd9b0ef9687c28ddd6b57065d0ba7de85b Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 8 May 2024 11:25:55 +0200 Subject: [PATCH 47/50] ci: Remove clang version pin in test-each-commit --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 44521c1af3..d4fd477063 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,12 +62,12 @@ jobs: echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD ^$(git rev-list -n1 --merges HEAD)^@ | head -1)" >> "$GITHUB_ENV" - run: | sudo apt-get update - sudo apt-get install clang-15 ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y + sudo apt-get install clang ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev qtbase5-dev qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y - name: Compile and run tests run: | # Run tests on commits after the last merge commit and before the PR head commit # Use clang++, because it is a bit faster and uses less memory than g++ - git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang-15 CXX=clang++-15 ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} + git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang CXX=clang++ ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} macos-native-x86_64: name: 'macOS 13 native, x86_64, no depends, sqlite only, gui' From fa90ad23c0cb99bde305af156c978c066f7bacb8 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 8 May 2024 11:24:55 +0200 Subject: [PATCH 48/50] ci: Roll test-each-commit Ubuntu --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d4fd477063..a5c322fc23 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ env: jobs: test-each-commit: name: 'test each commit' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. env: From 141df0a28810470e53fdbc6d32d3cb4020fe3ca1 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Mon, 13 May 2024 18:13:07 +0000 Subject: [PATCH 49/50] crypto: disable asan for sha256_sse4 with clang and -O0 Clang is unable to compile the Transform function for that combination of options. 
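
The workaround, sketched here as a minimal example of the pattern used
in the diff below, attaches the attribute only for unoptimized clang
builds (GCC and optimized clang builds are unaffected, since
__OPTIMIZE__ is defined whenever any -O level above -O0 is in effect):

    #if defined(__clang__) && !defined(__OPTIMIZE__)
    __attribute__((no_sanitize("address")))
    #endif
    void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks);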
--- src/crypto/sha256_sse4.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/crypto/sha256_sse4.cpp b/src/crypto/sha256_sse4.cpp index f4557291ce..f0e255a23c 100644 --- a/src/crypto/sha256_sse4.cpp +++ b/src/crypto/sha256_sse4.cpp @@ -13,6 +13,13 @@ namespace sha256_sse4 { void Transform(uint32_t* s, const unsigned char* chunk, size_t blocks) +#if defined(__clang__) && !defined(__OPTIMIZE__) + /* + clang is unable to compile this with -O0 and -fsanitize=address. + See upstream bug: https://github.com/llvm/llvm-project/issues/92182 + */ + __attribute__((no_sanitize("address"))) +#endif { static const uint32_t K256 alignas(16) [] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, From b47bd959207e82555f07e028cc2246943d32d4c3 Mon Sep 17 00:00:00 2001 From: TheCharlatan Date: Fri, 5 Apr 2024 12:44:24 +0200 Subject: [PATCH 50/50] kernel: De-globalize fReindex fReindex is one of the last remaining globals exposed by the kernel library, so move it into the blockstorage class to reduce the amount of global mutable state and make the kernel library a bit less awkward to use. --- src/bitcoin-chainstate.cpp | 2 +- src/init.cpp | 16 +++++++--------- src/kernel/blockmanager_opts.h | 1 + src/node/blockmanager_args.cpp | 2 ++ src/node/blockstorage.cpp | 7 +++---- src/node/blockstorage.h | 14 ++++++++++---- src/node/chainstate.cpp | 6 +++--- src/test/util/setup_common.cpp | 2 +- src/validation.cpp | 25 ++++++++++++------------- 9 files changed, 40 insertions(+), 35 deletions(-) diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp index 4927634233..4d2a6f0c2a 100644 --- a/src/bitcoin-chainstate.cpp +++ b/src/bitcoin-chainstate.cpp @@ -151,7 +151,7 @@ int main(int argc, char* argv[]) { LOCK(chainman.GetMutex()); std::cout - << "\t" << "Reindexing: " << std::boolalpha << node::fReindex.load() << std::noboolalpha << std::endl + << "\t" << "Reindexing: " << std::boolalpha << chainman.m_blockman.m_reindexing.load() << std::noboolalpha << std::endl << "\t" << "Snapshot Active: " << std::boolalpha << chainman.IsSnapshotActive() << std::noboolalpha << std::endl << "\t" << "Active Height: " << chainman.ActiveHeight() << std::endl << "\t" << "Active IBD: " << std::boolalpha << chainman.IsInitialBlockDownload() << std::noboolalpha << std::endl; diff --git a/src/init.cpp b/src/init.cpp index 51ade4de93..0aac2ac65f 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -126,7 +126,6 @@ using node::CalculateCacheSizes; using node::DEFAULT_PERSIST_MEMPOOL; using node::DEFAULT_PRINTPRIORITY; using node::DEFAULT_STOPATHEIGHT; -using node::fReindex; using node::KernelNotifications; using node::LoadChainstate; using node::MempoolPath; @@ -1483,7 +1482,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.notifications = std::make_unique(*Assert(node.shutdown), node.exit_status); ReadNotificationArgs(args, *node.notifications); - fReindex = args.GetBoolArg("-reindex", false); bool fReindexChainState = args.GetBoolArg("-reindex-chainstate", false); ChainstateManager::Options chainman_opts{ .chainparams = chainparams, @@ -1560,7 +1558,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node::ChainstateLoadOptions options; options.mempool = Assert(node.mempool.get()); - options.reindex = node::fReindex; + options.reindex = chainman.m_blockman.m_reindexing; options.reindex_chainstate = fReindexChainState; options.prune = chainman.m_blockman.IsPruneMode(); options.check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS); @@ 
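
For illustration, call sites that previously consulted the global now go
through the chainstate manager's block manager (a sketch of the usage
change, matching the call sites updated below):

    // Before: global flag exposed by the kernel library.
    if (node::fReindex) { /* ... */ }

    // After: per-BlockManager flag, initialized from the -reindex option
    // and persisted via the block tree database.
    if (chainman.m_blockman.m_reindexing) { /* ... */ }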
-1608,7 +1606,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) error.original + ".\nPlease restart with -reindex or -reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { - fReindex = true; + chainman.m_blockman.m_reindexing = true; if (!Assert(node.shutdown)->reset()) { LogPrintf("Internal error: failed to reset shutdown signal.\n"); } @@ -1641,17 +1639,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // ********************************************************* Step 8: start indexers if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { - g_txindex = std::make_unique(interfaces::MakeChain(node), cache_sizes.tx_index, false, fReindex); + g_txindex = std::make_unique(interfaces::MakeChain(node), cache_sizes.tx_index, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(g_txindex.get()); } for (const auto& filter_type : g_enabled_filter_types) { - InitBlockFilterIndex([&]{ return interfaces::MakeChain(node); }, filter_type, cache_sizes.filter_index, false, fReindex); + InitBlockFilterIndex([&]{ return interfaces::MakeChain(node); }, filter_type, cache_sizes.filter_index, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(GetBlockFilterIndex(filter_type)); } if (args.GetBoolArg("-coinstatsindex", DEFAULT_COINSTATSINDEX)) { - g_coin_stats_index = std::make_unique(interfaces::MakeChain(node), /*cache_size=*/0, false, fReindex); + g_coin_stats_index = std::make_unique(interfaces::MakeChain(node), /*cache_size=*/0, false, chainman.m_blockman.m_reindexing); node.indexes.emplace_back(g_coin_stats_index.get()); } @@ -1670,7 +1668,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // if pruning, perform the initial blockstore prune // after any wallet rescanning has taken place. if (chainman.m_blockman.IsPruneMode()) { - if (!fReindex) { + if (!chainman.m_blockman.m_reindexing) { LOCK(cs_main); for (Chainstate* chainstate : chainman.GetAll()) { uiInterface.InitMessage(_("Pruning blockstoreā€¦").translated); @@ -1696,7 +1694,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) int chain_active_height = WITH_LOCK(cs_main, return chainman.ActiveChain().Height()); // On first startup, warn on low block storage space - if (!fReindex && !fReindexChainState && chain_active_height <= 1) { + if (!chainman.m_blockman.m_reindexing && !fReindexChainState && chain_active_height <= 1) { uint64_t assumed_chain_bytes{chainparams.AssumedBlockchainSize() * 1024 * 1024 * 1024}; uint64_t additional_bytes_needed{ chainman.m_blockman.IsPruneMode() ? 
diff --git a/src/kernel/blockmanager_opts.h b/src/kernel/blockmanager_opts.h index deeba7e318..16072b669b 100644 --- a/src/kernel/blockmanager_opts.h +++ b/src/kernel/blockmanager_opts.h @@ -24,6 +24,7 @@ struct BlockManagerOpts { bool fast_prune{false}; const fs::path blocks_dir; Notifications& notifications; + bool reindex{false}; }; } // namespace kernel diff --git a/src/node/blockmanager_args.cpp b/src/node/blockmanager_args.cpp index fa76566652..dd8419a68a 100644 --- a/src/node/blockmanager_args.cpp +++ b/src/node/blockmanager_args.cpp @@ -33,6 +33,8 @@ util::Result ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value; + if (auto value{args.GetBoolArg("-reindex")}) opts.reindex = *value; + return {}; } } // namespace node diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 76837f2a27..7e82e4aa95 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -150,7 +150,6 @@ bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, s } // namespace kernel namespace node { -std::atomic_bool fReindex(false); bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const { @@ -552,7 +551,7 @@ bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_block // Check whether we need to continue reindexing bool fReindexing = false; m_block_tree_db->ReadReindexing(fReindexing); - if (fReindexing) fReindex = true; + if (fReindexing) m_reindexing = true; return true; } @@ -1178,7 +1177,7 @@ void ImportBlocks(ChainstateManager& chainman, std::vector vImportFile ImportingNow imp{chainman.m_blockman.m_importing}; // -reindex - if (fReindex) { + if (chainman.m_blockman.m_reindexing) { int nFile = 0; // Map of disk positions for blocks with unknown parent (only used for reindex); // parent hash -> child disk position, multiple children can have the same parent. @@ -1201,7 +1200,7 @@ void ImportBlocks(ChainstateManager& chainman, std::vector vImportFile nFile++; } WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); - fReindex = false; + chainman.m_blockman.m_reindexing = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked): chainman.ActiveChainstate().LoadGenesisBlock(); diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index ce514cc645..1867c70415 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -76,8 +76,6 @@ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB /** Size of header written by WriteBlockToDisk before a serialized CBlock */ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = std::tuple_size_v + sizeof(unsigned int); -extern std::atomic_bool fReindex; - // Because validation code takes pointers to the map's CBlockIndex objects, if // we ever switch to another associative container, we need to either use a // container that has stable addressing (true of all std associative @@ -254,11 +252,19 @@ class BlockManager explicit BlockManager(const util::SignalInterrupt& interrupt, Options opts) : m_prune_mode{opts.prune_target > 0}, m_opts{std::move(opts)}, - m_interrupt{interrupt} {}; + m_interrupt{interrupt}, + m_reindexing{m_opts.reindex} {}; const util::SignalInterrupt& m_interrupt; std::atomic m_importing{false}; + /** + * Tracks if a reindex is currently in progress. 
Set to true when a reindex + * is requested and false when reindexing completes. Its value is persisted + * in the BlockTreeDB across restarts. + */ + std::atomic_bool m_reindexing; + BlockMap m_block_index GUARDED_BY(cs_main); /** @@ -322,7 +328,7 @@ class BlockManager [[nodiscard]] uint64_t GetPruneTarget() const { return m_opts.prune_target; } static constexpr auto PRUNE_TARGET_MANUAL{std::numeric_limits::max()}; - [[nodiscard]] bool LoadingBlocks() const { return m_importing || fReindex; } + [[nodiscard]] bool LoadingBlocks() const { return m_importing || m_reindexing; } /** Calculate the amount of disk space the block & undo files currently use */ uint64_t CalculateCurrentUsage(); diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index bf1fc06b0b..d6eb14f513 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -60,8 +60,8 @@ static ChainstateLoadResult CompleteChainstateInitialization( // LoadBlockIndex will load m_have_pruned if we've ever removed a // block file from disk. - // Note that it also sets fReindex global based on the disk flag! - // From here on, fReindex and options.reindex values may be different! + // Note that it also sets m_reindexing based on the disk flag! + // From here on, m_reindexing and options.reindex values may be different! if (!chainman.LoadBlockIndex()) { if (chainman.m_interrupt) return {ChainstateLoadStatus::INTERRUPTED, {}}; return {ChainstateLoadStatus::FAILURE, _("Error loading block database")}; @@ -84,7 +84,7 @@ static ChainstateLoadResult CompleteChainstateInitialization( // If we're not mid-reindex (based on disk + args), add a genesis block on disk // (otherwise we use the one already on disk). // This is called again in ImportBlocks after the reindex completes. - if (!fReindex && !chainman.ActiveChainstate().LoadGenesisBlock()) { + if (!chainman.m_blockman.m_reindexing && !chainman.ActiveChainstate().LoadGenesisBlock()) { return {ChainstateLoadStatus::FAILURE, _("Error initializing block database")}; } diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index e9566cf50b..fd07931716 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -276,7 +276,7 @@ void ChainTestingSetup::LoadVerifyActivateChainstate() options.mempool = Assert(m_node.mempool.get()); options.block_tree_db_in_memory = m_block_tree_db_in_memory; options.coins_db_in_memory = m_coins_db_in_memory; - options.reindex = node::fReindex; + options.reindex = chainman.m_blockman.m_reindexing; options.reindex_chainstate = m_args.GetBoolArg("-reindex-chainstate", false); options.prune = chainman.m_blockman.IsPruneMode(); options.check_blocks = m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS); diff --git a/src/validation.cpp b/src/validation.cpp index 90f5897b5f..4dc865ae1a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -83,7 +83,6 @@ using node::BlockManager; using node::BlockMap; using node::CBlockIndexHeightOnlyComparator; using node::CBlockIndexWorkComparator; -using node::fReindex; using node::SnapshotMetadata; /** Time to wait between writing blocks/block index to disk. 
*/ @@ -2642,7 +2641,7 @@ bool Chainstate::FlushStateToDisk( CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(); LOCK(m_blockman.cs_LastBlockFile); - if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !fReindex) { + if (m_blockman.IsPruneMode() && (m_blockman.m_check_for_pruning || nManualPruneHeight > 0) && !m_chainman.m_blockman.m_reindexing) { // make sure we don't prune above any of the prune locks bestblocks // pruning is height-based int last_prune{m_chain.Height()}; // last height we can prune @@ -3255,10 +3254,10 @@ bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* return true; } -static SynchronizationState GetSynchronizationState(bool init) +static SynchronizationState GetSynchronizationState(bool init, bool reindexing) { if (!init) return SynchronizationState::POST_INIT; - if (::fReindex) return SynchronizationState::INIT_REINDEX; + if (reindexing) return SynchronizationState::INIT_REINDEX; return SynchronizationState::INIT_DOWNLOAD; } @@ -3280,7 +3279,7 @@ static bool NotifyHeaderTip(ChainstateManager& chainman) LOCKS_EXCLUDED(cs_main) } // Send block tip changed notifications without cs_main if (fNotify) { - chainman.GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader->nHeight, pindexHeader->nTime, false); + chainman.GetNotifications().headerTip(GetSynchronizationState(fInitialBlockDownload, chainman.m_blockman.m_reindexing), pindexHeader->nHeight, pindexHeader->nTime, false); } return fNotify; } @@ -3399,7 +3398,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< } // Always notify the UI if a new block tip was connected - if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd), *pindexNewTip))) { + if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd, m_chainman.m_blockman.m_reindexing), *pindexNewTip))) { // Just breaking and returning success for now. This could // be changed to bubble up the kernel::Interrupted value to // the caller so the caller could distinguish between @@ -3625,7 +3624,7 @@ bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pinde // parameter indicating the source of the tip change so hooks can // distinguish user-initiated invalidateblock changes from other // changes. 
- (void)m_chainman.GetNotifications().blockTip(GetSynchronizationState(m_chainman.IsInitialBlockDownload()), *to_mark_failed->pprev); + (void)m_chainman.GetNotifications().blockTip(GetSynchronizationState(m_chainman.IsInitialBlockDownload(), m_chainman.m_blockman.m_reindexing), *to_mark_failed->pprev); } return true; } @@ -4264,7 +4263,7 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t m_last_presync_update = now; } bool initial_download = IsInitialBlockDownload(); - GetNotifications().headerTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true); + GetNotifications().headerTip(GetSynchronizationState(initial_download, m_blockman.m_reindexing), height, timestamp, /*presync=*/true); if (initial_download) { int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()}; blocks_left = std::max(0, blocks_left); @@ -4785,8 +4784,8 @@ bool ChainstateManager::LoadBlockIndex() { AssertLockHeld(cs_main); // Load block index from databases - bool needs_init = fReindex; - if (!fReindex) { + bool needs_init = m_blockman.m_reindexing; + if (!m_blockman.m_reindexing) { bool ret{m_blockman.LoadBlockIndexDB(SnapshotBlockhash())}; if (!ret) return false; @@ -4823,8 +4822,8 @@ bool ChainstateManager::LoadBlockIndex() if (needs_init) { // Everything here is for *new* reindex/DBs. Thus, though - // LoadBlockIndexDB may have set fReindex if we shut down - // mid-reindex previously, we don't check fReindex and + // LoadBlockIndexDB may have set m_reindexing if we shut down + // mid-reindex previously, we don't check m_reindexing and // instead only check it prior to LoadBlockIndexDB to set // needs_init. @@ -4969,7 +4968,7 @@ void ChainstateManager::LoadExternalBlockFile( } } - if (m_blockman.IsPruneMode() && !fReindex && pblock) { + if (m_blockman.IsPruneMode() && !m_blockman.m_reindexing && pblock) { // must update the tip for pruning to work while importing with -loadblock. // this is a tradeoff to conserve disk space at the expense of time // spent updating the tip to be able to prune.