apple/swift 54873
The Swift Programming Language
apple/swift-evolution 11802
This maintains proposals for changes and user-visible enhancements to the Swift Programming Language.
apple/swift-package-manager 8136
The Package Manager for the Swift Programming Language
apple/swift-corelibs-foundation 4108
The Foundation Project, providing core utilities, internationalization, and OS independence
apple/swift-corelibs-libdispatch 1962
The libdispatch Project, (a.k.a. Grand Central Dispatch), for concurrency on multicore hardware
ElementsProject/lightning 1783
c-lightning — a Lightning Network implementation in C
apple/swift-corelibs-xctest 872
The XCTest Project, A Swift core library for providing unit test support
apple/swift-llbuild
A low-level build system, used by Xcode and the Swift Package Manager
apple/swift-llvm 802
Pull request review comment bitcoin/bitcoin
addrman: Fix new table bucketing during unserialization
 friend class CAddrManTest;
         nTried -= nLost;

         // Store positions in the new table buckets to apply later (if possible).
-        std::map<int, int> entryToBucket; // Represents which entry belonged to which bucket when serializing
-
-        for (int bucket = 0; bucket < nUBuckets; bucket++) {
-            int nSize = 0;
-            s >> nSize;
-            for (int n = 0; n < nSize; n++) {
-                int nIndex = 0;
-                s >> nIndex;
-                if (nIndex >= 0 && nIndex < nNew) {
-                    entryToBucket[nIndex] = bucket;
+        // An entry may appear in up to ADDRMAN_NEW_BUCKETS_PER_ADDRESS buckets,
+        // so we store all bucket-entry_index pairs to iterate through later.
+        std::vector<std::pair<int, int>> bucket_entries;
+
+        for (int bucket = 0; bucket < nUBuckets; ++bucket) {
+            int num_entries{0};
+            s >> num_entries;
+            for (int n = 0; n < num_entries; ++n) {
+                int entry_index{0};
+                s >> entry_index;
+                if (entry_index >= 0 && entry_index < nNew) {
+                    bucket_entries.emplace_back(bucket, entry_index);
                 }
             }
         }

-        uint256 supplied_asmap_version;
+        // If the bucket count and asmap checksum haven't changed, then attempt
+        // to restore the entries to the buckets/positions they were in before
+        // serialization.
+        uint256 supplied_asmap_checksum;
         if (m_asmap.size() != 0) {
-            supplied_asmap_version = SerializeHash(m_asmap);
+            supplied_asmap_checksum = SerializeHash(m_asmap);
         }
-        uint256 serialized_asmap_version;
+        uint256 serialized_asmap_checksum;
         if (format >= Format::V2_ASMAP) {
-            s >> serialized_asmap_version;
+            s >> serialized_asmap_checksum;
         }
+        const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT &&
+            serialized_asmap_checksum == supplied_asmap_checksum};

-        for (int n = 0; n < nNew; n++) {
-            CAddrInfo &info = mapInfo[n];
-            int bucket = entryToBucket[n];
-            int nUBucketPos = info.GetBucketPosition(nKey, true, bucket);
-            if (format >= Format::V2_ASMAP && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket][nUBucketPos] == -1 &&
-                info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS && serialized_asmap_version == supplied_asmap_version) {
+        for (auto bucket_entry : bucket_entries) {
+            int bucket{bucket_entry.first};
+            const int entry_index{bucket_entry.second};
+            CAddrInfo& info = mapInfo[entry_index];
+
+            // The entry shouldn't appear in more than
+            // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip
+            // this bucket_entry.
+            if (info.nRefCount >= ADDRMAN_NEW_BUCKETS_PER_ADDRESS) continue;
+
+            int bucket_position = info.GetBucketPosition(nKey, true, bucket);
+            if (restore_bucketing && vvNew[bucket][bucket_position] == -1) {
Under what conditions could it happen that restore_bucketing is false and vvNew[bucket][bucket_position] != -1 (occupied)? Is it only if ADDRMAN_BUCKET_SIZE has been changed? If that has happened then we shouldn't try to restore the bucketing (i.e. should set restore_bucketing to false), but unfortunately the old ADDRMAN_BUCKET_SIZE is not saved on disk, so we can only indirectly observe that the file was written with a different ADDRMAN_BUCKET_SIZE.
(the above is not a suggestion for a change, just a few questions to confirm my understanding is correct)
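For context, the slot inside a new bucket is derived from the addrman key, the bucket number and the address, reduced modulo ADDRMAN_BUCKET_SIZE. A rough paraphrase of CAddrInfo::GetBucketPosition() (illustrative, not a verbatim copy of the source):

int CAddrInfo::GetBucketPosition(const uint256& nKey, bool fNew, int nBucket) const
{
    // Hash the addrman secret key, the table tag ('N' for new, 'K' for tried),
    // the bucket number and the address key, then reduce modulo the bucket size.
    uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << (fNew ? uint8_t{'N'} : uint8_t{'K'}) << nBucket << GetKey()).GetCheapHash();
    return hash1 % ADDRMAN_BUCKET_SIZE;
}

So a peers.dat written with a different ADDRMAN_BUCKET_SIZE maps the same entries to different positions on load, and an entry can then land on a slot that is already occupied.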
comment created time in 44 minutes
Pull request review comment bitcoin/bitcoin
addrman: Fix new table bucketing during unserialization
+            int bucket_position = info.GetBucketPosition(nKey, true, bucket);
+            if (restore_bucketing && vvNew[bucket][bucket_position] == -1) {
                 // Bucketing has not changed, using existing bucket positions for the new table
-                vvNew[bucket][nUBucketPos] = n;
-                info.nRefCount++;
+                vvNew[bucket][bucket_position] = entry_index;
+                ++info.nRefCount;
             } else {
-                // In case the new table data cannot be used (format unknown, bucket count wrong or new asmap),
+                // In case the new table data cannot be used (bucket count wrong or new asmap),
                 // try to give them a reference based on their primary source address.
                 LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n");
If restore_bucketing is false, then this will be printed for every address, possibly many times. But it does not include the address itself. What about printing the address:
LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entry from disk for %s\n", info.ToString());
or print the generic message once:
if (!printed) {
    LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n");
    printed = true;
}
comment created time in 28 minutes
Pull request review comment bitcoin/bitcoin
addrman: Fix new table bucketing during unserialization
 friend class CAddrManTest;
      * Notice that vvTried, mapAddr and vVector are never encoded explicitly;
      * they are instead reconstructed from the other information.
      *
-     * vvNew is serialized, but only used if ADDRMAN_UNKNOWN_BUCKET_COUNT didn't change,
-     * otherwise it is reconstructed as well.
+     * vvNew is serialized, but only used if ADDRMAN_NEW_BUCKET_COUNT and the asmap checksum
+     * didn't change, otherwise it is reconstructed as well.
vVector does not exist. More importantly, none of the members is serialized explicitly/directly. So I think the following changes would make a better description of the source code:
diff --git i/src/addrman.h w/src/addrman.h
index cde864f25..983a1f4fd 100644
--- i/src/addrman.h
+++ w/src/addrman.h
@@ -332,29 +332,26 @@ public:
* (format=5, lowest_compatible=5) and so any versions that do not know how to parse
* format=5 will not try to read the file.
* * nKey
* * nNew
* * nTried
* * number of "new" buckets XOR 2**30
- * * all nNew addrinfos in vvNew
- * * all nTried addrinfos in vvTried
- * * for each bucket:
+ * * all new addresses (total count: nNew)
+ * * all tried addresses (total count: nTried)
+ * * for each new bucket:
* * number of elements
- * * for each element: index
+ * * for each element: index in the serialized "all new addresses"
* * asmap checksum
*
* 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it
* as incompatible. This is necessary because it did not check the version number on
* deserialization.
*
- * Notice that vvTried, mapAddr and vVector are never encoded explicitly;
+ * Notice that vvNew, vvTried, mapAddr, mapInfo and vRandom are never encoded explicitly;
* they are instead reconstructed from the other information.
*
- * vvNew is serialized, but only used if ADDRMAN_NEW_BUCKET_COUNT and the asmap checksum
- * didn't change, otherwise it is reconstructed as well.
- *
* This format is more complex, but significantly smaller (at most 1.5 MiB), and supports
* changes to the ADDRMAN_ parameters without breaking the on-disk structure.
*
* We don't use SERIALIZE_METHODS since the serialization and deserialization code has
* very little in common.
*/
Why is vvNew not serialized? Because vvNew[i][j] contains the id under which the element can be found in mapInfo. We serialize a condensed version of vvNew (without the -1s), but instead of that id we store an index into the serialized list of CAddrInfo, which exists only on disk.
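For reference, a rough sketch of that serialization step (paraphrased from memory, not the exact source; mapUnkIds stands for the temporary id-to-disk-index map built while writing out the addresses):

for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) {
    int num_entries = 0;
    for (int i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) {
        if (vvNew[bucket][i] != -1) ++num_entries;  // count only the occupied slots
    }
    s << num_entries;
    for (int i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) {
        if (vvNew[bucket][i] != -1) {
            // Write the entry's position in the on-disk "all new addresses" list,
            // not its in-memory id.
            int index = mapUnkIds[vvNew[bucket][i]];
            s << index;
        }
    }
}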
comment created time in an hour
PR opened bitcoin/bitcoin
Closes #17862
Context from original comment (minor edits):
RewindBlockIndex() is a mechanism to allow nodes to be upgraded after segwit activation, while still keeping their chainstate/datadir in a consistent state. It works as follows:
- A pre-segwit (i.e. v0.13.0 or older) node is running.
- Segwit activates. The pre-segwit node remains sync'ed to the tip, but is not enforcing the new segwit rules.
- The user upgrades the node to a segwit-aware version (v0.13.1 or newer).
- On startup, in AppInitMain(), RewindBlockIndex() is called. This walks the chain backwards from the tip, disconnecting and erasing blocks from after segwit activation that weren't validated with segwit rules.
- Those blocks are then redownloaded (with witness data) and validated with segwit rules.
This logic probably isn't required any more since:
- Segwit activated at height 481824, when the blockchain was 130GB and the total number of txs was 250 million. Today, we're at height 661100, the blockchain is around 315GB and the total number of txs is around 600 million. Even if 20% of that added data is witness data (a high estimate), around 150GB of transactions would need to be rewound to get back to segwit activation height (rough arithmetic spelled out below this list). It'd probably be faster to simply validate from genesis, especially since we won't be validating any scripts before the assumevalid block. It's also unclear whether rewinding 150GB of transactions would even work. It's certainly never been tested.
- Bitcoin Core v0.13 is hardly used any more. https://luke.dashjr.org/programs/bitcoin/files/charts/software.html shows fewer than 50 nodes running it. The software reached EOL on Aug 1st 2018. It's very unlikely that anyone is running 0.13 and will want to upgrade to 0.22.
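(Rough arithmetic behind the ~150GB figure, using only the numbers above: 315GB - 130GB ≈ 185GB of block data added since segwit activation; if ~20% of that is witness data, the non-witness remainder is roughly 185GB × 0.8 ≈ 148GB, i.e. the ~150GB that would have to be rewound.)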
Removing this dead code allows the following:
- removal of tests using segwitheight=-1 in p2p_segwit.py.
- in turn, that allows us to drop support for -segwitheight=-1, which is only supported for that test. That would allow us to always set NODE_WITNESS in our local services. The only reason we don't do that is to support -segwitheight=-1.
- that in turn allows us to drop all of the GetLocalServices() & NODE_WITNESS checks inside net_processing.cpp, since our local services would always include NODE_WITNESS.
This PR introduces NeedsIBD(), which merely checks for insufficiently validated segwit blocks and requests that the user delete the contents of the blocks dir and chainstate dir to perform IBD upon restart.
pr created time in 2 hours
issue comment bitcoin/bitcoin
Can I make readme.md and other introduction files in another language?
.... for translating the bitcoin homepage. But we're just trying to translate readme.md in this repo.
comment created time in 3 hours
issue comment bitcoin/bitcoin
Can I make readme.md and other introduction files in another language?
Yep I guess this is the right team
comment created time in 3 hours
pull request comment bitcoin/bitcoin
Gitian builds
File | commit d0852f39a7a3bfbb36437ef20bf94c263cad632a<br>(master) | commit 06ddcb1d3dc2627d5262ab22423d2e55e6cd6f95<br>(master and this pull) |
---|---|---|
bitcoin-core-linux-22-res.yml | 71a83cb9ff92de39... |
bitcoin-core-osx-22-res.yml | c24e9746c660fd94... |
bitcoin-core-win-22-res.yml | 23c52e66375b8552... |
*-aarch64-linux-gnu-debug.tar.gz | 8d57e596b614cac3... |
*-aarch64-linux-gnu.tar.gz | 59b3cbe158e31ed6... |
*-arm-linux-gnueabihf-debug.tar.gz | c740fceba55b6744... |
*-arm-linux-gnueabihf.tar.gz | d048a0e64edaa8bf... |
*-osx-unsigned.dmg | 6edea68f072311e4... |
*-osx64.tar.gz | d0e07a4123befe5b... |
*-riscv64-linux-gnu-debug.tar.gz | 943970e956f65e15... |
*-riscv64-linux-gnu.tar.gz | 15cae22d0f1b0cfb... |
*-win64-debug.zip | 52f70a8aa6154c7e... |
*-win64-setup-unsigned.exe | c25576ac4a20cf8a... |
*-win64.zip | 45b4446c73a75b64... |
*-x86_64-linux-gnu-debug.tar.gz | 2456b5b723e0dc71... |
*-x86_64-linux-gnu.tar.gz | d60a9f2be554d1cf... |
*.tar.gz | ef37b0e6911cbe01... |
linux-build.log | 5ed6be7e2d31d3b3... | 78f6c8f9e04aefaa...
osx-build.log | e606ca23b7750ec0... | 5a281933faea8783...
win-build.log | 63a601ac834b5ca7... | 16be22c88ea95e56...
linux-build.log.diff | 3243d042b2935296... |
osx-build.log.diff | ac0522a525ab3188... |
win-build.log.diff | d4d0421cd1f55af3... |
comment created time in 3 hours
pull request comment bitcoin/bitcoin
test: add test for corrupt wallet bdb logs
The following sections might be updated with supplementary metadata relevant to reviewers and maintainers.
Conflicts
Reviewers, this pull request conflicts with the following ones:
- #20892 (tests: Run both descriptor and legacy tests within a single test invocation by achow101)
If you consider this pull request important, please also help to review the conflicting pull requests. Ideally, start with the one that should be merged first.
comment created time in 4 hours
pull request comment bitcoin/bitcoin
test: Move MakeNoLogFileContext to libtest_util, and use it in bench
The following sections might be updated with supplementary metadata relevant to reviewers and maintainers.
Conflicts
Reviewers, this pull request conflicts with the following ones:
- #20915 (fuzz: Fail if message type is not fuzzed by MarcoFalke)
- #20833 (rpc/validation: enable packages through testmempoolaccept by glozow)
- #20773 (refactor: split CWallet::Create by S3RK)
- #19806 (validation: UTXO snapshot activation by jamesob)
If you consider this pull request important, please also help to review the conflicting pull requests. Ideally, start with the one that should be merged first.
comment created time in 4 hours
pull request comment bitcoin/bitcoin
bitcoind: Add -daemonwait option to wait for initialization
The following sections might be updated with supplementary metadata relevant to reviewers and maintainers.
Conflicts
Reviewers, this pull request conflicts with the following ones:
- #19471 (util: Make default arg values more specific by hebasto)
- #19160 (multiprocess: Add basic spawn and IPC support by ryanofsky)
If you consider this pull request important, please also help to review the conflicting pull requests. Ideally, start with the one that should be merged first.
comment created time in 4 hours
PR opened bitcoin/bitcoin
Fixes #20934 by using the "sync up" method described in https://github.com/bitcoin/bitcoin/issues/20538#issuecomment-738791868.
After improving robustness with this approach (commits 1-3), it turned out that there were still some fails, but those were unrelated to zmq: out of 500 runs, sync_mempool() or sync_blocks() timed out 3 times, which can happen because the trickle relay time has no upper bound -- hence in rare cases, it takes longer than 60s. This is fixed by enabling immediate tx relay on node1 (commit 4), which as a nice side-effect also gives us a rough 2x speedup for the test.
For further details, also see the explanations in the commit messages.
pr created time in 4 hours
pull request comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
Rebased. I think there are improvements that can be done in followup PRs. For example, #20892 implements running a test twice. But for now, I would like for this to be merged so that we can get more test coverage of descriptor wallets.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def run_test(self):
         self.nodes[1].createwallet(wallet_name="w1")
         w1 = self.nodes[1].get_wallet_rpc("w1")

         # w2 contains the private keys for w3
-        self.nodes[1].createwallet(wallet_name="w2")
+        self.nodes[1].createwallet(wallet_name="w2", blank=True)
         w2 = self.nodes[1].get_wallet_rpc("w2")
+        xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def run_test(self):
         self.nodes[1].createwallet(wallet_name="w1")
         w1 = self.nodes[1].get_wallet_rpc("w1")

         # w2 contains the private keys for w3
-        self.nodes[1].createwallet(wallet_name="w2")
+        self.nodes[1].createwallet(wallet_name="w2", blank=True)
         w2 = self.nodes[1].get_wallet_rpc("w2")
+        xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
+        xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg"
+        if self.options.descriptors:
+            w2.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True
+            },{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True,
+                "internal": True
+            }])
+        else:
+            w2.sethdseed(True)
+
         # w3 is a watch-only wallet, based on w2
         self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True)
         w3 = self.nodes[1].get_wallet_rpc("w3")
-        for _ in range(3):
-            a2_receive = w2.getnewaddress()
-            a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
-            res = w3.importmulti([{
-                "desc": w2.getaddressinfo(a2_receive)["desc"],
+        if self.options.descriptors:
+            # Match the privkeys in w2 for descriptors
+            res = w3.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpub + "/0/0/*)"),
                 "timestamp": "now",
+                "range": [0, 100],
                 "keypool": True,
+                "active": True,
                 "watchonly": True
             },{
-                "desc": w2.getaddressinfo(a2_change)["desc"],
+                "desc": descsum_create("wpkh(" + xpub + "/0/1/*)"),
                 "timestamp": "now",
+                "range": [0, 100],
                 "keypool": True,
+                "active": True,
                 "internal": True,
                 "watchonly": True
             }])
             assert_equal(res, [{"success": True}, {"success": True}])

-            w0.sendtoaddress(a2_receive, 10) # fund w3
-            self.nodes[0].generate(1)
-            self.sync_blocks()
-
-        # w4 has private keys enabled, but only contains watch-only keys (from w2)
-        self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)
-        w4 = self.nodes[1].get_wallet_rpc("w4")
         for _ in range(3):
             a2_receive = w2.getnewaddress()
-            res = w4.importmulti([{
-                "desc": w2.getaddressinfo(a2_receive)["desc"],
-                "timestamp": "now",
-                "keypool": False,
-                "watchonly": True
-            }])
-            assert_equal(res, [{"success": True}])
+            if not self.options.descriptors:
+                # Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors
+                a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
+                res = w3.importmulti([{
+                    "desc": w2.getaddressinfo(a2_receive)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "watchonly": True
+                },{
+                    "desc": w2.getaddressinfo(a2_change)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "internal": True,
+                    "watchonly": True
+                }])
+                assert_equal(res, [{"success": True}, {"success": True}])

-            w0.sendtoaddress(a2_receive, 10) # fund w4
+            w0.sendtoaddress(a2_receive, 10) # fund w3
             self.nodes[0].generate(1)
             self.sync_blocks()

+        if not self.options.descriptors:
+            # w4 has private keys enabled, but only contains watch-only keys (from w2)
+            # This is legacy wallet behavior only
Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def run_test(self):
         res = w2.walletprocesspsbt(res["psbt"])
         assert res["complete"]

-        self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")
-        self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))
-        res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)
-        res = w2.walletprocesspsbt(res["psbt"])
-        assert res["complete"]
+        if not self.options.descriptors:
+            # This tests legacy watch-only behavior only.
Added a comment.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
 class MempoolCompatibilityTest(BitcoinTestFramework):
     def set_test_params(self):
         self.num_nodes = 2
         self.wallet_names = [None]
+        self.requires_wallet = True
Perhaps, but that's a bit more complicated.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def skip_test_if_missing_module(self):
         self.skip_if_no_bitcoind_zmq()

     def run_test(self):
+        if self.is_wallet_compiled():
+            self.nodes[0].createwallet(wallet_name=self.default_wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
+            self.nodes[0].importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label='coinbase')
+            self.nodes[1].createwallet(wallet_name=self.default_wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
+            self.nodes[1].importprivkey(privkey=self.nodes[1].get_deterministic_priv_key().key, label='coinbase')
Yes. Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def set_test_params(self):
         self.supports_cli = False

     def run_test(self):
+        self.wallet = MiniWallet(self.nodes[0])
Added a comment. It is only used to generate a transaction so that last_transaction in getpeerinfo can be checked.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def run_test(self):
         self.log.info("Test -getinfo returns expected network and blockchain info")
         if self.is_wallet_compiled():
+            self.nodes[0].createwallet(wallet_name=self.default_wallet_name, descriptors=self.options.descriptors)
+            self.nodes[0].importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label='coinbase')
Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def setup_network(self):
         super().setup_network()

     def run_test(self):
+        if self.is_wallet_compiled():
+            # Setup the descriptors to be imported to the wallet
Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def import_deterministic_coinbase_privkeys(self):
             self.init_wallet(i)

     def init_wallet(self, i):
+        if not self.requires_wallet:
Done.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def parse_args(self):
         parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')

         group = parser.add_mutually_exclusive_group()
-        group.add_argument("--descriptors", default=False, action="store_true",
+        group.add_argument("--descriptors", action='store_const', const=True,
                            help="Run test using a descriptor wallet", dest='descriptors')
-        group.add_argument("--legacy-wallet", default=False, action="store_false",
+        group.add_argument("--legacy-wallet", action='store_const', const=False,
                            help="Run test using legacy wallets", dest='descriptors')

         self.add_options(parser)
         self.options = parser.parse_args()
         self.options.previous_releases_path = previous_releases_path

+        config = configparser.ConfigParser()
+        config.read_file(open(self.options.configfile))
+        self.config = config
+
+        if self.options.descriptors is None:
+            # Prefer BDB unless it isn't available
+            if self.is_bdb_compiled():
+                self.options.descriptors = False
+            elif self.is_sqlite_compiled():
+                self.options.descriptors = True
+            # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter
Done
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
         self.add_options(parser)
         self.options = parser.parse_args()
         self.options.previous_releases_path = previous_releases_path

+        config = configparser.ConfigParser()
+        config.read_file(open(self.options.configfile))
+        self.config = config
The is_*_compiled checks require self.config to already exist, so it must come before them and can't be at the bottom of the function.
comment created time in 5 hours
Pull request review comment bitcoin/bitcoin
 export LC_ALL=C.UTF-8

 export CONTAINER_NAME=ci_win64
-export DOCKER_NAME_TAG=ubuntu:18.04  # Check that bionic can cross-compile to win64 (bionic is used in the gitian build as well)
+export DOCKER_NAME_TAG=ubuntu:20.04
I don't like this change, but without it the CI fails:
A newer version of mingw-w64 & headers is required to build this version of Qt, so we (essentially) have to use a newer version of Ubuntu in the CI & gitian (I'll add those changes, but will PR them separately in advance). This was also discussed above.
comment created time in 6 hours
pull request comment bitcoin/bitcoin
Implement Bech32m and use it for v1+ segwit addresses
🐙 This pull request conflicts with the target branch and needs rebase.
Want to unsubscribe from rebase notifications on this pull request? Just convert this pull request to a "draft".
comment created time in 6 hours
Pull request review comment bitcoin/bitcoin
policy, wallet, refactor: check for non-representable CFeeRates
 BOOST_AUTO_TEST_CASE(GetFeeTest)
     feeRate = CFeeRate(1000);
     altFeeRate = CFeeRate(feeRate);
     BOOST_CHECK_EQUAL(feeRate.GetFee(100), altFeeRate.GetFee(100));
+    BOOST_CHECK(!altFeeRate.IsZero());
 }

 BOOST_AUTO_TEST_CASE(CFeeRateConstructorTest)
 {
     // Test CFeeRate(CAmount fee_rate, size_t bytes) constructor
     // full constructor
+    BOOST_CHECK(CFeeRate(CAmount(0), 0).IsZero());
in b427ce178592283b9211b9fbd19874c7349bfd7e:
nit: I would have stuck these IsZero() tests in one place below since they are testing the function and not the ctor.
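Something along these lines (hypothetical grouping, test case name made up):

BOOST_AUTO_TEST_CASE(IsZeroTest)
{
    // Exercise IsZero() on its own rather than inside the constructor tests.
    BOOST_CHECK(CFeeRate(CAmount(0)).IsZero());
    BOOST_CHECK(!CFeeRate(CAmount(1000)).IsZero());
}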
comment created time in 6 hours
Pull request review comment bitcoin/bitcoin
policy, wallet, refactor: check for non-representable CFeeRates
 BOOST_AUTO_TEST_CASE(GetFeeTest)
     CFeeRate(MAX_MONEY, std::numeric_limits<size_t>::max() >> 1).GetFeePerK();
 }

+BOOST_AUTO_TEST_CASE(CFeeRateNamedConstructorsTest)
+{
+    // Test CFeerate(CAmount fee_rate, FeeEstimatemode mode) constructor
+    // with BTC/kvB, returns same values as CFeeRate(amount) or CFeeRate(amount, 1000)
+    BOOST_CHECK(CFeeRate::FromBtcKb(CAmount(-1)) == CFeeRate(-1));
+    BOOST_CHECK(CFeeRate::FromBtcKb(CAmount(-1)) == CFeeRate(-1, 1000));
in afba3e188fcb14236324a0d1355445a99b85d155:
Aren't we already testing somewhere that CFeeRate(-1) == CFeeRate(-1, 1000)? Then I think we could save one of those and some other similar lines following.
comment created time in 6 hours
Pull request review comment bitcoin/bitcoin
policy, wallet, refactor: check for non-representable CFeeRates
 class CFeeRate
     SERIALIZE_METHODS(CFeeRate, obj) { READWRITE(obj.nSatoshisPerK); }
 };

+/** Construct a CFeeRate from a CAmount in sat/vB */
+inline CFeeRate CFeeRate::FromSatB(CAmount fee_rate) { return CFeeRate(fee_rate, COIN); }
in afba3e188fcb14236324a0d1355445a99b85d155:
Why not just have the body of the ctor in the class?
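i.e. roughly this (illustrative fragment only, same semantics as the diff above):

class CFeeRate
{
public:
    // ...
    /** Construct a CFeeRate from a CAmount in sat/vB (body defined in-class). */
    static CFeeRate FromSatB(CAmount fee_rate) { return CFeeRate(fee_rate, COIN); }
    // ...
};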
comment created time in 7 hours
Pull request review comment bitcoin/bitcoin
Disable and fix tests for when BDB is not compiled
     def setup_network(self):
         super().setup_network()

     def run_test(self):
+        if self.is_wallet_compiled():
+            # Setup the descriptors to be imported to the wallet
+            seed = "cTdGmKFWpbvpKQ7ejrdzqYT2hhjyb3GPHnLAK7wdi5Em67YLwSm9"
+            xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
Until we implement private descriptor export, that won't be possible.
comment created time in 6 hours
PR merged bitcoin/bitcoin
Closes: https://github.com/bitcoin/bitcoin/issues/17217
pr closed time in 6 hours