pytest: don't assume disconnect finished atomically, and suppress interfering redirects.

In various places, we assumed that when `connected` is false,
everything is finished.  This is not true: we should wait for the
state we expect.

In addition, various places allowed reconnections, which interfered
with the logic; suppress them.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Author:    Rusty Russell
Date:      2022-07-18 21:42:28 +09:30
Committer: neil saitug
Parent:    aec307f7ba
Commit:    2962b93199

5 changed files with 61 additions and 30 deletions
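
The underlying pattern, as a minimal sketch (assuming the wait_for and only_one helpers from pyln-testing's test utils and two test nodes l1 and l2, as in the hunks below):

    # Fragile: 'connected' can flip to False while closingd is still
    # finishing, so this races with the close itself.
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Robust: poll until the channel reaches the state the test expects.
    wait_for(lambda: any(c['state'] == 'CLOSINGD_COMPLETE'
                         for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']))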

@@ -173,8 +173,10 @@ def test_closing_id(node_factory):
     l1.fundchannel(l2, 10**6)
     cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
     l2.rpc.close(cid)
-    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
-    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
+    # Technically, l2 disconnects before l1 finishes analyzing the final msg.
+    # Wait for them to both consider it closed!
+    wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']]))
+    wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']]))

     # Close by peer ID.
     l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
@@ -182,8 +184,8 @@ def test_closing_id(node_factory):
     l2.fundchannel(l1, 10**6)
     pid = l1.info['id']
     l2.rpc.close(pid)
-    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
-    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
+    wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']]))
+    wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']]))


 @unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
@@ -776,7 +778,8 @@ def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
     coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
     opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
             'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
-            'may_reconnect': True, 'plugin': coin_mvt_plugin}
+            'may_reconnect': True, 'plugin': coin_mvt_plugin,
+            'dev-no-reconnect': None}
     l1, l2, = node_factory.get_nodes(2, opts=opts)
@@ -1056,17 +1059,18 @@ def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
 @unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
 @pytest.mark.openchannel('v2')
 @unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
-@pytest.mark.developer("requres 'dev-queryrates'")
+@pytest.mark.developer("requres 'dev-queryrates', dev-no-reconnect")
 def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
     '''
     Check that lessor can recover funds if lessee cheats
     '''
     opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
              'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
-             'may_reconnect': True, 'allow_broken_log': True},
+             'may_reconnect': True, 'dev-no-reconnect': None,
+             'allow_broken_log': True},
             {'funder-policy': 'match', 'funder-policy-mod': 100,
              'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
-             'may_reconnect': True}]
+             'may_reconnect': True, 'dev-no-reconnect': None}]
     l1, l2, = node_factory.get_nodes(2, opts=opts)
     amount = 500000
     feerate = 2000
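
The suppression half of the fix relies on a pyln-testing convention worth spelling out: an option whose value is None is passed to lightningd as a bare flag. A sketch of the two knobs involved, using the same names as the hunks:

    # 'may_reconnect' (underscore) is a test-framework option: it permits the
    # test itself to restart nodes and reconnect them manually.
    # 'dev-no-reconnect' (dashes) is passed through as --dev-no-reconnect,
    # which stops lightningd reconnecting on its own and interfering.
    l1, l2 = node_factory.get_nodes(2, opts={'may_reconnect': True,
                                             'dev-no-reconnect': None})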
@@ -1098,7 +1102,7 @@ def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
     l1_db_path_bak = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
     copyfile(l1_db_path, l1_db_path_bak)
     l1.start()
-    l1.rpc.connect(l1.info['id'], 'localhost', l1.port)
+    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     sync_blockheight(bitcoind, [l1])

     # push some money from l2->l1, so the commit counter advances
@@ -2669,8 +2673,8 @@ def test_onchain_different_fees(node_factory, bitcoind, executor):
     # Now, 100 blocks it should be done.
     bitcoind.generate_block(100)
-    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
-    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
+    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
+    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])


 @pytest.mark.developer("needs DEVELOPER=1")
@@ -3443,9 +3447,10 @@ def test_you_forgot_closed_channel(node_factory, executor):
     wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
     assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'

-    # l2 closes on us.
-    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
-
     # l1 reconnects, it should succeed.
+    if only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']:
+        l1.rpc.disconnect(l2.info['id'], force=True)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     fut.result(TIMEOUT)
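
When a test cannot know whether the peer has already dropped the connection, waiting is not enough; the hunk above instead forces a known state before reconnecting. The same guard in isolation (a sketch; force=True drops the connection even though a channel is still active):

    if only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']:
        l1.rpc.disconnect(l2.info['id'], force=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)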

@@ -284,7 +284,7 @@ def test_bad_opening(node_factory):
     l2.daemon.wait_for_log('to_self_delay 100 larger than 99')


-@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
+@pytest.mark.developer("gossip without DEVELOPER=1 is slow, need dev-no-reconnect")
 @unittest.skipIf(TEST_NETWORK != 'regtest', "Fee computation and limits are network specific")
 @pytest.mark.slow_test
 @pytest.mark.openchannel('v1')
@@ -319,10 +319,10 @@ def test_opening_tiny_channel(node_factory):
     l3_min_capacity = 10000  # the current default
     l4_min_capacity = 20000  # a server with more than default minimum
-    opts = [{'min-capacity-sat': 0},
-            {'min-capacity-sat': l2_min_capacity},
-            {'min-capacity-sat': l3_min_capacity},
-            {'min-capacity-sat': l4_min_capacity}]
+    opts = [{'min-capacity-sat': 0, 'dev-no-reconnect': None},
+            {'min-capacity-sat': l2_min_capacity, 'dev-no-reconnect': None},
+            {'min-capacity-sat': l3_min_capacity, 'dev-no-reconnect': None},
+            {'min-capacity-sat': l4_min_capacity, 'dev-no-reconnect': None}]
     l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
@@ -330,16 +330,19 @@ def test_opening_tiny_channel(node_factory):
     with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
         l1.fundchannel(l2, l2_min_capacity + overhead - 1)
+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     l1.fundchannel(l2, l2_min_capacity + overhead)

     with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
         l1.fundchannel(l3, l3_min_capacity + overhead - 1)
+    wait_for(lambda: l1.rpc.listpeers(l3.info['id'])['peers'] == [])
     l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
     l1.fundchannel(l3, l3_min_capacity + overhead)

     with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
         l1.fundchannel(l4, l4_min_capacity + overhead - 1)
+    wait_for(lambda: l1.rpc.listpeers(l4.info['id'])['peers'] == [])
     l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
     l1.fundchannel(l4, l4_min_capacity + overhead)
@@ -348,6 +351,7 @@ def test_opening_tiny_channel(node_factory):
     l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
     with pytest.raises(RpcError, match=r"channel capacity is .*, which is below .*sat"):
         l3.fundchannel(l2, l3_min_capacity + overhead - 1)
+    wait_for(lambda: l3.rpc.listpeers(l2.info['id'])['peers'] == [])
     l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
     l3.fundchannel(l2, l3_min_capacity + overhead)
@@ -1213,8 +1217,8 @@ def test_funding_by_utxos(node_factory, bitcoind):
 @pytest.mark.developer("needs dev_forget_channel")
 @pytest.mark.openchannel('v1')
 def test_funding_external_wallet_corners(node_factory, bitcoind):
-    l1 = node_factory.get_node(may_reconnect=True)
-    l2 = node_factory.get_node(may_reconnect=True)
+    l1, l2 = node_factory.get_nodes(2, opts={'may_reconnect': True,
+                                             'dev-no-reconnect': None})

     amount = 2**24
     l1.fundwallet(amount + 10000000)
@@ -1265,6 +1269,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
     l1.rpc.fundchannel_cancel(l2.info['id'])

     # Cancelling causes disconnection.
+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     amount2 = 1000000
     funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount2)['funding_address']
@@ -1286,6 +1291,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
     # But must unreserve inputs manually.
     l1.rpc.txdiscard(prep['txid'])

+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
     prep = l1.rpc.txprepare([{funding_addr: amount}])
@@ -1307,6 +1313,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
            == 'CHANNELD_AWAITING_LOCKIN')

     # on reconnect, channel should get destroyed
+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     l1.daemon.wait_for_log('Unknown channel .* for WIRE_CHANNEL_REESTABLISH')
     wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
@@ -1316,6 +1323,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
     l1.rpc.txdiscard(prep['txid'])

     # we have to connect again, because we got disconnected when everything errored
+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     funding_addr = l1.rpc.fundchannel_start(l2.info['id'], amount)['funding_address']
     prep = l1.rpc.txprepare([{funding_addr: amount}])
@@ -1891,9 +1899,10 @@ def test_multifunding_disconnect(node_factory):
     disconnects = ["-WIRE_INIT",
                    "-WIRE_ACCEPT_CHANNEL",
                    "+WIRE_ACCEPT_CHANNEL"]
-    l1 = node_factory.get_node()
-    l2 = node_factory.get_node(disconnect=disconnects)
-    l3 = node_factory.get_node()
+    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'dev-no-reconnect': None},
+                                                 {'dev-no-reconnect': None,
+                                                  'disconnect': disconnects},
+                                                 {'dev-no-reconnect': None}])

     l1.fundwallet(2000000)
@@ -1907,6 +1916,7 @@ def test_multifunding_disconnect(node_factory):
     for d in disconnects:
         with pytest.raises(RpcError):
             l1.rpc.multifundchannel(destinations)
+        wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])

     # TODO: failing at the fundchannel_complete phase
     # (-WIRE_FUNDING_SIGNED +-WIRE_FUNDING_SIGNED)
@@ -1946,6 +1956,9 @@ def test_multifunding_wumbo(node_factory):
     with pytest.raises(RpcError, match='Amount exceeded'):
         l1.rpc.multifundchannel(destinations)

+    # Make sure it's disconnected from l2 before retrying.
+    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
+
     # This should succeed.
     destinations = [{"id": '{}@localhost:{}'.format(l2.info['id'], l2.port),
                      "amount": 1 << 24},
@@ -2538,6 +2551,7 @@ def test_multiple_channels(node_factory):
         l2.daemon.wait_for_log(
             r'State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE'
         )
+    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] is False)

     channels = only_one(l1.rpc.listpeers()['peers'])['channels']
     assert len(channels) == 3
@@ -2567,7 +2581,7 @@ def test_forget_channel(node_factory):

     # Forcing should work
     l1.rpc.dev_forget_channel(l2.info['id'], True)
-    assert len(l1.rpc.listpeers()['peers']) == 0
+    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

     # And restarting should keep that peer forgotten
     l1.restart()
@@ -2715,7 +2729,7 @@ def test_fundee_forget_funding_tx_unconfirmed(node_factory, bitcoind):
     l2.daemon.wait_for_log(r'Forgetting channel: It has been {}\d blocks'.format(str(blocks)[:-1]))

     # fundee will also forget and disconnect from peer.
-    assert len(l2.rpc.listpeers(l1.info['id'])['peers']) == 0
+    wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'] == [])


 @pytest.mark.developer("needs --dev-max-funding-unconfirmed-blocks")

@@ -265,7 +265,7 @@ def test_announce_dns_without_port(node_factory, bitcoind):
 def test_gossip_timestamp_filter(node_factory, bitcoind, chainparams):
     # Updates get backdated 5 seconds with --dev-fast-gossip.
     backdate = 5
-    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False)
+    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False, opts={'log-level': 'io'})

     genesis_blockhash = chainparams['chain_hash']
     before_anything = int(time.time())

@@ -1286,9 +1286,12 @@ def test_bitcoind_goes_backwards(node_factory, bitcoind):
 @pytest.mark.openchannel('v1')
 @pytest.mark.openchannel('v2')
+@pytest.mark.developer("needs dev-no-reconnect")
 def test_reserve_enforcement(node_factory, executor):
     """Channeld should disallow you spending into your reserve"""
-    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True, 'allow_warning': True})
+    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True,
+                                              'dev-no-reconnect': None,
+                                              'allow_warning': True})

     # Pay 1000 satoshi to l2.
     l1.pay(l2, 1000000)
@@ -1302,7 +1305,7 @@ def test_reserve_enforcement(node_factory, executor):
     l2.db.execute('UPDATE channel_configs SET channel_reserve_satoshis=0')
     l2.start()
-    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
+    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)

     # This should be impossible to pay entire thing back: l1 should warn and
     # close connection for trying to violate reserve.
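
A consequence visible in the hunk above: with --dev-no-reconnect in force, a test can no longer wait for peers to re-establish the connection by themselves after a restart; it has to connect explicitly (same names as the hunk):

    l2.start()
    # Auto-reconnect is suppressed, so waiting on 'connected' would hang.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)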

@@ -21,7 +21,7 @@ def find_next_feerate(node, peer):
 @pytest.mark.openchannel('v2')
 @pytest.mark.developer("requres 'dev-queryrates'")
 def test_queryrates(node_factory, bitcoind):
-    l1, l2 = node_factory.get_nodes(2)
+    l1, l2 = node_factory.get_nodes(2, opts={'dev-no-reconnect': None})

     amount = 10 ** 6
@@ -42,6 +42,7 @@ def test_queryrates(node_factory, bitcoind):
                                'channel_fee_max_base_msat': '3sat',
                                'channel_fee_max_proportional_thousandths': 101})

+    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     result = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
     assert result['our_funding_msat'] == Millisatoshi(amount * 1000)
@@ -128,6 +129,10 @@ def test_multifunding_v2_best_effort(node_factory, bitcoind):
         for node in node_list:
             node.daemon.wait_for_log(r'to CLOSINGD_COMPLETE')

+        # Make sure disconnections are complete
+        if not failed_sign:
+            wait_for(lambda: all([c['connected'] is False for c in l1.rpc.listpeers()['peers']]))
+
     # With 2 down, it will fail to fund channel
     l2.stop()
     l3.stop()
@@ -416,10 +421,12 @@ def test_v2_rbf_liquidity_ad(node_factory, bitcoind, chainparams):
 @unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
+@pytest.mark.developer("uses dev-no-reconnect")
 @pytest.mark.openchannel('v2')
 def test_v2_rbf_multi(node_factory, bitcoind, chainparams):
     l1, l2 = node_factory.get_nodes(2,
                                     opts={'may_reconnect': True,
+                                          'dev-no-reconnect': None,
                                           'allow_warning': True})

     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@@ -463,6 +470,7 @@ def test_v2_rbf_multi(node_factory, bitcoind, chainparams):
     # Abort this open attempt! We will re-try
     aborted = l1.rpc.openchannel_abort(chan_id)
     assert not aborted['channel_canceled']
+    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)

     # Do the bump, again, same feerate
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@@ -653,7 +661,7 @@ def test_rbf_reconnect_tx_construct(node_factory, bitcoind, chainparams):
         l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
         with pytest.raises(RpcError):
             l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
-        assert l1.rpc.getpeer(l2.info['id']) is not None
+        wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False)

     # Now we finish off the completes failure check
     for d in disconnects[-2:]:
@@ -661,6 +669,7 @@ def test_rbf_reconnect_tx_construct(node_factory, bitcoind, chainparams):
         bump = l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
         with pytest.raises(RpcError):
             update = l1.rpc.openchannel_update(chan_id, bump['psbt'])
+        wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False)

     # Now we succeed
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)