tests: use listpeerchannels.

Vincenzo Palazzo
2023-01-12 11:55:55 +10:30
committed by Rusty Russell
parent 1fa32333b9
commit a2347c7452
11 changed files with 300 additions and 289 deletions
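The whole patch applies one mechanical change: fields the tests used to dig out of the per-peer 'channels' array returned by listpeers (or getpeer) are now read via the dedicated listpeerchannels call, which returns the channels directly and can be filtered by peer id. A minimal sketch of the access-pattern change, assuming the pyln-testing fixtures (l1, l2) and the only_one() helper these tests already use:

# Old shape: channels nested under each peer returned by listpeers.
chan_old = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])

# New shape: listpeerchannels returns the channels list directly,
# optionally filtered by peer id, so one level of nesting disappears.
chan_new = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])

# During the transition both forms exist, so they should agree.
assert chan_old['channel_id'] == chan_new['channel_id']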


@@ -703,7 +703,7 @@ def test_rebalance_tracking(node_factory, bitcoind):
 wait_for(lambda: 'invoice' not in [ev['tag'] for ev in l1.rpc.bkpr_listincome()['income_events']])
 inc_evs = l1.rpc.bkpr_listincome()['income_events']
-outbound_chan_id = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['channel_id']
+outbound_chan_id = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['channel_id']
 outbound_ev = only_one([ev for ev in inc_evs if ev['tag'] == 'rebalance_fee'])
 assert outbound_ev['account'] == outbound_chan_id


@@ -32,16 +32,16 @@ def test_closing_simple(node_factory, bitcoind, chainparams):
 assert bitcoind.rpc.getmempoolinfo()['size'] == 0
-billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
+billboard = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status']
 assert billboard == ['CHANNELD_NORMAL:Channel ready for use.']
-billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
+billboard = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['status']
 assert billboard == ['CHANNELD_NORMAL:Channel ready for use.']
 bitcoind.generate_block(5)
 wait_for(lambda: len(l1.getactivechannels()) == 2)
 wait_for(lambda: len(l2.getactivechannels()) == 2)
-billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
+billboard = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status']
 # This may either be from a local_update or an announce, so just
 # check for the substring
 assert 'CHANNELD_NORMAL:Channel ready for use.' in billboard[0]
@@ -67,7 +67,7 @@ def test_closing_simple(node_factory, bitcoind, chainparams):
 # Now grab the close transaction
 closetxid = only_one(bitcoind.rpc.getrawmempool(False))
-billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
+billboard = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status']
 assert billboard == [
 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
 ]
@@ -80,14 +80,14 @@ def test_closing_simple(node_factory, bitcoind, chainparams):
 assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
 assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
-wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
+wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status'] == [
 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
 'ONCHAIN:Tracking mutual close transaction',
 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
 ])
 bitcoind.generate_block(9)
-wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
+wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status'] == [
 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
 'ONCHAIN:Tracking mutual close transaction',
 'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
@@ -172,12 +172,12 @@ def test_closing_id(node_factory):
 # Close by full channel ID.
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
 l1.fundchannel(l2, 10**6)
-cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
+cid = l2.rpc.listpeerchannels()['channels'][0]['channel_id']
 l2.rpc.close(cid)
 # Technically, l2 disconnects before l1 finishes analyzing the final msg.
 # Wait for them to both consider it closed!
-wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']]))
-wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']]))
+wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']]))
+wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in l2.rpc.listpeerchannels(l1.info['id'])['channels']]))
 # Close by peer ID.
 l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
@@ -185,8 +185,8 @@ def test_closing_id(node_factory):
 l2.fundchannel(l1, 10**6)
 pid = l1.info['id']
 l2.rpc.close(pid)
-wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']]))
-wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']]))
+wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']]))
+wait_for(lambda: any([c['state'] == 'CLOSINGD_COMPLETE' for c in l2.rpc.listpeerchannels(l1.info['id'])['channels']]))
 @unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
@@ -245,7 +245,7 @@ def test_closing_different_fees(node_factory, bitcoind, executor):
 bitcoind.generate_block(1)
 for p in peers:
 p.daemon.wait_for_log(' to ONCHAIN')
-wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
+wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeerchannels(l1.info['id'])['channels'])['status'])
 l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@@ -306,7 +306,7 @@ def test_closing_specified_destination(node_factory, bitcoind, chainparams):
 # Now grab the close transaction
 closetxs = {}
 for i, n in enumerate([l2, l3, l4]):
-billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
+billboard = only_one(l1.rpc.listpeerchannels(n.info['id'])['channels'])['status'][0]
 m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
 closetxs[n] = m.group(1)
@@ -371,8 +371,7 @@ def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
 def get_fee_from_status(node, peer_id, i):
 nonlocal fees_from_status
-peer = only_one(node.rpc.listpeers(peer_id)['peers'])
-channel = only_one(peer['channels'])
+channel = only_one(node.rpc.listpeerchannels(peer_id)['channels'])
 status = channel['status'][0]
 m = status_agreed_regex.search(status)
@@ -551,7 +550,7 @@ def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
 bitcoind.generate_block(100)
 sync_blockheight(bitcoind, [l1, l2])
-wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
+wait_for(lambda: l2.rpc.listpeerchannels()['channels'] == [])
 # Do one last pass over the logs to extract the reactions l2 sent
 l2.daemon.logsearch_start = needle
@@ -680,7 +679,8 @@ def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
 bitcoind.generate_block(100)
 sync_blockheight(bitcoind, [l1, l2])
-wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
+peer = only_one(l2.rpc.listpeers()["peers"])
+wait_for(lambda: l2.rpc.listpeerchannels(peer["id"])['channels'] == [])
 # Do one last pass over the logs to extract the reactions l2 sent
 l2.daemon.logsearch_start = needle
@@ -801,7 +801,8 @@ def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
 est_fees = calc_lease_fee(amount, feerate, rates)
 # This should be the accepter's amount
-fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
+peer = only_one(l1.rpc.listpeers()["peers"])
+fundings = only_one(l1.rpc.listpeerchannels(peer["id"])['channels'])['funding']
 assert Millisatoshi(amount * 1000) == fundings['remote_funds_msat']
 assert Millisatoshi(est_fees + amount * 1000) == fundings['local_funds_msat']
 assert Millisatoshi(est_fees) == fundings['fee_paid_msat']
@@ -818,7 +819,8 @@ def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
 # make sure it's completely resolved before we generate blocks,
 # otherwise it can close HTLC!
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['htlcs'] == [])
+peer = only_one(l2.rpc.listpeers()["peers"])
+wait_for(lambda: only_one(l2.rpc.listpeerchannels(peer["id"])['channels'])['htlcs'] == [])
 # l2 attempts to close a channel that it leased, should fail
 with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
@@ -927,7 +929,8 @@ def test_channel_lease_unilat_closes(node_factory, bitcoind):
 est_fees = calc_lease_fee(amount, feerate, rates)
 # This should be the accepter's amount
-fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
+peer = only_one(l1.rpc.listpeers()["peers"])
+fundings = only_one(l1.rpc.listpeerchannels(peer["id"])['channels'])['funding']
 assert Millisatoshi(amount * 1000) == Millisatoshi(fundings['remote_funds_msat'])
 assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['local_funds_msat'])
@@ -1206,7 +1209,7 @@ def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
 l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
 l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
-wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
+wait_for(lambda: len(l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]['htlcs']) == 1)
 # make database snapshot of l2
 l2.stop()
@@ -1397,7 +1400,7 @@ def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
 l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
 l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
-wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
+wait_for(lambda: len(l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]['htlcs']) == 2)
 # make database snapshot of l2
 l2.stop()
@@ -2717,10 +2720,9 @@ def test_onchain_different_fees(node_factory, bitcoind, executor):
 # Now, 100 blocks it should be done.
 bitcoind.generate_block(100)
 # May reconnect, may not: if not, peer does not exist!
-wait_for(lambda: all(p['channels'] == [] for p in l1.rpc.listpeers()['peers']))
-wait_for(lambda: all(p['channels'] == [] for p in l2.rpc.listpeers()['peers']))
+wait_for(lambda: l1.rpc.listpeerchannels()['channels'] == [])
+wait_for(lambda: l2.rpc.listpeerchannels()['channels'] == [])
 @pytest.mark.developer("needs DEVELOPER=1")
@@ -2799,9 +2801,9 @@ def setup_multihtlc_test(node_factory, bitcoind):
 # Make sure they're all in normal state.
 bitcoind.generate_block(1)
-wait_for(lambda: all([only_one(p['channels'])['state'] == 'CHANNELD_NORMAL'
+wait_for(lambda: all([only_one(l4.rpc.listpeerchannels(p["id"])['channels'])['state'] == 'CHANNELD_NORMAL'
 for p in l4.rpc.listpeers()['peers']]))
-wait_for(lambda: all([only_one(p['channels'])['state'] == 'CHANNELD_NORMAL'
+wait_for(lambda: all([only_one(l5.rpc.listpeerchannels(p["id"])['channels'])['state'] == 'CHANNELD_NORMAL'
 for p in l5.rpc.listpeers()['peers']]))
 # Balance them
@@ -2882,7 +2884,7 @@ def setup_multihtlc_test(node_factory, bitcoind):
 l1.daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 4)
 # We have 6 HTLCs trapped in l4-l5 channel.
-assert len(only_one(only_one(l4.rpc.listpeers(l5.info['id'])['peers'])['channels'])['htlcs']) == 6
+assert len(only_one(l4.rpc.listpeerchannels(l5.info['id'])['channels'])['htlcs']) == 6
 # We are all connected.
 for n in l1, l2, l3, l4, l5, l6, l7:
@@ -3144,12 +3146,12 @@ def test_permfail(node_factory, bitcoind):
 l2.daemon.wait_for_log(' to ONCHAIN')
 l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
-wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
+wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status']
 == ['ONCHAIN:Tracking their unilateral close',
 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
 def check_billboard():
-billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
+billboard = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['status']
 return (
 len(billboard) == 2
 and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
@@ -3176,7 +3178,7 @@ def test_permfail(node_factory, bitcoind):
 bitcoind.generate_block(95)
 wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
-wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
+wait_for(lambda: only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['status'] == [
 'ONCHAIN:Tracking our own unilateral close',
 'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
 ])
@@ -3234,8 +3236,8 @@ def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
 l2.rpc.close(l1.info['id'], unilateraltimeout=1)
 bitcoind.generate_block(1, wait_for_mempool=1)
 fut.result(TIMEOUT)
-wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
-wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
+wait_for(lambda: [c['state'] for c in l1.rpc.listpeerchannels()['channels']] == ['ONCHAIN'])
+wait_for(lambda: [c['state'] for c in l2.rpc.listpeerchannels()['channels']] == ['ONCHAIN'])
 # Works when l2 closes channel, too.
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@@ -3250,8 +3252,8 @@ def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
 fut.result(TIMEOUT)
 bitcoind.generate_block(1, wait_for_mempool=1)
-wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
-wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
+wait_for(lambda: [c['state'] for c in l1.rpc.listpeerchannels()['channels']] == ['ONCHAIN', 'ONCHAIN'])
+wait_for(lambda: [c['state'] for c in l2.rpc.listpeerchannels()['channels']] == ['ONCHAIN', 'ONCHAIN'])
 # Figure out what address it will try to use.
 keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
@@ -3273,7 +3275,7 @@ def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
 l1.rpc.fundchannel(l2.info['id'], 1000000)
 l1.rpc.close(l2.info['id'])
-wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
+wait_for(lambda: sorted([c['state'] for c in l1.rpc.listpeerchannels()['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
 @pytest.mark.developer("needs to set upfront_shutdown_script")
@@ -3401,8 +3403,8 @@ def test_closing_higherfee(node_factory, bitcoind, executor):
 fut.result(TIMEOUT)
 # But we still complete negotiation!
-wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
-wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
+wait_for(lambda: l1.rpc.listpeerchannels()['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
+wait_for(lambda: l2.rpc.listpeerchannels()['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
 @unittest.skipIf(True, "Test is extremely flaky")
@@ -3438,8 +3440,8 @@ def test_htlc_rexmit_while_closing(node_factory, executor):
 # Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
 # WIRE_REVOKE_AND_ACK.
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
-assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
+assert only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
 # They don't realize they're not talking, so disconnect and reconnect.
 l1.rpc.disconnect(l2.info['id'], force=True)
@@ -3468,8 +3470,8 @@ def test_you_forgot_closed_channel(node_factory, executor):
 fut = executor.submit(l1.rpc.close, l2.info['id'])
 # l2 considers the closing done, l1 does not
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
-assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_COMPLETE')
+assert only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
 # l1 won't send anything else until we reconnect, then it should succeed.
 l1.rpc.disconnect(l2.info['id'], force=True)
@@ -3493,8 +3495,8 @@ def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
 fut = executor.submit(l1.rpc.close, l2.info['id'])
 # l2 considers the closing done, l1 does not
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
-assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_COMPLETE')
+assert only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
 # l1 does not see any new blocks.
 def no_new_blocks(req):
@@ -3505,7 +3507,7 @@ def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
 # Close transaction mined
 bitcoind.generate_block(1, wait_for_mempool=1)
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'ONCHAIN')
 # l1 reconnects, it should succeed.
 l1.rpc.disconnect(l2.info['id'], force=True)
@@ -3538,11 +3540,11 @@ def test_segwit_anyshutdown(node_factory, bitcoind, executor):
 # because the resulting tx is too small! Balance channel so close
 # has two outputs.
 bitcoind.generate_block(1, wait_for_mempool=1)
-wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
+wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in l1.rpc.listpeerchannels()['channels']]))
 l1.pay(l2, 10**9 // 2)
 l1.rpc.close(l2.info['id'], destination=addr)
 bitcoind.generate_block(1, wait_for_mempool=1)
-wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
+wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in l1.rpc.listpeerchannels()['channels']]))
 @pytest.mark.developer("needs to manipulate features")
@@ -3565,7 +3567,7 @@ def test_anysegwit_close_needs_feature(node_factory, bitcoind):
 # Now it will work!
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
 l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
-wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
+wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CLOSINGD_COMPLETE')
 bitcoind.generate_block(1, wait_for_mempool=1)
@@ -3734,7 +3736,8 @@ def test_onchain_rexmit_tx(node_factory, bitcoind):
 l2.stop()
 l1.rpc.close(l2.info['id'], unilateraltimeout=1)
-wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'AWAITING_UNILATERAL')
+peer = only_one(l1.rpc.listpeers()["peers"])
+wait_for(lambda: only_one(l1.rpc.listpeerchannels(peer["id"])['channels'])['state'] == 'AWAITING_UNILATERAL')
 l1.stop()
 assert bitcoind.rpc.getrawmempool() == []


@@ -28,8 +28,8 @@ def test_connect_basic(node_factory):
 # These should be in openingd.
 assert l1.rpc.getpeer(l2.info['id'])['connected']
 assert l2.rpc.getpeer(l1.info['id'])['connected']
-assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
-assert len(l2.rpc.getpeer(l1.info['id'])['channels']) == 0
+assert len(l1.rpc.listpeerchannels(l2.info['id'])['channels']) == 0
+assert len(l2.rpc.listpeerchannels(l1.info['id'])['channels']) == 0
 # Reconnect should be a noop
 ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
@@ -268,8 +268,8 @@ def test_connection_moved(node_factory, executor):
 def test_balance(node_factory):
 l1, l2 = node_factory.line_graph(2, fundchannel=True)
-p1 = only_one(l1.rpc.getpeer(peer_id=l2.info['id'], level='info')['channels'])
-p2 = only_one(l2.rpc.getpeer(l1.info['id'], 'info')['channels'])
+p1 = only_one(l1.rpc.listpeerchannels(peer_id=l2.info['id'])['channels'])
+p2 = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])
 assert p1['to_us_msat'] == 10**6 * 1000
 assert p1['total_msat'] == 10**6 * 1000
 assert p2['to_us_msat'] == 0
@@ -406,8 +406,7 @@ def test_channel_abandon(node_factory, bitcoind):
 bitcoind.generate_block(1, wait_for_mempool=withdraw['txid'])
 # FIXME: lightningd should notice channel will never now open!
-print(l1.rpc.listpeers())
-assert (only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state']
+assert (only_one(l1.rpc.listpeerchannels()['channels'])['state']
 == 'CHANNELD_AWAITING_LOCKIN')
@@ -579,7 +578,7 @@ def test_disconnect_half_signed(node_factory):
 # Peer remembers, opener doesn't.
 wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
-assert len(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']) == 1
+assert len(l2.rpc.listpeerchannels(l1.info['id'])['channels']) == 1
 @pytest.mark.developer
@@ -923,7 +922,7 @@ def test_reconnect_remote_sends_no_sigs(node_factory):
 l1.restart()
 # l2 will now uses (REMOTE's) announcement_signatures it has stored
-wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['status'] == [
 'CHANNELD_NORMAL:Reconnected, and reestablished.',
 'CHANNELD_NORMAL:Channel ready for use. Channel announced.'])
@@ -970,8 +969,8 @@ def test_shutdown_awaiting_lockin(node_factory, bitcoind):
 bitcoind.generate_block(100)
 # Won't disconnect!
-wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
-wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
+wait_for(lambda: l1.rpc.listpeerchannels()['channels'] == [])
+wait_for(lambda: l2.rpc.listpeerchannels()['channels'] == [])
 @pytest.mark.openchannel('v1')
@@ -1316,7 +1315,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
 l1.rpc.disconnect(l2.info['id'], force=True)
 wait_for(lambda: not only_one(l1.rpc.listpeers()['peers'])['connected'])
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state']
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state']
 == 'CHANNELD_AWAITING_LOCKIN')
 assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']
@@ -1597,7 +1596,7 @@ def test_funding_close_upfront(node_factory, bitcoind):
 return False
 return any([c['state'] == 'CHANNELD_AWAITING_LOCKIN'
 or c['state'] == 'CHANNELD_NORMAL'
-for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']])
+for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']])
 def _fundchannel(l1, l2, amount, close_to):
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@@ -1614,7 +1613,7 @@ def test_funding_close_upfront(node_factory, bitcoind):
 assert 'close_to' not in resp
 for node in [l1, l2]:
-channel = node.rpc.listpeers()['peers'][0]['channels'][-1]
+channel = node.rpc.listpeerchannels()['channels'][-1]
 assert amount * 1000 == channel['total_msat']
 def _close(src, dst, addr=None):
@@ -1640,21 +1639,21 @@ def test_funding_close_upfront(node_factory, bitcoind):
 # check that you can provide a closing address upfront
 addr = l1.rpc.newaddr()['bech32']
 _fundchannel(l1, l2, amt_normal, addr)
-# confirm that it appears in listpeers
-assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][1]['close_to_addr']
+# confirm that it appears in listpeerchannels
+assert addr == l1.rpc.listpeerchannels()['channels'][1]['close_to_addr']
 assert _close(l1, l2) == [addr]
 # check that passing in the same addr to close works
 addr = bitcoind.rpc.getnewaddress()
 _fundchannel(l1, l2, amt_normal, addr)
-assert addr == only_one(l1.rpc.listpeers()['peers'])['channels'][2]['close_to_addr']
+assert addr == l1.rpc.listpeerchannels()['channels'][2]['close_to_addr']
 assert _close(l1, l2, addr) == [addr]
 # check that remote peer closing works as expected (and that remote's close_to works)
 _fundchannel(l1, l2, amt_addr, addr)
 # send some money to remote so that they have a closeout
 l1.rpc.pay(l2.rpc.invoice((amt_addr // 2) * 1000, 'test_remote_close_to', 'desc')['bolt11'])
-assert only_one(l2.rpc.listpeers()['peers'])['channels'][-1]['close_to_addr'] == remote_valid_addr
+assert l2.rpc.listpeerchannels()['channels'][-1]['close_to_addr'] == remote_valid_addr
 # The tx outputs must be one of the two permutations
 assert _close(l2, l1) in ([addr, remote_valid_addr], [remote_valid_addr, addr])
@@ -1682,8 +1681,11 @@ def test_funding_external_wallet(node_factory, bitcoind):
 # Peer should still be connected and in state waiting for funding_txid
 assert peer['id'] == l2.info['id']
 r = re.compile('Funding channel start: awaiting funding_txid with output to .*')
-assert any(r.match(line) for line in peer['channels'][0]['status'])
-assert 'OPENINGD' in peer['channels'][0]['state']
+channels = l1.rpc.listpeerchannels(peer['id'])['channels']
+assert len(channels) == 1, f"Channels for peer {peer['id']} need to be not empty"
+assert any(r.match(line) for line in channels[0]['status'])
+assert 'OPENINGD' in channels[0]['state']
 # Trying to start a second funding should not work, it's in progress.
 with pytest.raises(RpcError, match=r'Already funding channel'):
@@ -1712,7 +1714,7 @@ def test_funding_external_wallet(node_factory, bitcoind):
 for node in [l1, l2]:
 node.daemon.wait_for_log(r'State changed from CHANNELD_AWAITING_LOCKIN to CHANNELD_NORMAL')
-channel = node.rpc.listpeers()['peers'][0]['channels'][0]
+channel = node.rpc.listpeerchannels()['channels'][0]
 assert amount * 1000 == channel['total_msat']
 # Test that we don't crash if peer disconnects after fundchannel_start
@@ -2012,14 +2014,14 @@ def test_multifunding_feerates(node_factory, bitcoind):
 expected_fee = int(funding_tx_feerate[:-5]) * weight // 1000
 assert expected_fee == entry['fees']['base'] * 10 ** 8
-assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['feerate']['perkw'] == commitment_tx_feerate_int
-assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['feerate']['perkb'] == commitment_tx_feerate_int * 4
-txfee = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['last_tx_fee_msat']
+assert only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['feerate']['perkw'] == commitment_tx_feerate_int
+assert only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['feerate']['perkb'] == commitment_tx_feerate_int * 4
+txfee = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['last_tx_fee_msat']
 # We get the expected close txid, force close the channel, then fish
 # the details about the transaction out of the mempoool entry
-close_txid = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['scratch_txid']
+close_txid = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['scratch_txid']
 l1.rpc.dev_fail(l2.info['id'])
 l1.wait_for_channel_onchain(l2.info['id'])
 entry = bitcoind.rpc.getmempoolentry(close_txid)
@@ -2134,10 +2136,7 @@ def test_multifunding_best_effort(node_factory, bitcoind):
 # open again, so multiple channels may remain
 # listed.
 def get_funded_channel_scid(n1, n2):
-peers = n1.rpc.listpeers(n2.info['id'])['peers']
-assert len(peers) == 1
-peer = peers[0]
-channels = peer['channels']
+channels = n1.rpc.listpeerchannels(n2.info['id'])['channels']
 assert channels
 for c in channels:
 state = c['state']
@@ -2236,8 +2235,8 @@ def test_channel_persistence(node_factory, bitcoind, executor):
 l1.fundchannel(l2, 100000)
-peers = l1.rpc.listpeers()['peers']
-assert(only_one(peers[0]['channels'])['state'] == 'CHANNELD_NORMAL')
+channels = l1.rpc.listpeerchannels()['channels']
+assert(only_one(channels)['state'] == 'CHANNELD_NORMAL')
 # Both nodes should now have exactly one channel in the database
 for n in (l1, l2):
@@ -2257,14 +2256,14 @@ def test_channel_persistence(node_factory, bitcoind, executor):
 del l2.daemon.opts['dev-disable-commit-after']
 # Wait for l1 to notice
-wait_for(lambda: 'connected' not in only_one(l1.rpc.listpeers()['peers'][0]['channels']))
+wait_for(lambda: 'connected' not in l1.rpc.listpeerchannels()['channels'])
 # Now restart l2 and it should reload peers/channels from the DB
 l2.start()
 wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)
 # Wait for the restored HTLC to finish
-wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99990000)
+wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['to_us_msat'] == 99990000)
 wait_for(lambda: len([p for p in l1.rpc.listpeers()['peers'] if p['connected']]))
 wait_for(lambda: len([p for p in l2.rpc.listpeers()['peers'] if p['connected']]))
@@ -2274,12 +2273,12 @@ def test_channel_persistence(node_factory, bitcoind, executor):
 # L1 doesn't actually update to_us_msat until it receives
 # revoke_and_ack from L2, which can take a little bit.
-wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99980000)
-assert only_one(l2.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 20000
+wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['to_us_msat'] == 99980000)
+assert only_one(l2.rpc.listpeerchannels()['channels'])['to_us_msat'] == 20000
 # Finally restart l1, and make sure it remembers
 l1.restart()
-assert only_one(l1.rpc.listpeers()['peers'][0]['channels'])['to_us_msat'] == 99980000
+assert only_one(l1.rpc.listpeerchannels()['channels'])['to_us_msat'] == 99980000
 # Keep l1 from sending its onchain tx
 def censoring_sendrawtx(r):
@@ -2315,9 +2314,9 @@ def test_private_channel(node_factory):
 assert not l2.daemon.is_in_log('Received node_announcement for node {}'.format(l1.info['id']))
 # test for 'private' flag in rpc output
-assert only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['private']
+assert only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['private']
 # check non-private channel
-assert not only_one(only_one(l4.rpc.listpeers(l3.info['id'])['peers'])['channels'])['private']
+assert not only_one(l4.rpc.listpeerchannels(l3.info['id'])['channels'])['private']
 @pytest.mark.developer("gossip without DEVELOPER=1 is slow")
@@ -2400,8 +2399,8 @@ def test_fee_limits(node_factory, bitcoind):
 l1.daemon.wait_for_log('Peer transient failure in CHANNELD_NORMAL: channeld WARNING: .*: update_fee 253 outside range 1875-75000')
 # Closes, but does not error. Make sure it's noted in their status though.
-assert 'update_fee 253 outside range 1875-75000' in only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['status'][0]
-assert 'update_fee 253 outside range 1875-75000' in only_one(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'])['status'][0]
+assert 'update_fee 253 outside range 1875-75000' in only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['status'][0]
+assert 'update_fee 253 outside range 1875-75000' in only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['status'][0]
 # Make l2 accept those fees, and it should recover.
 l2.stop()
@@ -2570,7 +2569,7 @@ def test_multiple_channels(node_factory):
 r'State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE'
 )
-channels = only_one(l1.rpc.listpeers()['peers'])['channels']
+channels = l1.rpc.listpeerchannels()['channels']
 assert len(channels) == 3
 # Most in state ONCHAIN, last is CLOSINGD_COMPLETE
 for i in range(len(channels) - 1):
@@ -2598,7 +2597,7 @@ def test_forget_channel(node_factory):
 # Forcing should work
 l1.rpc.dev_forget_channel(l2.info['id'], True)
-wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
+wait_for(lambda: l1.rpc.listpeerchannels()['channels'] == [])
 # And restarting should keep that peer forgotten
 l1.restart()
@@ -2623,7 +2622,7 @@ def test_peerinfo(node_factory, bitcoind):
 # Gossiping but no node announcement yet
 assert l1.rpc.getpeer(l2.info['id'])['connected']
-assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
+assert len(l1.rpc.listpeerchannels(l2.info['id'])['channels']) == 0
 assert l1.rpc.getpeer(l2.info['id'])['features'] == lfeatures
 # Fund a channel to force a node announcement
@@ -2658,8 +2657,8 @@ def test_peerinfo(node_factory, bitcoind):
 bitcoind.generate_block(100, wait_for_mempool=1)
 l1.daemon.wait_for_log('onchaind complete, forgetting peer')
 l2.daemon.wait_for_log('onchaind complete, forgetting peer')
-assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] == []
-assert only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'] == []
+assert l1.rpc.listpeerchannels(l2.info['id'])['channels'] == []
+assert l2.rpc.listpeerchannels(l1.info['id'])['channels'] == []
 # The only channel was closed, everybody should have forgotten the nodes
 assert l1.rpc.listnodes()['nodes'] == []
@@ -2673,9 +2672,9 @@ def test_disconnectpeer(node_factory, bitcoind):
 # Gossiping
 assert l1.rpc.getpeer(l2.info['id'])['connected']
-assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
+assert len(l1.rpc.listpeerchannels(l2.info['id'])['channels']) == 0
 assert l1.rpc.getpeer(l3.info['id'])['connected']
-assert len(l1.rpc.getpeer(l3.info['id'])['channels']) == 0
+assert len(l1.rpc.listpeerchannels(l3.info['id'])['channels']) == 0
 wait_for(lambda: l2.rpc.getpeer(l1.info['id']) is not None)
 # Disconnect l2 from l1
@@ -2745,7 +2744,7 @@ def test_fundee_forget_funding_tx_unconfirmed(node_factory, bitcoind):
 l2.daemon.wait_for_log(r'Forgetting channel: It has been {}\d blocks'.format(str(blocks)[:-1]))
 # fundee will also forget, but not disconnect from peer.
-wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'] == [])
+wait_for(lambda: l2.rpc.listpeerchannels(l1.info['id'])['channels'] == [])
 @pytest.mark.developer("needs --dev-max-funding-unconfirmed-blocks")
@@ -2783,7 +2782,7 @@ def test_fundee_node_unconfirmed(node_factory, bitcoind):
 bitcoind.generate_block(1, wait_for_mempool=1)
 # Check that l1 opened the channel
-wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')
+wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
 end_amount = only_one(l1.rpc.listfunds()['outputs'])['amount_msat']
 # We should be out the onchaind fees
 assert start_amount > end_amount + Millisatoshi(10 ** 7 * 100)
@@ -2840,8 +2839,8 @@ def test_no_fee_estimate(node_factory, bitcoind, executor):
 l1.daemon.wait_for_log('Failing due to dev-fail command')
 l1.wait_for_channel_onchain(l2.info['id'])
 bitcoind.generate_block(6)
-wait_for(lambda: only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['state'] == 'ONCHAIN')
-wait_for(lambda: only_one(l2.rpc.getpeer(l1.info['id'])['channels'])['state'] == 'ONCHAIN')
+wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['state'] == 'ONCHAIN')
+wait_for(lambda: only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['state'] == 'ONCHAIN')
 # But can accept incoming connections.
 l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
@@ -3462,10 +3461,10 @@ def test_wumbo_channels(node_factory, bitcoind):
 l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
 l1.rpc.fundchannel(l2.info['id'], 'all')
 bitcoind.generate_block(1, wait_for_mempool=1)
-wait_for(lambda: 'CHANNELD_NORMAL' in [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']])
+wait_for(lambda: 'CHANNELD_NORMAL' in [c['state'] for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']])
 # Exact amount depends on fees, but it will be wumbo!
-chan = only_one([c for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] if c['state'] == 'CHANNELD_NORMAL'])
+chan = only_one([c for c in l1.rpc.listpeerchannels(l2.info['id'])['channels'] if c['state'] == 'CHANNELD_NORMAL'])
 amount = chan['funding']['local_funds_msat']
 assert amount > Millisatoshi(str((1 << 24) - 1) + "sat")
@@ -3474,7 +3473,7 @@ def test_wumbo_channels(node_factory, bitcoind):
 assert spendable > Millisatoshi(str((1 << 24) - 1) + "sat")
 # So should peer.
-chan = only_one([c for c in only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'] if c['state'] == 'CHANNELD_NORMAL'])
+chan = only_one([c for c in l2.rpc.listpeerchannels(l1.info['id'])['channels'] if c['state'] == 'CHANNELD_NORMAL'])
 assert chan['receivable_msat'] == spendable
 # And we can wumbo pay, right?
@@ -3501,26 +3500,26 @@ def test_channel_features(node_factory, bitcoind):
 l1.rpc.fundchannel(l2.info['id'], 'all')
 # We should see features in unconfirmed channels.
-chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
+chan = only_one(l1.rpc.listpeerchannels()['channels'])
 assert 'option_static_remotekey' in chan['features']
 if EXPERIMENTAL_FEATURES or l1.config('experimental-dual-fund'):
 assert 'option_anchor_outputs' in chan['features']
 # l2 should agree.
-assert only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features'] == chan['features']
+assert only_one(l2.rpc.listpeerchannels()['channels'])['features'] == chan['features']
 # Confirm it.
 bitcoind.generate_block(1)
-wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')
-wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')
-chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
+wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
+wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
+chan = only_one(l1.rpc.listpeerchannels()['channels'])
 assert 'option_static_remotekey' in chan['features']
 if EXPERIMENTAL_FEATURES or l1.config('experimental-dual-fund'):
 assert 'option_anchor_outputs' in chan['features']
 # l2 should agree.
-assert only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features'] == chan['features']
+assert only_one(l2.rpc.listpeerchannels()['channels'])['features'] == chan['features']
 @pytest.mark.developer("need dev-force-features")
@@ -3531,7 +3530,7 @@ def test_nonstatic_channel(node_factory, bitcoind):
 # needs at least 15 to connect
 # (and 9 is a dependent)
 {'dev-force-features': '9,15////////'}])
-chan = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])
+chan = only_one(l1.rpc.listpeerchannels()['channels'])
 assert 'option_static_remotekey' not in chan['features']
 assert 'option_anchor_outputs' not in chan['features']
@@ -3712,8 +3711,8 @@ def test_upgrade_statickey_onchaind(node_factory, executor, bitcoind):
 l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
 'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
 bitcoind.generate_block(100)
-# This works even if they disconnect and listpeers() is empty:
-wait_for(lambda: all([p['channels'] == [] for p in l2.rpc.listpeers()['peers']]))
+# This works even if they disconnect and listpeerchannels() is empty:
+wait_for(lambda: l2.rpc.listpeerchannels()['channels'] == [])
 # TEST 2: Cheat from post-upgrade.
 node_factory.join_nodes([l1, l2])
@@ -3738,7 +3737,7 @@ def test_upgrade_statickey_onchaind(node_factory, executor, bitcoind):
 'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
 bitcoind.generate_block(100)
 # This works even if they disconnect and listpeers() is empty:
-wait_for(lambda: all([p['channels'] == [] for p in l2.rpc.listpeers()['peers']]))
+wait_for(lambda: len(l2.rpc.listpeerchannels()['channels']) == 0)
 # TEST 3: Unilateral close from pre-upgrade
 node_factory.join_nodes([l1, l2])
@@ -3766,8 +3765,8 @@ def test_upgrade_statickey_onchaind(node_factory, executor, bitcoind):
 bitcoind.generate_block(5)
 bitcoind.generate_block(100, wait_for_mempool=1)
-# This works even if they disconnect and listpeers() is empty:
-wait_for(lambda: all([p['channels'] == [] for p in l2.rpc.listpeers()['peers']]))
+# This works even if they disconnect and listpeerchannels() is empty:
+wait_for(lambda: len(l2.rpc.listpeerchannels()['channels']) == 0)
 # TEST 4: Unilateral close from post-upgrade
 node_factory.join_nodes([l1, l2])
@@ -3792,8 +3791,8 @@ def test_upgrade_statickey_onchaind(node_factory, executor, bitcoind):
 bitcoind.generate_block(5)
 bitcoind.generate_block(100, wait_for_mempool=1)
-# This works even if they disconnect and listpeers() is empty:
-wait_for(lambda: all([p['channels'] == [] for p in l2.rpc.listpeers()['peers']]))
+# This works even if they disconnect and listpeerchannels() is empty:
+wait_for(lambda: len(l2.rpc.listpeerchannels()['channels']) == 0)
 @unittest.skipIf(not EXPERIMENTAL_FEATURES, "upgrade protocol not available")
@@ -3837,8 +3836,8 @@ def test_upgrade_statickey_fail(node_factory, executor, bitcoind):
 # Make sure we already skip the first of these.
 l1.daemon.wait_for_log('billboard perm: Reconnected, and reestablished.')
-assert 'option_static_remotekey' not in only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['features']
-assert 'option_static_remotekey' not in only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features']
+assert 'option_static_remotekey' not in only_one(l1.rpc.listpeerchannels()['channels'])['features']
+assert 'option_static_remotekey' not in only_one(l2.rpc.listpeerchannels()['channels'])['features']
 sleeptime = 1
 while True:
@@ -3858,8 +3857,8 @@ def test_upgrade_statickey_fail(node_factory, executor, bitcoind):
 l1.daemon.logsearch_start = oldstart
 assert l1.daemon.wait_for_log('option_static_remotekey enabled at 2/2')
 assert l2.daemon.wait_for_log('option_static_remotekey enabled at 2/2')
-assert 'option_static_remotekey' in only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['features']
-assert 'option_static_remotekey' in only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['features']
+assert 'option_static_remotekey' in only_one(l1.rpc.listpeerchannels()['channels'])['features']
+assert 'option_static_remotekey' in only_one(l2.rpc.listpeerchannels()['channels'])['features']
 @unittest.skipIf(not EXPERIMENTAL_FEATURES, "quiescence is experimental")
@@ -3924,8 +3923,8 @@ def test_multichan_stress(node_factory, executor, bitcoind):
 bitcoind.generate_block(1)
 sync_blockheight(bitcoind, [l2])
 l2.rpc.fundchannel(l3.info['id'], '0.01001btc')
-assert(len(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']) == 2)
-assert(len(only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels']) == 2)
+assert(len(l2.rpc.listpeerchannels(l3.info['id'])['channels']) == 2)
+assert(len(l3.rpc.listpeerchannels(l2.info['id'])['channels']) == 2)
 # Make sure gossip works.
 bitcoind.generate_block(6, wait_for_mempool=1)
@@ -4076,17 +4075,17 @@ def test_multichan(node_factory, executor, bitcoind):
bitcoind.generate_block(1) bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3]) sync_blockheight(bitcoind, [l1, l2, l3])
l2.rpc.fundchannel(l3.info['id'], '0.01001btc') l2.rpc.fundchannel(l3.info['id'], '0.01001btc')
assert(len(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']) == 2) assert(len(l2.rpc.listpeerchannels(l3.info['id'])['channels']) == 2)
assert(len(only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels']) == 2) assert(len(l3.rpc.listpeerchannels(l2.info['id'])['channels']) == 2)
bitcoind.generate_block(1, wait_for_mempool=1) bitcoind.generate_block(1, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2, l3]) sync_blockheight(bitcoind, [l1, l2, l3])
# Make sure new channel is also CHANNELD_NORMAL # Make sure new channel is also CHANNELD_NORMAL
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']] == ["CHANNELD_NORMAL", "CHANNELD_NORMAL"]) wait_for(lambda: [c['state'] for c in l2.rpc.listpeerchannels(l3.info['id'])['channels']] == ["CHANNELD_NORMAL", "CHANNELD_NORMAL"])
# Dance around to get the *other* scid. # Dance around to get the *other* scid.
wait_for(lambda: all(['short_channel_id' in c for c in l3.rpc.listpeers()['peers'][0]['channels']])) wait_for(lambda: all(['short_channel_id' in c for c in l3.rpc.listpeerchannels()['channels']]))
scids = [c['short_channel_id'] for c in l3.rpc.listpeers()['peers'][0]['channels']] scids = [c['short_channel_id'] for c in l3.rpc.listpeerchannels()['channels']]
assert len(scids) == 2 assert len(scids) == 2
if scids[0] == scid23a: if scids[0] == scid23a:
@@ -4105,13 +4104,15 @@ def test_multichan(node_factory, executor, bitcoind):
'id': l3.info['id'], 'id': l3.info['id'],
'delay': 5, 'delay': 5,
'channel': scid23a}] 'channel': scid23a}]
before = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']
before = l2.rpc.listpeerchannels(l3.info['id'])['channels']
inv1 = l3.rpc.invoice(100000000, "invoice", "invoice") inv1 = l3.rpc.invoice(100000000, "invoice", "invoice")
l1.rpc.sendpay(route, inv1['payment_hash'], payment_secret=inv1['payment_secret']) l1.rpc.sendpay(route, inv1['payment_hash'], payment_secret=inv1['payment_secret'])
l1.rpc.waitsendpay(inv1['payment_hash']) l1.rpc.waitsendpay(inv1['payment_hash'])
# Wait until HTLCs fully settled # Wait until HTLCs fully settled
wait_for(lambda: [c['htlcs'] for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']] == [[], []]) wait_for(lambda: [c['htlcs'] for c in l2.rpc.listpeerchannels(l3.info['id'])['channels']] == [[], []])
after = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'] after = l2.rpc.listpeerchannels(l3.info['id'])['channels']
if before[0]['short_channel_id'] == scid23a: if before[0]['short_channel_id'] == scid23a:
chan23a_idx = 0 chan23a_idx = 0
@@ -4130,14 +4131,14 @@ def test_multichan(node_factory, executor, bitcoind):
assert before[chan23a_idx]['to_us_msat'] == after[chan23a_idx]['to_us_msat'] assert before[chan23a_idx]['to_us_msat'] == after[chan23a_idx]['to_us_msat']
assert before[chan23b_idx]['to_us_msat'] != after[chan23b_idx]['to_us_msat'] assert before[chan23b_idx]['to_us_msat'] != after[chan23b_idx]['to_us_msat']
before = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'] before = l2.rpc.listpeerchannels(l3.info['id'])['channels']
route[1]['channel'] = scid23b route[1]['channel'] = scid23b
inv2 = l3.rpc.invoice(100000000, "invoice2", "invoice2") inv2 = l3.rpc.invoice(100000000, "invoice2", "invoice2")
l1.rpc.sendpay(route, inv2['payment_hash'], payment_secret=inv2['payment_secret']) l1.rpc.sendpay(route, inv2['payment_hash'], payment_secret=inv2['payment_secret'])
l1.rpc.waitsendpay(inv2['payment_hash']) l1.rpc.waitsendpay(inv2['payment_hash'])
# Wait until HTLCs fully settled # Wait until HTLCs fully settled
wait_for(lambda: [c['htlcs'] for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']] == [[], []]) wait_for(lambda: [c['htlcs'] for c in l2.rpc.listpeerchannels(l3.info['id'])['channels']] == [[], []])
after = only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'] after = l2.rpc.listpeerchannels(l3.info['id'])['channels']
# Now the first channel is larger! # Now the first channel is larger!
assert before[chan23a_idx]['to_us_msat'] != after[chan23a_idx]['to_us_msat'] assert before[chan23a_idx]['to_us_msat'] != after[chan23a_idx]['to_us_msat']
@@ -4283,10 +4284,22 @@ def test_no_reconnect_awating_unilateral(node_factory, bitcoind):
# Close immediately. # Close immediately.
l1.rpc.close(l2.info['id'], 1) l1.rpc.close(l2.info['id'], 1)
wait_for(lambda: only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['state'] == 'AWAITING_UNILATERAL') wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['state'] == 'AWAITING_UNILATERAL')
# After switching to AWAITING_UNILATERAL it will *not* try to reconnect. # After switching to AWAITING_UNILATERAL it will *not* try to reconnect.
l1.daemon.wait_for_log("State changed from CHANNELD_SHUTTING_DOWN to AWAITING_UNILATERAL") l1.daemon.wait_for_log("State changed from CHANNELD_SHUTTING_DOWN to AWAITING_UNILATERAL")
time.sleep(10) time.sleep(10)
assert not l1.daemon.is_in_log('Will try reconnect', start=l1.daemon.logsearch_start) assert not l1.daemon.is_in_log('Will try reconnect', start=l1.daemon.logsearch_start)
def test_peer_disconnected_reflected_in_channel_state(node_factory):
"""
Make sure that if a node is disconnected we have the correct value
across listpeers and listpeerchannels.
"""
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
l2.stop()
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] is False)
wait_for(lambda: only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['peer_connected'] is False)
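The new test above exercises the lookup pattern this commit applies throughout the suite; a minimal sketch of the translation, assuming `node` is any test fixture node, `peer_id` the id of its peer, and `only_one` the usual test helper that asserts a single-element list:
def channel_of(node, peer_id):
    # Old style: the channel list was nested under each peer entry from listpeers():
    #   only_one(only_one(node.rpc.listpeers(peer_id)['peers'])['channels'])
    # New style: listpeerchannels() returns the channel objects directly, each
    # carrying its own 'peer_id' and 'peer_connected' fields.
    return only_one(node.rpc.listpeerchannels(peer_id)['channels'])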


@@ -120,8 +120,8 @@ def test_max_channel_id(node_factory, bitcoind):
l2.wait_for_channel_onchain(l1.info['id']) l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(101) bitcoind.generate_block(101)
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == []) wait_for(lambda: l1.rpc.listpeerchannels()['channels'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == []) wait_for(lambda: l2.rpc.listpeerchannels()['channels'] == [])
# Stop l2, and restart # Stop l2, and restart
l2.stop() l2.stop()


@@ -739,8 +739,8 @@ def test_gossip_query_channel_range(node_factory, bitcoind, chainparams):
# Make sure l4 has received all the gossip. # Make sure l4 has received all the gossip.
l4.daemon.wait_for_logs(['Received node_announcement for node ' + n.info['id'] for n in (l1, l2, l3)]) l4.daemon.wait_for_logs(['Received node_announcement for node ' + n.info['id'] for n in (l1, l2, l3)])
scid12 = only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id'] scid12 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]['short_channel_id']
scid23 = only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id'] scid23 = l3.rpc.listpeerchannels(l2.info['id'])['channels'][0]['short_channel_id']
block12 = int(scid12.split('x')[0]) block12 = int(scid12.split('x')[0])
block23 = int(scid23.split('x')[0]) block23 = int(scid23.split('x')[0])
@@ -1419,7 +1419,7 @@ def test_gossip_notices_close(node_factory, bitcoind):
node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1] node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1]
txid = l2.rpc.close(l3.info['id'])['txid'] txid = l2.rpc.close(l3.info['id'])['txid']
wait_for(lambda: only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE') wait_for(lambda: l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
bitcoind.generate_block(13, txid) bitcoind.generate_block(13, txid)
wait_for(lambda: l1.rpc.listchannels()['channels'] == []) wait_for(lambda: l1.rpc.listchannels()['channels'] == [])


@@ -170,7 +170,7 @@ def test_invoice_routeboost(node_factory, bitcoind):
# Route array has single route with single element. # Route array has single route with single element.
r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes'])) r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
assert r['pubkey'] == l1.info['id'] assert r['pubkey'] == l1.info['id']
assert r['short_channel_id'] == l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0]['short_channel_id'] assert r['short_channel_id'] == l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]['short_channel_id']
assert r['fee_base_msat'] == 1 assert r['fee_base_msat'] == 1
assert r['fee_proportional_millionths'] == 10 assert r['fee_proportional_millionths'] == 10
assert r['cltv_expiry_delta'] == 6 assert r['cltv_expiry_delta'] == 6
@@ -233,7 +233,7 @@ def test_invoice_routeboost_private(node_factory, bitcoind):
# Make sure channel is totally public. # Make sure channel is totally public.
wait_for(lambda: [c['public'] for c in l2.rpc.listchannels(scid_dummy)['channels']] == [True, True]) wait_for(lambda: [c['public'] for c in l2.rpc.listchannels(scid_dummy)['channels']] == [True, True])
alias = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])['alias']['local'] alias = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['alias']['local']
# Since there's only one route, it will reluctantly hint that even # Since there's only one route, it will reluctantly hint that even
# though it's private # though it's private
inv = l2.rpc.invoice(amount_msat=123456, label="inv0", description="?") inv = l2.rpc.invoice(amount_msat=123456, label="inv0", description="?")


@@ -1268,7 +1268,7 @@ def test_funding_reorg_private(node_factory, bitcoind):
bitcoind.generate_block(1) # height 106 bitcoind.generate_block(1) # height 106
daemon = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD' daemon = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['status'] wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['status']
== ['{}_AWAITING_LOCKIN:Funding needs 1 more confirmations to be ready.'.format(daemon)]) == ['{}_AWAITING_LOCKIN:Funding needs 1 more confirmations to be ready.'.format(daemon)])
bitcoind.generate_block(1) # height 107 bitcoind.generate_block(1) # height 107
l1.wait_channel_active('106x1x0') l1.wait_channel_active('106x1x0')
@@ -1325,7 +1325,7 @@ def test_funding_reorg_remote_lags(node_factory, bitcoind):
bitcoind.generate_block(1) bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Peer transient failure .* short_channel_id changed to 104x1x0 \(was 103x1x0\)') l1.daemon.wait_for_log(r'Peer transient failure .* short_channel_id changed to 104x1x0 \(was 103x1x0\)')
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [ wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['status'] == [
'CHANNELD_NORMAL:Reconnected, and reestablished.', 'CHANNELD_NORMAL:Reconnected, and reestablished.',
'CHANNELD_NORMAL:Channel ready for use. They need our announcement signatures.']) 'CHANNELD_NORMAL:Channel ready for use. They need our announcement signatures.'])
@@ -1335,7 +1335,7 @@ def test_funding_reorg_remote_lags(node_factory, bitcoind):
wait_for(lambda: chan_active(l2, '104x1x0', True)) wait_for(lambda: chan_active(l2, '104x1x0', True))
assert l2.rpc.listchannels('103x1x0')['channels'] == [] assert l2.rpc.listchannels('103x1x0')['channels'] == []
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [ wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['status'] == [
'CHANNELD_NORMAL:Reconnected, and reestablished.', 'CHANNELD_NORMAL:Reconnected, and reestablished.',
'CHANNELD_NORMAL:Channel ready for use. Channel announced.']) 'CHANNELD_NORMAL:Channel ready for use. Channel announced.'])
@@ -2663,7 +2663,7 @@ def test_listforwards_and_listhtlcs(node_factory, bitcoind):
# Once channels are gone, htlcs are gone. # Once channels are gone, htlcs are gone.
for n in (l1, l2, l3, l4): for n in (l1, l2, l3, l4):
# They might reconnect, but still will have no channels # They might reconnect, but still will have no channels
wait_for(lambda: all(p['channels'] == [] for p in n.rpc.listpeers()['peers'])) wait_for(lambda: n.rpc.listpeerchannels()['channels'] == [])
assert n.rpc.listhtlcs() == {'htlcs': []} assert n.rpc.listhtlcs() == {'htlcs': []}
# But forwards are not forgotten! # But forwards are not forgotten!


@@ -13,7 +13,7 @@ import unittest
def find_next_feerate(node, peer): def find_next_feerate(node, peer):
chan = only_one(only_one(node.rpc.listpeers(peer.info['id'])['peers'])['channels']) chan = only_one(node.rpc.listpeerchannels(peer.info['id'])['channels'])
return chan['next_feerate'] return chan['next_feerate']
@@ -110,11 +110,8 @@ def test_multifunding_v2_best_effort(node_factory, bitcoind):
# open again, so multiple channels may remain # open again, so multiple channels may remain
# listed. # listed.
def get_funded_channel_scid(n1, n2): def get_funded_channel_scid(n1, n2):
peers = n1.rpc.listpeers(n2.info['id'])['peers'] channels = n1.rpc.listpeerchannels(n2.info['id'])['channels']
assert len(peers) == 1 assert channels and len(channels) != 0
peer = peers[0]
channels = peer['channels']
assert channels
for c in channels: for c in channels:
state = c['state'] state = c['state']
if state in ('DUALOPEND_AWAITING_LOCKIN', 'CHANNELD_AWAITING_LOCKIN', 'CHANNELD_NORMAL'): if state in ('DUALOPEND_AWAITING_LOCKIN', 'CHANNELD_AWAITING_LOCKIN', 'CHANNELD_NORMAL'):
@@ -178,7 +175,7 @@ def test_v2_open_sigs_restart(node_factory, bitcoind):
pass pass
l2.daemon.wait_for_log('Broadcasting funding tx') l2.daemon.wait_for_log('Broadcasting funding tx')
txid = l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0]['funding_txid'] txid = l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]['funding_txid']
bitcoind.generate_block(6, wait_for_mempool=txid) bitcoind.generate_block(6, wait_for_mempool=txid)
# Make sure we're ok. # Make sure we're ok.
@@ -267,7 +264,7 @@ def test_v2_rbf_single(node_factory, bitcoind, chainparams):
next_feerate = find_next_feerate(l1, l2) next_feerate = find_next_feerate(l1, l2)
# Check that feerate info is correct # Check that feerate info is correct
info_1 = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']) info_1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
assert info_1['initial_feerate'] == info_1['last_feerate'] assert info_1['initial_feerate'] == info_1['last_feerate']
rate = int(info_1['last_feerate'][:-5]) rate = int(info_1['last_feerate'][:-5])
assert int(info_1['next_feerate'][:-5]) == rate * 65 // 64 assert int(info_1['next_feerate'][:-5]) == rate * 65 // 64
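The assertion above encodes the RBF feerate step: the next attempt must use the last feerate scaled by 65/64 with integer division. A worked example with an assumed last feerate (the 7500perkw value is illustrative only):
rate = 7500                      # assumed last_feerate in perkw, for illustration
assert rate * 65 // 64 == 7617   # minimum feerate for the next RBF attempt (a 1/64 bump)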
@@ -286,7 +283,7 @@ def test_v2_rbf_single(node_factory, bitcoind, chainparams):
assert update['commitments_secured'] assert update['commitments_secured']
# Check that feerate info has incremented # Check that feerate info has incremented
info_2 = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']) info_2 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
assert info_1['initial_feerate'] == info_2['initial_feerate'] assert info_1['initial_feerate'] == info_2['initial_feerate']
assert info_1['next_feerate'] == info_2['last_feerate'] assert info_1['next_feerate'] == info_2['last_feerate']
@@ -301,7 +298,7 @@ def test_v2_rbf_single(node_factory, bitcoind, chainparams):
l1.rpc.openchannel_signed(chan_id, signed_psbt) l1.rpc.openchannel_signed(chan_id, signed_psbt)
# Do it again, with a higher feerate # Do it again, with a higher feerate
info_2 = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']) info_2 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
assert info_1['initial_feerate'] == info_2['initial_feerate'] assert info_1['initial_feerate'] == info_2['initial_feerate']
assert info_1['next_feerate'] == info_2['last_feerate'] assert info_1['next_feerate'] == info_2['last_feerate']
rate = int(info_2['last_feerate'][:-5]) rate = int(info_2['last_feerate'][:-5])
@@ -328,7 +325,7 @@ def test_v2_rbf_single(node_factory, bitcoind, chainparams):
l1.daemon.wait_for_log(' to CHANNELD_NORMAL') l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
# Check that feerate info is gone # Check that feerate info is gone
info_1 = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']) info_1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
assert 'initial_feerate' not in info_1 assert 'initial_feerate' not in info_1
assert 'last_feerate' not in info_1 assert 'last_feerate' not in info_1
assert 'next_feerate' not in info_1 assert 'next_feerate' not in info_1
@@ -375,7 +372,7 @@ def test_v2_rbf_liquidity_ad(node_factory, bitcoind, chainparams):
est_fees = calc_lease_fee(amount, feerate, rates) est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount # This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding'] fundings = only_one(l1.rpc.listpeerchannels()['channels'])['funding']
assert Millisatoshi(amount * 1000) == fundings['remote_funds_msat'] assert Millisatoshi(amount * 1000) == fundings['remote_funds_msat']
assert Millisatoshi(est_fees + amount * 1000) == fundings['local_funds_msat'] assert Millisatoshi(est_fees + amount * 1000) == fundings['local_funds_msat']
assert Millisatoshi(est_fees) == fundings['fee_paid_msat'] assert Millisatoshi(est_fees) == fundings['fee_paid_msat']
@@ -416,9 +413,9 @@ def test_v2_rbf_liquidity_ad(node_factory, bitcoind, chainparams):
assert l2.rpc.listdatastore() == {'datastore': []} assert l2.rpc.listdatastore() == {'datastore': []}
# This should be the accepter's amount # This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding'] fundings = only_one(l1.rpc.listpeerchannels()['channels'])['funding']
# The lease is still there! # The lease is still there!
assert Millisatoshi(amount * 1000) == fundings['remote_funds_msat'] assert Millisatoshi(amount * 1000) == Millisatoshi(fundings['remote_funds_msat'])
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True]) wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
@@ -791,8 +788,8 @@ def test_rbf_reconnect_tx_sigs(node_factory, bitcoind, chainparams):
l1.daemon.wait_for_log(' to CHANNELD_NORMAL') l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
# Check that they have matching funding txid # Check that they have matching funding txid
l1_funding_txid = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding_txid'] l1_funding_txid = only_one(l1.rpc.listpeerchannels()['channels'])['funding_txid']
l2_funding_txid = only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['funding_txid'] l2_funding_txid = only_one(l2.rpc.listpeerchannels()['channels'])['funding_txid']
assert l1_funding_txid == l2_funding_txid assert l1_funding_txid == l2_funding_txid
@@ -858,7 +855,7 @@ def test_rbf_fails_to_broadcast(node_factory, bitcoind, chainparams):
# Check that we're waiting for lockin # Check that we're waiting for lockin
l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN') l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN')
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
def run_retry(): def run_retry():
@@ -885,7 +882,7 @@ def test_rbf_fails_to_broadcast(node_factory, bitcoind, chainparams):
signed_psbt = run_retry() signed_psbt = run_retry()
l1.rpc.openchannel_signed(chan_id, signed_psbt) l1.rpc.openchannel_signed(chan_id, signed_psbt)
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
# Restart and listpeers, used to crash # Restart and listpeers, used to crash
@@ -895,7 +892,7 @@ def test_rbf_fails_to_broadcast(node_factory, bitcoind, chainparams):
# We've restarted. Let's RBF # We've restarted. Let's RBF
signed_psbt = run_retry() signed_psbt = run_retry()
l1.rpc.openchannel_signed(chan_id, signed_psbt) l1.rpc.openchannel_signed(chan_id, signed_psbt)
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert len(inflights) == 3 assert len(inflights) == 3
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
@@ -903,7 +900,7 @@ def test_rbf_fails_to_broadcast(node_factory, bitcoind, chainparams):
# Are inflights the same post restart # Are inflights the same post restart
prev_inflights = inflights prev_inflights = inflights
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert prev_inflights == inflights assert prev_inflights == inflights
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
@@ -942,7 +939,7 @@ def test_rbf_broadcast_close_inflights(node_factory, bitcoind, chainparams):
# Check that we're waiting for lockin # Check that we're waiting for lockin
l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN') l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN')
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
# Make it such that l1 and l2 cannot broadcast transactions # Make it such that l1 and l2 cannot broadcast transactions
@@ -970,10 +967,10 @@ def test_rbf_broadcast_close_inflights(node_factory, bitcoind, chainparams):
signed_psbt = run_retry() signed_psbt = run_retry()
l1.rpc.openchannel_signed(chan_id, signed_psbt) l1.rpc.openchannel_signed(chan_id, signed_psbt)
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert inflights[-1]['funding_txid'] not in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] not in bitcoind.rpc.getrawmempool()
cmtmt_txid = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['scratch_txid'] cmtmt_txid = only_one(l1.rpc.listpeerchannels()['channels'])['scratch_txid']
assert cmtmt_txid == inflights[-1]['scratch_txid'] assert cmtmt_txid == inflights[-1]['scratch_txid']
# l2 goes offline # l2 goes offline
@@ -1016,7 +1013,7 @@ def test_rbf_non_last_mined(node_factory, bitcoind, chainparams):
# Check that we're waiting for lockin # Check that we're waiting for lockin
l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN') l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN')
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool() assert inflights[-1]['funding_txid'] in bitcoind.rpc.getrawmempool()
def run_retry(): def run_retry():
@@ -1056,7 +1053,7 @@ def test_rbf_non_last_mined(node_factory, bitcoind, chainparams):
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None) l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# We fetch out our inflights list # We fetch out our inflights list
inflights = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['inflight'] inflights = only_one(l1.rpc.listpeerchannels()['channels'])['inflight']
# l2 goes offline # l2 goes offline
l2.stop() l2.stop()
@@ -1071,7 +1068,7 @@ def test_rbf_non_last_mined(node_factory, bitcoind, chainparams):
l1.daemon.wait_for_log(r'to CHANNELD_NORMAL') l1.daemon.wait_for_log(r'to CHANNELD_NORMAL')
l2.daemon.wait_for_log(r'to CHANNELD_NORMAL') l2.daemon.wait_for_log(r'to CHANNELD_NORMAL')
channel = only_one(only_one(l1.rpc.listpeers()['peers'])['channels']) channel = only_one(l1.rpc.listpeerchannels()['channels'])
assert channel['funding_txid'] == inflights[1]['funding_txid'] assert channel['funding_txid'] == inflights[1]['funding_txid']
assert channel['scratch_txid'] == inflights[1]['scratch_txid'] assert channel['scratch_txid'] == inflights[1]['scratch_txid']
@@ -1113,7 +1110,7 @@ def test_funder_options(node_factory, bitcoind):
# l2 funds a channel with us. We don't contribute # l2 funds a channel with us. We don't contribute
l2.rpc.connect(l1.info['id'], 'localhost', l1.port) l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.fundchannel(l1, 10**6) l2.fundchannel(l1, 10**6)
chan_info = only_one(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']) chan_info = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])
# l1 contributed nothing # l1 contributed nothing
assert chan_info['funding']['remote_funds_msat'] == Millisatoshi('0msat') assert chan_info['funding']['remote_funds_msat'] == Millisatoshi('0msat')
assert chan_info['funding']['local_funds_msat'] != Millisatoshi('0msat') assert chan_info['funding']['local_funds_msat'] != Millisatoshi('0msat')
@@ -1146,7 +1143,7 @@ def test_funder_options(node_factory, bitcoind):
{'fund_probability': 100}) {'fund_probability': 100})
l3.rpc.connect(l1.info['id'], 'localhost', l1.port) l3.rpc.connect(l1.info['id'], 'localhost', l1.port)
l3.fundchannel(l1, 10**6) l3.fundchannel(l1, 10**6)
chan_info = only_one(only_one(l3.rpc.listpeers(l1.info['id'])['peers'])['channels']) chan_info = only_one(l3.rpc.listpeerchannels(l1.info['id'])['channels'])
# l1 contributed all its funds! # l1 contributed all its funds!
assert chan_info['funding']['remote_funds_msat'] == Millisatoshi('9994255000msat') assert chan_info['funding']['remote_funds_msat'] == Millisatoshi('9994255000msat')
assert chan_info['funding']['local_funds_msat'] == Millisatoshi('1000000000msat') assert chan_info['funding']['local_funds_msat'] == Millisatoshi('1000000000msat')
@@ -1291,8 +1288,8 @@ def test_zeroconf_mindepth(bitcoind, node_factory):
bitcoind.generate_block(4) # Confirm on the l2 side. bitcoind.generate_block(4) # Confirm on the l2 side.
l1.daemon.wait_for_log(r'peer_out WIRE_CHANNEL_READY') l1.daemon.wait_for_log(r'peer_out WIRE_CHANNEL_READY')
wait_for(lambda: l1.rpc.listpeers()['peers'][0]['channels'][0]['state'] == "CHANNELD_NORMAL") wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == "CHANNELD_NORMAL")
wait_for(lambda: l2.rpc.listpeers()['peers'][0]['channels'][0]['state'] == "CHANNELD_NORMAL") wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == "CHANNELD_NORMAL")
def test_zeroconf_open(bitcoind, node_factory): def test_zeroconf_open(bitcoind, node_factory):
@@ -1335,8 +1332,8 @@ def test_zeroconf_open(bitcoind, node_factory):
r'Peer told us that they\'ll use alias=[0-9x]+ for this channel', r'Peer told us that they\'ll use alias=[0-9x]+ for this channel',
]) ])
wait_for(lambda: l1.rpc.listpeers()['peers'][0]['channels'][0]['state'] == 'CHANNELD_NORMAL') wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
wait_for(lambda: l2.rpc.listpeers()['peers'][0]['channels'][0]['state'] == 'CHANNELD_NORMAL') wait_for(lambda: only_one(l2.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
wait_for(lambda: l2.rpc.listincoming()['incoming'] != []) wait_for(lambda: l2.rpc.listincoming()['incoming'] != [])
inv = l2.rpc.invoice(10**8, 'lbl', 'desc')['bolt11'] inv = l2.rpc.invoice(10**8, 'lbl', 'desc')['bolt11']
@@ -1344,7 +1341,7 @@ def test_zeroconf_open(bitcoind, node_factory):
pprint(details) pprint(details)
assert('routes' in details and len(details['routes']) == 1) assert('routes' in details and len(details['routes']) == 1)
hop = details['routes'][0][0] # First (and only) hop of hint 0 hop = details['routes'][0][0] # First (and only) hop of hint 0
l1alias = l1.rpc.listpeers()['peers'][0]['channels'][0]['alias']['local'] l1alias = only_one(l1.rpc.listpeerchannels()['channels'])['alias']['local']
assert(hop['pubkey'] == l1.info['id']) # l1 is the entrypoint assert(hop['pubkey'] == l1.info['id']) # l1 is the entrypoint
assert(hop['short_channel_id'] == l1alias) # Alias has to make sense to entrypoint assert(hop['short_channel_id'] == l1alias) # Alias has to make sense to entrypoint
l1.rpc.pay(inv) l1.rpc.pay(inv)
@@ -1389,8 +1386,8 @@ def test_zeroconf_public(bitcoind, node_factory, chainparams):
l1.daemon.wait_for_log(r'Got WIRE_HSMD_CUPDATE_SIG_REQ') l1.daemon.wait_for_log(r'Got WIRE_HSMD_CUPDATE_SIG_REQ')
l2.daemon.wait_for_log(r'Got WIRE_HSMD_CUPDATE_SIG_REQ') l2.daemon.wait_for_log(r'Got WIRE_HSMD_CUPDATE_SIG_REQ')
l1chan = l1.rpc.listpeers()['peers'][0]['channels'][0] l1chan = only_one(l1.rpc.listpeerchannels()['channels'])
l2chan = l2.rpc.listpeers()['peers'][0]['channels'][0] l2chan = only_one(l2.rpc.listpeerchannels()['channels'])
channel_id = l1chan['channel_id'] channel_id = l1chan['channel_id']
# We have no confirmation yet, so no `short_channel_id` # We have no confirmation yet, so no `short_channel_id`
@@ -1421,8 +1418,8 @@ def test_zeroconf_public(bitcoind, node_factory, chainparams):
l1.daemon.wait_for_log(r'Funding tx [a-f0-9]{64} depth 1 of 0') l1.daemon.wait_for_log(r'Funding tx [a-f0-9]{64} depth 1 of 0')
l2.daemon.wait_for_log(r'Funding tx [a-f0-9]{64} depth 1 of 0') l2.daemon.wait_for_log(r'Funding tx [a-f0-9]{64} depth 1 of 0')
l1chan = l1.rpc.listpeers()['peers'][0]['channels'][0] l1chan = only_one(l1.rpc.listpeerchannels()['channels'])
l2chan = l2.rpc.listpeers()['peers'][0]['channels'][0] l2chan = only_one(l2.rpc.listpeerchannels()['channels'])
assert('short_channel_id' in l1chan) assert('short_channel_id' in l1chan)
assert('short_channel_id' in l2chan) assert('short_channel_id' in l2chan)
@@ -1501,7 +1498,7 @@ def test_zeroconf_forward(node_factory, bitcoind):
wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 4) wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 4)
# Make sure all htlcs completely settled! # Make sure all htlcs completely settled!
wait_for(lambda: all(only_one(p['channels'])['htlcs'] == [] for p in l2.rpc.listpeers()['peers'])) wait_for(lambda: all(p['htlcs'] == [] for p in l2.rpc.listpeerchannels()['channels']))
inv = l1.rpc.invoice(42, 'back1', 'desc')['bolt11'] inv = l1.rpc.invoice(42, 'back1', 'desc')['bolt11']
l3.rpc.pay(inv) l3.rpc.pay(inv)
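The settled-HTLC wait above only works because the per-channel checks are collapsed into a single boolean with all(); a bare generator expression is truthy even while HTLCs are pending. A small illustration, assuming `l2` is a running node fixture:
checks = (c['htlcs'] == [] for c in l2.rpc.listpeerchannels()['channels'])
assert bool(checks) is True   # a generator object is always truthy, regardless of contents
settled = all(c['htlcs'] == [] for c in l2.rpc.listpeerchannels()['channels'])
# `settled` is what wait_for() needs: True only once every channel's htlcs list is empty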
@@ -1653,9 +1650,9 @@ def test_scid_alias_private(node_factory, bitcoind):
l2.rpc.fundchannel(l3.info['id'], 'all', announce=False) l2.rpc.fundchannel(l3.info['id'], 'all', announce=False)
bitcoind.generate_block(1, wait_for_mempool=1) bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: only_one(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'])['state'] == 'CHANNELD_NORMAL') wait_for(lambda: only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['state'] == 'CHANNELD_NORMAL')
chan = only_one(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']) chan = only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])
assert chan['private'] is True assert chan['private'] is True
scid23 = chan['short_channel_id'] scid23 = chan['short_channel_id']
alias23 = chan['alias']['local'] alias23 = chan['alias']['local']
@@ -1667,7 +1664,7 @@ def test_scid_alias_private(node_factory, bitcoind):
bitcoind.generate_block(6, wait_for_mempool=1) bitcoind.generate_block(6, wait_for_mempool=1)
wait_for(lambda: len(l3.rpc.listchannels(source=l1.info['id'])['channels']) == 1) wait_for(lambda: len(l3.rpc.listchannels(source=l1.info['id'])['channels']) == 1)
chan = only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']) chan = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
assert chan['private'] is False assert chan['private'] is False
scid12 = chan['short_channel_id'] scid12 = chan['short_channel_id']
@@ -1743,7 +1740,7 @@ def test_zeroconf_multichan_forward(node_factory):
inv = l3.rpc.invoice(amount_msat=10000, label='lbl1', description='desc')['bolt11'] inv = l3.rpc.invoice(amount_msat=10000, label='lbl1', description='desc')['bolt11']
l1.rpc.pay(inv) l1.rpc.pay(inv)
for c in only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels']: for c in l2.rpc.listpeerchannels(l3.info['id'])['channels']:
if c['channel_id'] == zeroconf_cid: if c['channel_id'] == zeroconf_cid:
zeroconf_scid = c['alias']['local'] zeroconf_scid = c['alias']['local']
else: else:
@@ -1796,12 +1793,12 @@ def test_zeroreserve(node_factory, bitcoind):
wait_for(lambda: l3.channel_state(l1) == 'CHANNELD_NORMAL') wait_for(lambda: l3.channel_state(l1) == 'CHANNELD_NORMAL')
# Now make sure we all agree on each others reserves # Now make sure we all agree on each others reserves
l1c1 = l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0] l1c1 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]
l2c1 = l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0] l2c1 = l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]
l2c2 = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0] l2c2 = l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]
l3c2 = l3.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0] l3c2 = l3.rpc.listpeerchannels(l2.info['id'])['channels'][0]
l3c3 = l3.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0] l3c3 = l3.rpc.listpeerchannels(l1.info['id'])['channels'][0]
l1c3 = l1.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0] l1c3 = l1.rpc.listpeerchannels(l3.info['id'])['channels'][0]
# l1 imposed a 0sat reserve on l2, while l2 imposed the default 1% reserve on l1 # l1 imposed a 0sat reserve on l2, while l2 imposed the default 1% reserve on l1
assert l1c1['their_reserve_msat'] == l2c1['our_reserve_msat'] == Millisatoshi('0sat') assert l1c1['their_reserve_msat'] == l2c1['our_reserve_msat'] == Millisatoshi('0sat')
@@ -1821,7 +1818,7 @@ def test_zeroreserve(node_factory, bitcoind):
l2.drain(l1) l2.drain(l1)
# Remember that this is the reserve l1 imposed on l2, so l2 can drain completely # Remember that this is the reserve l1 imposed on l2, so l2 can drain completely
l2c1 = l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0] l2c1 = l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]
# And despite us briefly being above dust (with a to_us output), # And despite us briefly being above dust (with a to_us output),
# closing should result in the output being trimmed again since we # closing should result in the output being trimmed again since we


@@ -267,7 +267,7 @@ def test_pay_disconnect(node_factory, bitcoind):
l2.stop() l2.stop()
# Make sure channeld has exited! # Make sure channeld has exited!
wait_for(lambda: 'owner' not in only_one(only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'])) wait_for(lambda: 'owner' not in only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels']))
# Can't pay while its offline. # Can't pay while its offline.
with pytest.raises(RpcError, match=r'failed: WIRE_TEMPORARY_CHANNEL_FAILURE \(First peer not ready\)'): with pytest.raises(RpcError, match=r'failed: WIRE_TEMPORARY_CHANNEL_FAILURE \(First peer not ready\)'):
@@ -622,12 +622,12 @@ def test_sendpay(node_factory):
assert invoice_unpaid(l2, 'testpayment2') assert invoice_unpaid(l2, 'testpayment2')
# FIXME: test paying via another node, should fail to pay twice. # FIXME: test paying via another node, should fail to pay twice.
p1 = l1.rpc.getpeer(l2.info['id'], 'info') c1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
p2 = l2.rpc.getpeer(l1.info['id'], 'info') c2 = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])
assert only_one(p1['channels'])['to_us_msat'] == 10**6 * 1000 assert c1['to_us_msat'] == 10**6 * 1000
assert only_one(p1['channels'])['total_msat'] == 10**6 * 1000 assert c1['total_msat'] == 10**6 * 1000
assert only_one(p2['channels'])['to_us_msat'] == 0 assert c2['to_us_msat'] == 0
assert only_one(p2['channels'])['total_msat'] == 10**6 * 1000 assert c2['total_msat'] == 10**6 * 1000
# This works. # This works.
before = int(time.time()) before = int(time.time())
@@ -648,13 +648,13 @@ def test_sendpay(node_factory):
# Balances should reflect it. # Balances should reflect it.
def check_balances(): def check_balances():
p1 = l1.rpc.getpeer(l2.info['id'], 'info') c1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])
p2 = l2.rpc.getpeer(l1.info['id'], 'info') c2 = only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])
return ( return (
only_one(p1['channels'])['to_us_msat'] == 10**6 * 1000 - amt c1['to_us_msat'] == 10**6 * 1000 - amt
and only_one(p1['channels'])['total_msat'] == 10**6 * 1000 and c1['total_msat'] == 10**6 * 1000
and only_one(p2['channels'])['to_us_msat'] == amt and c2['to_us_msat'] == amt
and only_one(p2['channels'])['total_msat'] == 10**6 * 1000 and c2['total_msat'] == 10**6 * 1000
) )
wait_for(check_balances) wait_for(check_balances)
@@ -1079,10 +1079,10 @@ def test_forward(node_factory, bitcoind):
# If they're at different block heights we can get spurious errors. # If they're at different block heights we can get spurious errors.
sync_blockheight(bitcoind, [l1, l2, l3]) sync_blockheight(bitcoind, [l1, l2, l3])
chanid1 = only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id'] chanid1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['short_channel_id']
chanid2 = only_one(l2.rpc.getpeer(l3.info['id'])['channels'])['short_channel_id'] chanid2 = only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['short_channel_id']
assert only_one(l2.rpc.getpeer(l1.info['id'])['channels'])['short_channel_id'] == chanid1 assert only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['short_channel_id'] == chanid1
assert only_one(l3.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id'] == chanid2 assert only_one(l3.rpc.listpeerchannels(l2.info['id'])['channels'])['short_channel_id'] == chanid2
inv = l3.rpc.invoice(100000000, 'testpayment1', 'desc') inv = l3.rpc.invoice(100000000, 'testpayment1', 'desc')
rhash = inv['payment_hash'] rhash = inv['payment_hash']
@@ -1396,8 +1396,8 @@ def test_forward_stats(node_factory, bitcoind):
states = [f['state'] for f in forwardings] states = [f['state'] for f in forwardings]
assert(states == [1, 2, 0]) # settled, failed, offered assert(states == [1, 2, 0]) # settled, failed, offered
inchan = l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0] inchan = l2.rpc.listpeerchannels(l1.info['id'])['channels'][0]
outchan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0] outchan = l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]
# Check that we correctly account channel changes # Check that we correctly account channel changes
assert inchan['in_payments_offered'] == 3 assert inchan['in_payments_offered'] == 3
@@ -1739,8 +1739,7 @@ def test_pay_retry(node_factory, bitcoind, executor, chainparams):
def exhaust_channel(opener, peer, scid, already_spent=0): def exhaust_channel(opener, peer, scid, already_spent=0):
"""Spend all available capacity (10^6 - 1%) of channel """Spend all available capacity (10^6 - 1%) of channel
""" """
peer_node = opener.rpc.listpeers(peer.info['id'])['peers'][0] chan = only_one(opener.rpc.listpeerchannels(peer.info['id'])["channels"])
chan = peer_node['channels'][0]
maxpay = chan['spendable_msat'] maxpay = chan['spendable_msat']
lbl = ''.join(random.choice(string.ascii_letters) for _ in range(20)) lbl = ''.join(random.choice(string.ascii_letters) for _ in range(20))
inv = peer.rpc.invoice(maxpay, lbl, "exhaust_channel") inv = peer.rpc.invoice(maxpay, lbl, "exhaust_channel")
@@ -1863,7 +1862,7 @@ def test_pay_routeboost(node_factory, bitcoind):
assert 'routehint_modifications' not in only_one(status['pay']) assert 'routehint_modifications' not in only_one(status['pay'])
assert 'local_exclusions' not in only_one(status['pay']) assert 'local_exclusions' not in only_one(status['pay'])
attempts = only_one(status['pay'])['attempts'] attempts = only_one(status['pay'])['attempts']
scid34 = only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['channels'][0]['alias']['local'] scid34 = l3.rpc.listpeerchannels(l4.info['id'])['channels'][0]['alias']['local']
assert(len(attempts) == 1) assert(len(attempts) == 1)
a = attempts[0] a = attempts[0]
assert(a['strategy'] == "Initial attempt") assert(a['strategy'] == "Initial attempt")
@@ -1872,7 +1871,7 @@ def test_pay_routeboost(node_factory, bitcoind):
# With dev-route option we can test longer routehints. # With dev-route option we can test longer routehints.
if DEVELOPER: if DEVELOPER:
scid45 = only_one(l4.rpc.listpeers(l5.info['id'])['peers'])['channels'][0]['alias']['local'] scid45 = l4.rpc.listpeerchannels(l5.info['id'])['channels'][0]['alias']['local']
routel3l4l5 = [{'id': l3.info['id'], routel3l4l5 = [{'id': l3.info['id'],
'short_channel_id': scid34, 'short_channel_id': scid34,
'fee_base_msat': 1000, 'fee_base_msat': 1000,
@@ -1970,10 +1969,10 @@ def test_setchannel_usage(node_factory, bitcoind):
# This will be the capacity - reserves: # This will be the capacity - reserves:
assert(db_fees[0]['htlc_maximum_msat'] == MAX_HTLC) assert(db_fees[0]['htlc_maximum_msat'] == MAX_HTLC)
# this is also what listpeers should return # this is also what listpeers should return
peers = l1.rpc.listpeers()['peers'] channel = only_one(l1.rpc.listpeerchannels()['channels'])
assert peers[0]['channels'][0]['fee_base_msat'] == DEF_BASE_MSAT assert channel['fee_base_msat'] == DEF_BASE_MSAT
assert peers[0]['channels'][0]['fee_proportional_millionths'] == DEF_PPM assert channel['fee_proportional_millionths'] == DEF_PPM
assert peers[0]['channels'][0]['maximum_htlc_out_msat'] == MAX_HTLC assert channel['maximum_htlc_out_msat'] == MAX_HTLC
# custom setchannel scid <feebase> <feeppm> <htlcmin> <htlcmax> # custom setchannel scid <feebase> <feeppm> <htlcmin> <htlcmax>
result = l1.rpc.setchannel(scid, 1337, 137, 17, 133337) result = l1.rpc.setchannel(scid, 1337, 137, 17, 133337)
@@ -1995,11 +1994,11 @@ def test_setchannel_usage(node_factory, bitcoind):
assert(db_fees[0]['htlc_minimum_msat'] == 17) assert(db_fees[0]['htlc_minimum_msat'] == 17)
assert(db_fees[0]['htlc_maximum_msat'] == 133337) assert(db_fees[0]['htlc_maximum_msat'] == 133337)
# also check for updated values in `listpeers` # also check for updated values in `listpeers`
peers = l1.rpc.listpeers()['peers'] channel = only_one(l1.rpc.listpeerchannels()['channels'])
assert peers[0]['channels'][0]['fee_base_msat'] == Millisatoshi(1337) assert channel['fee_base_msat'] == Millisatoshi(1337)
assert peers[0]['channels'][0]['fee_proportional_millionths'] == 137 assert channel['fee_proportional_millionths'] == 137
assert peers[0]['channels'][0]['minimum_htlc_out_msat'] == 17 assert channel['minimum_htlc_out_msat'] == 17
assert peers[0]['channels'][0]['maximum_htlc_out_msat'] == 133337 assert channel['maximum_htlc_out_msat'] == 133337
# wait for gossip and check if l1 sees new fees in listchannels # wait for gossip and check if l1 sees new fees in listchannels
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_BASE, 1337]) wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_BASE, 1337])
@@ -2068,9 +2067,9 @@ def test_setchannel_usage(node_factory, bitcoind):
assert(db_fees[0]['feerate_base'] == 0) assert(db_fees[0]['feerate_base'] == 0)
assert(db_fees[0]['feerate_ppm'] == 0) assert(db_fees[0]['feerate_ppm'] == 0)
# also check for updated values in `listpeers` # also check for updated values in `listpeers`
peers = l1.rpc.listpeers()['peers'] channel = only_one(l1.rpc.listpeerchannels()['channels'])
assert peers[0]['channels'][0]['fee_base_msat'] == Millisatoshi(0) assert channel['fee_base_msat'] == Millisatoshi(0)
assert peers[0]['channels'][0]['fee_proportional_millionths'] == 0 assert channel['fee_proportional_millionths'] == 0
# check also peer id can be used # check also peer id can be used
result = l1.rpc.setchannel(l2.info['id'], 142, 143) result = l1.rpc.setchannel(l2.info['id'], 142, 143)
@@ -2463,7 +2462,7 @@ def test_channel_spendable(node_factory, bitcoind):
payment_hash = inv['payment_hash'] payment_hash = inv['payment_hash']
# We should be able to spend this much, and not one msat more! # We should be able to spend this much, and not one msat more!
amount = l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] amount = l1.rpc.listpeerchannels()['channels'][0]['spendable_msat']
route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route'] route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash, payment_secret=inv['payment_secret']) l1.rpc.sendpay(route, payment_hash, payment_secret=inv['payment_secret'])
@@ -2477,16 +2476,16 @@ def test_channel_spendable(node_factory, bitcoind):
# Amount should drop to 0 once HTLC is sent; we have time, thanks to # Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin. # hold_invoice.py plugin.
wait_for(lambda: len(l1.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1) wait_for(lambda: len(l1.rpc.listpeerchannels()['channels'][0]['htlcs']) == 1)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0) assert l1.rpc.listpeerchannels()['channels'][0]['spendable_msat'] == Millisatoshi(0)
l1.rpc.waitsendpay(payment_hash, TIMEOUT) l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Make sure l2 thinks it's all over. # Make sure l2 thinks it's all over.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 0) wait_for(lambda: len(l2.rpc.listpeerchannels()['channels'][0]['htlcs']) == 0)
# Now, reverse should work similarly. # Now, reverse should work similarly.
inv = l1.rpc.invoice('any', 'inv', 'for testing') inv = l1.rpc.invoice('any', 'inv', 'for testing')
payment_hash = inv['payment_hash'] payment_hash = inv['payment_hash']
amount = l2.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] amount = l2.rpc.listpeerchannels()['channels'][0]['spendable_msat']
# Turns out we won't route this, as it's over max - reserve: # Turns out we won't route this, as it's over max - reserve:
route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route'] route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
@@ -2502,8 +2501,8 @@ def test_channel_spendable(node_factory, bitcoind):
# Amount should drop to 0 once HTLC is sent; we have time, thanks to # Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin. # hold_invoice.py plugin.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1) wait_for(lambda: len(l2.rpc.listpeerchannels()['channels'][0]['htlcs']) == 1)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0) assert l2.rpc.listpeerchannels()['channels'][0]['spendable_msat'] == Millisatoshi(0)
l2.rpc.waitsendpay(payment_hash, TIMEOUT) l2.rpc.waitsendpay(payment_hash, TIMEOUT)
@@ -2518,7 +2517,7 @@ def test_channel_receivable(node_factory, bitcoind):
payment_hash = inv['payment_hash'] payment_hash = inv['payment_hash']
# We should be able to receive this much, and not one msat more! # We should be able to receive this much, and not one msat more!
amount = l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] amount = l2.rpc.listpeerchannels()['channels'][0]['receivable_msat']
route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route'] route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash, payment_secret=inv['payment_secret']) l1.rpc.sendpay(route, payment_hash, payment_secret=inv['payment_secret'])
@@ -2532,17 +2531,17 @@ def test_channel_receivable(node_factory, bitcoind):
# Amount should drop to 0 once HTLC is sent; we have time, thanks to # Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin. # hold_invoice.py plugin.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1) wait_for(lambda: len(l2.rpc.listpeerchannels()['channels'][0]['htlcs']) == 1)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0) assert l2.rpc.listpeerchannels()['channels'][0]['receivable_msat'] == Millisatoshi(0)
l1.rpc.waitsendpay(payment_hash, TIMEOUT) l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Make sure both think it's all over. # Make sure both think it's all over.
wait_for(lambda: len(l1.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 0) wait_for(lambda: len(l1.rpc.listpeerchannels()['channels'][0]['htlcs']) == 0)
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 0) wait_for(lambda: len(l2.rpc.listpeerchannels()['channels'][0]['htlcs']) == 0)
# Now, reverse should work similarly. # Now, reverse should work similarly.
inv = l1.rpc.invoice('any', 'inv', 'for testing') inv = l1.rpc.invoice('any', 'inv', 'for testing')
payment_hash = inv['payment_hash'] payment_hash = inv['payment_hash']
amount = l1.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] amount = l1.rpc.listpeerchannels()['channels'][0]['receivable_msat']
# Turns out we won't route this, as it's over max - reserve: # Turns out we won't route this, as it's over max - reserve:
route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route'] route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
@@ -2558,8 +2557,8 @@ def test_channel_receivable(node_factory, bitcoind):
# Amount should drop to 0 once HTLC is sent; we have time, thanks to # Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin. # hold_invoice.py plugin.
wait_for(lambda: len(l1.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1) wait_for(lambda: len(l1.rpc.listpeerchannels()['channels'][0]['htlcs']) == 1)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0) assert l1.rpc.listpeerchannels()['channels'][0]['receivable_msat'] == Millisatoshi(0)
l2.rpc.waitsendpay(payment_hash, TIMEOUT) l2.rpc.waitsendpay(payment_hash, TIMEOUT)
@@ -2582,10 +2581,10 @@ def test_channel_spendable_large(node_factory, bitcoind):
payment_hash = inv['payment_hash'] payment_hash = inv['payment_hash']
# We should be able to spend this much, and not one msat more! # We should be able to spend this much, and not one msat more!
spendable = l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] spendable = l1.rpc.listpeerchannels()['channels'][0]['spendable_msat']
# receivable from the other side should calculate to the exact same amount # receivable from the other side should calculate to the exact same amount
receivable = l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] receivable = l2.rpc.listpeerchannels()['channels'][0]['receivable_msat']
assert spendable == receivable assert spendable == receivable
# route or waitsendpay will fail. # route or waitsendpay will fail.
@@ -2604,8 +2603,8 @@ def test_channel_spendable_receivable_capped(node_factory, bitcoind):
"""Test that spendable_msat and receivable_msat is capped at 2^32-1""" """Test that spendable_msat and receivable_msat is capped at 2^32-1"""
sats = 16777215 sats = 16777215
l1, l2 = node_factory.line_graph(2, fundamount=sats, wait_for_announce=False) l1, l2 = node_factory.line_graph(2, fundamount=sats, wait_for_announce=False)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0xFFFFFFFF) assert l1.rpc.listpeerchannels()['channels'][0]['spendable_msat'] == Millisatoshi(0xFFFFFFFF)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0xFFFFFFFF) assert l2.rpc.listpeerchannels()['channels'][0]['receivable_msat'] == Millisatoshi(0xFFFFFFFF)
@unittest.skipIf(True, "Test is extremely flaky") @unittest.skipIf(True, "Test is extremely flaky")
@@ -3647,7 +3646,7 @@ def test_keysend_routehint(node_factory):
routehints = [ routehints = [
[ [
{ {
'scid': l3.rpc.listpeers()['peers'][0]['channels'][0]['alias']['remote'], 'scid': only_one(l3.rpc.listpeerchannels()['channels'])['alias']['remote'],
'id': l2.info['id'], 'id': l2.info['id'],
'feebase': '1msat', 'feebase': '1msat',
'feeprop': 10, 'feeprop': 10,
@@ -3767,8 +3766,7 @@ def test_pay_peer(node_factory, bitcoind):
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 6) wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 6)
def spendable(n1, n2): def spendable(n1, n2):
peer = n1.rpc.listpeers(n2.info['id'])['peers'][0] chan = n1.rpc.listpeerchannels(n2.info['id'])['channels'][0]
chan = peer['channels'][0]
avail = chan['spendable_msat'] avail = chan['spendable_msat']
return avail return avail
@@ -3876,8 +3874,8 @@ def test_mpp_adaptive(node_factory, bitcoind):
l1.rpc.listpeers() l1.rpc.listpeers()
# Make sure neither channel can fit the payment by itself. # Make sure neither channel can fit the payment by itself.
c12 = l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0] c12 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]
c34 = l3.rpc.listpeers(l4.info['id'])['peers'][0]['channels'][0] c34 = l3.rpc.listpeerchannels(l4.info['id'])['channels'][0]
assert(c12['spendable_msat'].millisatoshis < amt) assert(c12['spendable_msat'].millisatoshis < amt)
assert(c34['spendable_msat'].millisatoshis < amt) assert(c34['spendable_msat'].millisatoshis < amt)
@@ -3885,7 +3883,7 @@ def test_mpp_adaptive(node_factory, bitcoind):
def all_htlcs(n): def all_htlcs(n):
htlcs = [] htlcs = []
for p in n.rpc.listpeers()['peers']: for p in n.rpc.listpeers()['peers']:
for c in p['channels']: for c in n.rpc.listpeerchannels(p['id'])['channels']:
htlcs += c['htlcs'] htlcs += c['htlcs']
return htlcs return htlcs
@@ -3953,7 +3951,7 @@ def test_pay_fail_unconfirmed_channel(node_factory, bitcoind):
l2.rpc.pay(invl1) l2.rpc.pay(invl1)
# Wait for us to recognize that the channel is available # Wait for us to recognize that the channel is available
wait_for(lambda: l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'].millisatoshis > amount_sat * 1000) wait_for(lambda: l1.rpc.listpeerchannels()['channels'][0]['spendable_msat'].millisatoshis > amount_sat * 1000)
# Now l1 can pay to l2. # Now l1 can pay to l2.
l1.rpc.pay(invl2) l1.rpc.pay(invl2)
@@ -3974,7 +3972,7 @@ def test_bolt11_null_after_pay(node_factory, bitcoind):
# Let the channel confirm. # Let the channel confirm.
bitcoind.generate_block(6) bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2]) sync_blockheight(bitcoind, [l1, l2])
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL') wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
amt = Millisatoshi(amount_sat * 2 * 1000) amt = Millisatoshi(amount_sat * 2 * 1000)
invl1 = l1.rpc.invoice(amt, 'j', 'j')['bolt11'] invl1 = l1.rpc.invoice(amt, 'j', 'j')['bolt11']
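This hunk and the previous one each poll the flat channel list — one for CHANNELD_NORMAL, one for spendable capacity. A hedged helper combining the two, assuming the wait_for() and only_one() test utilities and a single channel per node:

    def wait_channel_usable(node, min_spendable_msat=0):
        # Hypothetical helper: block until the (only) channel is in
        # CHANNELD_NORMAL and has at least min_spendable_msat to spend.
        wait_for(lambda: only_one(node.rpc.listpeerchannels()['channels'])['state'] == 'CHANNELD_NORMAL')
        wait_for(lambda: only_one(node.rpc.listpeerchannels()['channels'])['spendable_msat'].millisatoshis >= min_spendable_msat)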
@@ -4820,7 +4818,7 @@ def test_fetchinvoice_autoconnect(node_factory, bitcoind):
l3.rpc.pay(l2.rpc.invoice(FUNDAMOUNT * 500, 'balancer', 'balancer')['bolt11']) l3.rpc.pay(l2.rpc.invoice(FUNDAMOUNT * 500, 'balancer', 'balancer')['bolt11'])
# Make sure l2 has capacity (can still be resolving!). # Make sure l2 has capacity (can still be resolving!).
wait_for(lambda: only_one(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'])['spendable_msat'] != Millisatoshi(0)) wait_for(lambda: only_one(l2.rpc.listpeerchannels(l1.info['id'])['channels'])['spendable_msat'] != Millisatoshi(0))
l3.rpc.disconnect(l2.info['id']) l3.rpc.disconnect(l2.info['id'])
l3.rpc.call('sendinvoice', {'invreq': invreq['bolt12'], 'label': 'payme for real!'}) l3.rpc.call('sendinvoice', {'invreq': invreq['bolt12'], 'label': 'payme for real!'})
@@ -5018,7 +5016,7 @@ gives a routehint straight to us causes an issue
inv = l3.rpc.invoice(10, "test", "test")['bolt11'] inv = l3.rpc.invoice(10, "test", "test")['bolt11']
decoded = l3.rpc.decodepay(inv) decoded = l3.rpc.decodepay(inv)
assert(only_one(only_one(decoded['routes']))['short_channel_id'] assert(only_one(only_one(decoded['routes']))['short_channel_id']
== only_one(only_one(l3.rpc.listpeers()['peers'])['channels'])['alias']['remote']) == only_one(l3.rpc.listpeerchannels()['channels'])['alias']['remote'])
l3.stop() l3.stop()
with pytest.raises(RpcError, match=r'Destination .* is not reachable directly and all routehints were unusable'): with pytest.raises(RpcError, match=r'Destination .* is not reachable directly and all routehints were unusable'):
@@ -5043,8 +5041,8 @@ def test_setchannel_enforcement_delay(node_factory, bitcoind):
opts={'fee-base': 1, opts={'fee-base': 1,
'fee-per-satoshi': 10000}) 'fee-per-satoshi': 10000})
chanid1 = only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id'] chanid1 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['short_channel_id']
chanid2 = only_one(l2.rpc.getpeer(l3.info['id'])['channels'])['short_channel_id'] chanid2 = only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['short_channel_id']
route = [{'amount_msat': 1011, route = [{'amount_msat': 1011,
'id': l2.info['id'], 'id': l2.info['id'],
@@ -5159,7 +5157,7 @@ def test_sendpay_grouping(node_factory, bitcoind):
assert(invoices[0]['status'] == 'unpaid') assert(invoices[0]['status'] == 'unpaid')
# Will reconnect automatically # Will reconnect automatically
wait_for(lambda: only_one(l3.rpc.listpeers()['peers'])['connected'] is True) wait_for(lambda: only_one(l3.rpc.listpeers()['peers'])['connected'] is True)
scid = l3.rpc.listpeers()['peers'][0]['channels'][0]['short_channel_id'] scid = l3.rpc.listpeerchannels()['channels'][0]['short_channel_id']
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(scid)['channels']] == [True, True]) wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(scid)['channels']] == [True, True])
l1.rpc.pay(inv, amount_msat='420000msat') l1.rpc.pay(inv, amount_msat='420000msat')
@@ -5174,8 +5172,8 @@ def test_pay_manual_exclude(node_factory, bitcoind):
l1_id = l1.rpc.getinfo()['id'] l1_id = l1.rpc.getinfo()['id']
l2_id = l2.rpc.getinfo()['id'] l2_id = l2.rpc.getinfo()['id']
l3_id = l3.rpc.getinfo()['id'] l3_id = l3.rpc.getinfo()['id']
chan12 = l1.rpc.listpeers(l2_id)['peers'][0]['channels'][0] chan12 = l1.rpc.listpeerchannels(l2_id)['channels'][0]
chan23 = l2.rpc.listpeers(l3_id)['peers'][0]['channels'][0] chan23 = l2.rpc.listpeerchannels(l3_id)['channels'][0]
scid12 = chan12['short_channel_id'] + '/' + str(chan12['direction']) scid12 = chan12['short_channel_id'] + '/' + str(chan12['direction'])
scid23 = chan23['short_channel_id'] + '/' + str(chan23['direction']) scid23 = chan23['short_channel_id'] + '/' + str(chan23['direction'])
inv = l3.rpc.invoice(amount_msat=123000, label='label1', description='desc')['bolt11'] inv = l3.rpc.invoice(amount_msat=123000, label='label1', description='desc')['bolt11']
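scid12 and scid23 are built as direction-qualified ids (short_channel_id/direction), the form an RPC exclusion list takes. A hedged usage sketch — the 'exclude' parameter and the linear l1→l2→l3 topology are assumed from this test's intent, not shown in the hunk:

    # Excluding the l2->l3 direction should leave no route to l3,
    # so the payment is expected to fail.
    with pytest.raises(RpcError):
        l1.rpc.call('pay', {'bolt11': inv, 'exclude': [scid23]})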
@@ -5231,8 +5229,8 @@ def test_pay_middle_fail(node_factory, bitcoind, executor):
{'feerates': (1500,) * 4, {'feerates': (1500,) * 4,
'disconnect': ['-WIRE_REVOKE_AND_ACK*2']}]) 'disconnect': ['-WIRE_REVOKE_AND_ACK*2']}])
chanid12 = only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id'] chanid12 = only_one(l1.rpc.listpeerchannels(l2.info['id'])['channels'])['short_channel_id']
chanid23 = only_one(l2.rpc.getpeer(l3.info['id'])['channels'])['short_channel_id'] chanid23 = only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['short_channel_id']
# Make a failing payment. # Make a failing payment.
route = [{'amount_msat': 1011, route = [{'amount_msat': 1011,
@@ -5253,7 +5251,7 @@ def test_pay_middle_fail(node_factory, bitcoind, executor):
# l2 will go onchain since HTLC is not resolved. # l2 will go onchain since HTLC is not resolved.
bitcoind.generate_block(12) bitcoind.generate_block(12)
sync_blockheight(bitcoind, [l1, l2, l3]) sync_blockheight(bitcoind, [l1, l2, l3])
wait_for(lambda: only_one(only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'])['state'] == 'AWAITING_UNILATERAL') wait_for(lambda: only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['state'] == 'AWAITING_UNILATERAL')
# Three blocks and it will resolve the parent. # Three blocks and it will resolve the parent.
bitcoind.generate_block(3, wait_for_mempool=1) bitcoind.generate_block(3, wait_for_mempool=1)

View File

@@ -680,7 +680,7 @@ def test_openchannel_hook(node_factory, bitcoind):
# Close it. # Close it.
txid = l1.rpc.close(l2.info['id'])['txid'] txid = l1.rpc.close(l2.info['id'])['txid']
bitcoind.generate_block(1, txid) bitcoind.generate_block(1, txid)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']] == ['ONCHAIN']) wait_for(lambda: [c['state'] for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']] == ['ONCHAIN'])
# Odd amount: fails # Odd amount: fails
l1.connect(l2) l1.connect(l2)
@@ -773,11 +773,11 @@ def test_channel_state_changed_bilateral(node_factory, bitcoind):
return event return event
# check channel 'opener' and 'closer' within this testcase ... # check channel 'opener' and 'closer' within this testcase ...
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'local') assert(l1.rpc.listpeerchannels()['channels'][0]['opener'] == 'local')
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'remote') assert(l2.rpc.listpeerchannels()['channels'][0]['opener'] == 'remote')
# the 'closer' should be missing initially # the 'closer' should be missing initially
assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0] assert 'closer' not in l1.rpc.listpeerchannels()['channels'][0]
assert 'closer' not in l2.rpc.listpeers()['peers'][0]['channels'][0] assert 'closer' not in l2.rpc.listpeerchannels()['channels'][0]
event1 = wait_for_event(l1) event1 = wait_for_event(l1)
event2 = wait_for_event(l2) event2 = wait_for_event(l2)
@@ -841,8 +841,8 @@ def test_channel_state_changed_bilateral(node_factory, bitcoind):
assert(event2['message'] == "Peer closes channel") assert(event2['message'] == "Peer closes channel")
# 'closer' should now be set accordingly ... # 'closer' should now be set accordingly ...
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local') assert(l1.rpc.listpeerchannels()['channels'][0]['closer'] == 'local')
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote') assert(l2.rpc.listpeerchannels()['channels'][0]['closer'] == 'remote')
event1 = wait_for_event(l1) event1 = wait_for_event(l1)
assert(event1['old_state'] == "CHANNELD_SHUTTING_DOWN") assert(event1['old_state'] == "CHANNELD_SHUTTING_DOWN")
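The opener/closer assertions repeat the same "first channel" lookup. A hypothetical convenience wrapper (name is illustrative) that keeps these checks short:

    def first_channel(node, peer_id=None):
        # The first (here: only) channel, optionally narrowed to one peer.
        return node.rpc.listpeerchannels(peer_id)['channels'][0]

    # e.g. the bilateral-close checks above become:
    assert first_channel(l1)['opener'] == 'local'
    assert first_channel(l2)['closer'] == 'remote'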
@@ -959,7 +959,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
l1.restart() l1.restart()
wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 1) wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 1)
# check 'closer' on l2 while the peer is not yet forgotten # check 'closer' on l2 while the peer is not yet forgotten
assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local') assert(l2.rpc.listpeerchannels()['channels'][0]['closer'] == 'local')
if EXPERIMENTAL_DUAL_FUND: if EXPERIMENTAL_DUAL_FUND:
l1.daemon.wait_for_log(r'Peer has reconnected, state') l1.daemon.wait_for_log(r'Peer has reconnected, state')
l2.daemon.wait_for_log(r'Telling connectd to send error') l2.daemon.wait_for_log(r'Telling connectd to send error')
@@ -968,7 +968,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
# FIXME: l2 should re-xmit shutdown, but it doesn't until it's mined :( # FIXME: l2 should re-xmit shutdown, but it doesn't until it's mined :(
event1 = wait_for_event(l1) event1 = wait_for_event(l1)
# Doesn't have closer, since it blames the "protocol"? # Doesn't have closer, since it blames the "protocol"?
assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0] assert 'closer' not in l1.rpc.listpeerchannels()['channels'][0]
assert(event1['old_state'] == "CHANNELD_NORMAL") assert(event1['old_state'] == "CHANNELD_NORMAL")
assert(event1['new_state'] == "AWAITING_UNILATERAL") assert(event1['new_state'] == "AWAITING_UNILATERAL")
assert(event1['cause'] == "protocol") assert(event1['cause'] == "protocol")
@@ -990,7 +990,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
# Check 'closer' on l1 while the peer is not yet forgotten # Check 'closer' on l1 while the peer is not yet forgotten
event1 = wait_for_event(l1) event1 = wait_for_event(l1)
assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote') assert(l1.rpc.listpeerchannels()['channels'][0]['closer'] == 'remote')
assert(event1['old_state'] == "AWAITING_UNILATERAL") assert(event1['old_state'] == "AWAITING_UNILATERAL")
assert(event1['new_state'] == "FUNDING_SPEND_SEEN") assert(event1['new_state'] == "FUNDING_SPEND_SEEN")
@@ -1014,7 +1014,7 @@ def test_channel_state_change_history(node_factory, bitcoind):
scid = l1.get_channel_scid(l2) scid = l1.get_channel_scid(l2)
l1.rpc.close(scid) l1.rpc.close(scid)
history = l1.rpc.listpeers()['peers'][0]['channels'][0]['state_changes'] history = l1.rpc.listpeerchannels()['channels'][0]['state_changes']
if l1.config('experimental-dual-fund'): if l1.config('experimental-dual-fund'):
assert(history[0]['cause'] == "user") assert(history[0]['cause'] == "user")
assert(history[0]['old_state'] == "DUALOPEND_OPEN_INIT") assert(history[0]['old_state'] == "DUALOPEND_OPEN_INIT")
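history holds one entry per state transition, each with old_state, new_state and cause. Assuming the log records every transition in order, consecutive entries should chain together — a hedged sketch reusing the history variable from above:

    # Each entry's new_state should be the next entry's old_state.
    for prev, cur in zip(history, history[1:]):
        assert prev['new_state'] == cur['old_state']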
@@ -1121,8 +1121,8 @@ def test_htlc_accepted_hook_direct_restart(node_factory, executor):
# Check that the status mentions the HTLC being held # Check that the status mentions the HTLC being held
l2.rpc.listpeers() l2.rpc.listpeers()
peers = l2.rpc.listpeers()['peers'] channel = only_one(l2.rpc.listpeerchannels()['channels'])
htlc_status = peers[0]['channels'][0]['htlcs'][0].get('status', None) htlc_status = channel['htlcs'][0].get('status', None)
assert htlc_status == "Waiting for the htlc_accepted hook of plugin hold_htlcs.py" assert htlc_status == "Waiting for the htlc_accepted hook of plugin hold_htlcs.py"
needle = l2.daemon.logsearch_start needle = l2.daemon.logsearch_start
@@ -1892,7 +1892,7 @@ def test_watchtower(node_factory, bitcoind, directory, chainparams):
2, 2,
opts=[{'may_fail': True, 'allow_broken_log': True}, {'plugin': p}] opts=[{'may_fail': True, 'allow_broken_log': True}, {'plugin': p}]
) )
channel_id = l1.rpc.listpeers()['peers'][0]['channels'][0]['channel_id'] channel_id = l1.rpc.listpeerchannels()['channels'][0]['channel_id']
# Force a new commitment # Force a new commitment
l1.rpc.pay(l2.rpc.invoice(25000000, 'lbl1', 'desc1')['bolt11']) l1.rpc.pay(l2.rpc.invoice(25000000, 'lbl1', 'desc1')['bolt11'])
@@ -2051,7 +2051,7 @@ def test_coin_movement_notices(node_factory, bitcoind, chainparams):
# restart to test index # restart to test index
l2.restart() l2.restart()
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in l2.rpc.listpeers()['peers'])) wait_for(lambda: all(c['state'] == 'CHANNELD_NORMAL' for c in l2.rpc.listpeerchannels()['channels']))
# close the channels down # close the channels down
chan1 = l2.get_channel_scid(l1) chan1 = l2.get_channel_scid(l1)
@@ -2412,15 +2412,15 @@ def test_htlc_accepted_hook_fwdto(node_factory):
# Add some balance # Add some balance
l1.rpc.pay(l2.rpc.invoice(10**9 // 2, 'balance', '')['bolt11']) l1.rpc.pay(l2.rpc.invoice(10**9 // 2, 'balance', '')['bolt11'])
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['htlcs'] == []) wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['htlcs'] == [])
# make it forward back down the same channel. # make it forward back down the same channel.
l2.rpc.setfwdto(only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['channel_id']) l2.rpc.setfwdto(only_one(l1.rpc.listpeerchannels()['channels'])['channel_id'])
inv = l3.rpc.invoice(42, 'fwdto', '')['bolt11'] inv = l3.rpc.invoice(42, 'fwdto', '')['bolt11']
with pytest.raises(RpcError, match="WIRE_INVALID_ONION_HMAC"): with pytest.raises(RpcError, match="WIRE_INVALID_ONION_HMAC"):
l1.rpc.pay(inv) l1.rpc.pay(inv)
assert l2.rpc.listforwards()['forwards'][0]['out_channel'] == only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['short_channel_id'] assert l2.rpc.listforwards()['forwards'][0]['out_channel'] == only_one(l1.rpc.listpeerchannels()['channels'])['short_channel_id']
def test_dynamic_args(node_factory): def test_dynamic_args(node_factory):

View File

@@ -1033,9 +1033,9 @@ def test_transaction_annotations(node_factory, bitcoind):
assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding') assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding')
# And check the channel annotation on the funding output # And check the channel annotation on the funding output
peers = l1.rpc.listpeers()['peers'] channels = l1.rpc.listpeerchannels()['channels']
assert(len(peers) == 1 and len(peers[0]['channels']) == 1) assert(len(channels) == 1)
scid = peers[0]['channels'][0]['short_channel_id'] scid = channels[0]['short_channel_id']
assert(txs[1]['outputs'][fundidx]['channel'] == scid) assert(txs[1]['outputs'][fundidx]['channel'] == scid)
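With the per-peer nesting gone, grouping channels back by peer has to be done by hand where a test needs it. A hedged sketch — the 'peer_id' field name is assumed here, as it is not exercised by the hunks above:

    from collections import defaultdict

    def channels_by_peer(node):
        # Group the flat listpeerchannels output back by peer.
        grouped = defaultdict(list)
        for c in node.rpc.listpeerchannels()['channels']:
            grouped[c['peer_id']].append(c)  # 'peer_id' assumed present
        return dict(grouped)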