Mirror of https://github.com/aljazceru/lightning.git (synced 2025-12-19 07:04:22 +01:00)
tests: use listpeerchannels.

commit a2347c7452 (parent 1fa32333b9)
committed by Rusty Russell
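For context, a minimal sketch of the access-pattern change this commit applies throughout the tests, assuming a pyln-testing node object l1 with peer l2 and the only_one() test helper, exactly as they appear in the hunks below (variable names are illustrative):

    # Old: channels are nested inside each peer entry returned by listpeers().
    channels = only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']

    # New: listpeerchannels() returns the channels list directly.
    channels = l1.rpc.listpeerchannels(l2.info['id'])['channels']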
@@ -680,7 +680,7 @@ def test_openchannel_hook(node_factory, bitcoind):
     # Close it.
     txid = l1.rpc.close(l2.info['id'])['txid']
     bitcoind.generate_block(1, txid)
-    wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']] == ['ONCHAIN'])
+    wait_for(lambda: [c['state'] for c in l1.rpc.listpeerchannels(l2.info['id'])['channels']] == ['ONCHAIN'])
 
     # Odd amount: fails
     l1.connect(l2)
@@ -773,11 +773,11 @@ def test_channel_state_changed_bilateral(node_factory, bitcoind):
         return event
 
     # check channel 'opener' and 'closer' within this testcase ...
-    assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'local')
-    assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['opener'] == 'remote')
+    assert(l1.rpc.listpeerchannels()['channels'][0]['opener'] == 'local')
+    assert(l2.rpc.listpeerchannels()['channels'][0]['opener'] == 'remote')
     # the 'closer' should be missing initially
-    assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0]
-    assert 'closer' not in l2.rpc.listpeers()['peers'][0]['channels'][0]
+    assert 'closer' not in l1.rpc.listpeerchannels()['channels'][0]
+    assert 'closer' not in l2.rpc.listpeerchannels()['channels'][0]
 
     event1 = wait_for_event(l1)
     event2 = wait_for_event(l2)
@@ -841,8 +841,8 @@ def test_channel_state_changed_bilateral(node_factory, bitcoind):
     assert(event2['message'] == "Peer closes channel")
 
     # 'closer' should now be set accordingly ...
-    assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local')
-    assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote')
+    assert(l1.rpc.listpeerchannels()['channels'][0]['closer'] == 'local')
+    assert(l2.rpc.listpeerchannels()['channels'][0]['closer'] == 'remote')
 
     event1 = wait_for_event(l1)
     assert(event1['old_state'] == "CHANNELD_SHUTTING_DOWN")
@@ -959,7 +959,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
     l1.restart()
     wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 1)
     # check 'closer' on l2 while the peer is not yet forgotten
-    assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local')
+    assert(l2.rpc.listpeerchannels()['channels'][0]['closer'] == 'local')
     if EXPERIMENTAL_DUAL_FUND:
         l1.daemon.wait_for_log(r'Peer has reconnected, state')
         l2.daemon.wait_for_log(r'Telling connectd to send error')
@@ -968,7 +968,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
     # FIXME: l2 should re-xmit shutdown, but it doesn't until it's mined :(
     event1 = wait_for_event(l1)
     # Doesn't have closer, since it blames the "protocol"?
-    assert 'closer' not in l1.rpc.listpeers()['peers'][0]['channels'][0]
+    assert 'closer' not in l1.rpc.listpeerchannels()['channels'][0]
     assert(event1['old_state'] == "CHANNELD_NORMAL")
     assert(event1['new_state'] == "AWAITING_UNILATERAL")
     assert(event1['cause'] == "protocol")
@@ -990,7 +990,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
 
     # Check 'closer' on l1 while the peer is not yet forgotten
     event1 = wait_for_event(l1)
-    assert(l1.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'remote')
+    assert(l1.rpc.listpeerchannels()['channels'][0]['closer'] == 'remote')
 
     assert(event1['old_state'] == "AWAITING_UNILATERAL")
     assert(event1['new_state'] == "FUNDING_SPEND_SEEN")
@@ -1014,7 +1014,7 @@ def test_channel_state_change_history(node_factory, bitcoind):
     scid = l1.get_channel_scid(l2)
     l1.rpc.close(scid)
 
-    history = l1.rpc.listpeers()['peers'][0]['channels'][0]['state_changes']
+    history = l1.rpc.listpeerchannels()['channels'][0]['state_changes']
     if l1.config('experimental-dual-fund'):
         assert(history[0]['cause'] == "user")
         assert(history[0]['old_state'] == "DUALOPEND_OPEN_INIT")
@@ -1121,8 +1121,8 @@ def test_htlc_accepted_hook_direct_restart(node_factory, executor):
 
     # Check that the status mentions the HTLC being held
     l2.rpc.listpeers()
-    peers = l2.rpc.listpeers()['peers']
-    htlc_status = peers[0]['channels'][0]['htlcs'][0].get('status', None)
+    channel = only_one(l2.rpc.listpeerchannels()['channels'])
+    htlc_status = channel['htlcs'][0].get('status', None)
     assert htlc_status == "Waiting for the htlc_accepted hook of plugin hold_htlcs.py"
 
     needle = l2.daemon.logsearch_start
@@ -1892,7 +1892,7 @@ def test_watchtower(node_factory, bitcoind, directory, chainparams):
         2,
         opts=[{'may_fail': True, 'allow_broken_log': True}, {'plugin': p}]
     )
-    channel_id = l1.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
+    channel_id = l1.rpc.listpeerchannels()['channels'][0]['channel_id']
 
     # Force a new commitment
     l1.rpc.pay(l2.rpc.invoice(25000000, 'lbl1', 'desc1')['bolt11'])
@@ -2051,7 +2051,7 @@ def test_coin_movement_notices(node_factory, bitcoind, chainparams):
 
     # restart to test index
     l2.restart()
-    wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in l2.rpc.listpeers()['peers']))
+    wait_for(lambda: all(c['state'] == 'CHANNELD_NORMAL' for c in l2.rpc.listpeerchannels()["channels"]))
 
     # close the channels down
     chan1 = l2.get_channel_scid(l1)
@@ -2412,15 +2412,15 @@ def test_htlc_accepted_hook_fwdto(node_factory):
 
     # Add some balance
     l1.rpc.pay(l2.rpc.invoice(10**9 // 2, 'balance', '')['bolt11'])
-    wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['htlcs'] == [])
+    wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['htlcs'] == [])
 
     # make it forward back down same channel.
-    l2.rpc.setfwdto(only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['channel_id'])
+    l2.rpc.setfwdto(only_one(l1.rpc.listpeerchannels()['channels'])['channel_id'])
     inv = l3.rpc.invoice(42, 'fwdto', '')['bolt11']
     with pytest.raises(RpcError, match="WIRE_INVALID_ONION_HMAC"):
         l1.rpc.pay(inv)
 
-    assert l2.rpc.listforwards()['forwards'][0]['out_channel'] == only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['short_channel_id']
+    assert l2.rpc.listforwards()['forwards'][0]['out_channel'] == only_one(l1.rpc.listpeerchannels()['channels'])['short_channel_id']
 
 
 def test_dynamic_args(node_factory):