patch refine-test_gossip_persistence.patch
committed by neil saitug
parent d8aee68ba8
commit cccce75e56
@@ -397,33 +397,37 @@ def test_gossip_persistence(node_factory, bitcoind):
     l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
     l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
 
-    l1.fund_channel(l2, 10**6)
-    l2.fund_channel(l3, 10**6)
+    scid12 = l1.fund_channel(l2, 10**6)
+    scid23 = l2.fund_channel(l3, 10**6)
 
     # Make channels public, except for l3 -> l4, which is kept local-only for now
     bitcoind.generate_block(5)
-    l3.fund_channel(l4, 10**6)
+    scid34 = l3.fund_channel(l4, 10**6)
     bitcoind.generate_block(1)
 
-    def count_active(node):
+    def active(node):
         chans = node.rpc.listchannels()['channels']
-        active = [c for c in chans if c['active']]
-        return len(active)
+        return sorted([c['short_channel_id'] for c in chans if c['active']])
+
+    def non_public(node):
+        chans = node.rpc.listchannels()['channels']
+        return sorted([c['short_channel_id'] for c in chans if not c['public']])
 
     # Channels should be activated
-    wait_for(lambda: count_active(l1) == 4)
-    wait_for(lambda: count_active(l2) == 4)
-    wait_for(lambda: count_active(l3) == 6) # 4 public + 2 local
+    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
+    wait_for(lambda: active(l2) == [scid12, scid12, scid23, scid23])
+    # This one sees its private channel
+    wait_for(lambda: active(l3) == [scid12, scid12, scid23, scid23, scid34, scid34])
 
     # l1 restarts and doesn't connect, but loads from persisted store, all
     # local channels should be disabled, leaving only the two l2 <-> l3
     # directions
     l1.restart()
-    wait_for(lambda: count_active(l1) == 2)
+    wait_for(lambda: active(l1) == [scid23, scid23])
 
     # Now reconnect, they should re-enable the two l1 <-> l2 directions
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
-    wait_for(lambda: count_active(l1) == 4)
+    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
 
     # Now spend the funding tx, generate a block and see others deleting the
     # channel from their network view
@@ -431,32 +435,26 @@ def test_gossip_persistence(node_factory, bitcoind):
     time.sleep(1)
     bitcoind.generate_block(1)
 
-    wait_for(lambda: count_active(l1) == 2)
-    wait_for(lambda: count_active(l2) == 2)
-    wait_for(lambda: count_active(l3) == 4) # 2 public + 2 local
-
-    # We should have one local-only channel
-    def count_non_public(node):
-        chans = node.rpc.listchannels()['channels']
-        nonpublic = [c for c in chans if not c['public']]
-        return len(nonpublic)
+    wait_for(lambda: active(l1) == [scid23, scid23])
+    wait_for(lambda: active(l2) == [scid23, scid23])
+    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])
 
     # The channel l3 -> l4 should be known only to them
-    assert count_non_public(l1) == 0
-    assert count_non_public(l2) == 0
-    wait_for(lambda: count_non_public(l3) == 2)
-    wait_for(lambda: count_non_public(l4) == 2)
+    assert non_public(l1) == []
+    assert non_public(l2) == []
+    wait_for(lambda: non_public(l3) == [scid34, scid34])
+    wait_for(lambda: non_public(l4) == [scid34, scid34])
 
     # Finally, it should also remember the deletion after a restart
     l3.restart()
    l4.restart()
     l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
     l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
-    wait_for(lambda: count_active(l3) == 4) # 2 public + 2 local
+    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])
 
     # Both l3 and l4 should remember their local-only channel
-    wait_for(lambda: count_non_public(l3) == 2)
-    wait_for(lambda: count_non_public(l4) == 2)
+    wait_for(lambda: non_public(l3) == [scid34, scid34])
+    wait_for(lambda: non_public(l4) == [scid34, scid34])
 
 
 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
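The patch trades opaque channel counts (e.g. count_active(l3) == 6) for comparisons against explicit, sorted lists of short channel ids, so a failing wait_for or assert now points at the exact channel direction that is missing or unexpected instead of just a wrong total. Below is a minimal, self-contained sketch of what the new helpers compute, using a made-up listchannels-style payload; the field names short_channel_id, active and public are the ones the test reads, while the scid values and the standalone function signatures are illustrative only.

def active(channels):
    # Sorted scids of channel directions currently marked active.
    # listchannels reports one entry per direction, so a fully visible
    # channel contributes its scid twice.
    return sorted(c['short_channel_id'] for c in channels if c['active'])

def non_public(channels):
    # Sorted scids of channel directions that were never announced.
    return sorted(c['short_channel_id'] for c in channels if not c['public'])

# Hypothetical data: both directions of one public channel plus one
# unannounced channel whose second direction is currently inactive.
channels = [
    {'short_channel_id': '103x1x0', 'active': True, 'public': True},
    {'short_channel_id': '103x1x0', 'active': True, 'public': True},
    {'short_channel_id': '109x1x0', 'active': True, 'public': False},
    {'short_channel_id': '109x1x0', 'active': False, 'public': False},
]

assert active(channels) == ['103x1x0', '103x1x0', '109x1x0']
assert non_public(channels) == ['109x1x0', '109x1x0']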