Mirror of https://github.com/aljazceru/lightning.git, synced 2025-12-19 23:24:27 +01:00.
pytest: Change the channel persistence test to add inflight HTLCs
Now that we have HTLC persistence we'd also like to test it. This kills the second node in the middle of an HTLC, it'll recover and finish the flow. Signed-off-by: Christian Decker <decker.christian@gmail.com>
Commit f5a412d90d (parent 11903aed6c), authored by Christian Decker, committed by Rusty Russell.
@@ -1501,8 +1501,12 @@ class LightningDTests(BaseLightningDTests):

        assert outputs[2] == 10000000

    def test_channel_persistence(self):
        # Start two nodes and open a channel (to remember)
        l1, l2 = self.connect()
        # Start two nodes and open a channel (to remember). l2 will
        # mysteriously die while committing the first HTLC so we can
        # check that HTLCs reloaded from the DB work.
        l1 = self.node_factory.get_node()
        l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])

        # Neither node should have a channel open, they are just connected
        for n in (l1, l2):
@@ -1517,13 +1521,18 @@ class LightningDTests(BaseLightningDTests):

        for n in (l1, l2):
            assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 1)

        # Perform a payment so we have something to restore
        self.pay(l1, l2, 10000)
        time.sleep(1)
        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000
        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 10000
        # Fire off a sendpay request, it'll get interrupted by a restart
        fut = self.executor.submit(self.pay, l1, l2, 10000)
        # Wait for it to be committed to, i.e., stored in the DB
        l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

        # Stop l2, l1 will reattempt to connect
        l2.stop()
        print("Killing l2 in mid HTLC")
        l2.daemon.proc.terminate()

        # Clear the disconnect and timer stop so we can proceed normally
        l2.daemon.cmd_line = [e for e in l2.daemon.cmd_line if 'disconnect' not in e]
        print(" ".join(l2.daemon.cmd_line + ['--dev-debugger=channeld']))

        # Wait for l1 to notice
        wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
@@ -1532,6 +1541,9 @@ class LightningDTests(BaseLightningDTests):

        l2.daemon.start()
        wait_for(lambda: len(l2.rpc.getpeers()['peers']) == 1)

        # Wait for the restored HTLC to finish
        wait_for(lambda: l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000, interval=1)

        wait_for(lambda: len([p for p in l1.rpc.getpeers()['peers'] if p['connected']]), interval=1)
        wait_for(lambda: len([p for p in l2.rpc.getpeers()['peers'] if p['connected']]), interval=1)
@@ -235,7 +235,7 @@ class LightningD(TailableProc):

         ]

         self.cmd_line += ["--{}={}".format(k, v) for k, v in LIGHTNINGD_CONFIG.items()]
-        self.prefix = 'lightningd'
+        self.prefix = 'lightningd(%d)' % (port)

         if not os.path.exists(lightning_dir):
             os.makedirs(lightning_dir)
Reference in New Issue
Block a user