Mirror of https://github.com/aljazceru/lightning.git (synced 2025-12-20 15:44:21 +01:00)
pytest: Integrate with known/allowed failures
Moved the flagging for allowed failures into the factory getter, and
renamed it to `may_fail`. Also stopped the teardown of a node from
throwing an exception if we are allowed to exit non-cleanly.

Signed-off-by: Christian Decker <decker.christian@gmail.com>
Committed by: Rusty Russell
Parent: c1f4c86589
Commit: 0b350d2f5f
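
The change threads one flag from the factory getter through the node object
to teardown. A minimal sketch of the pattern with simplified stand-in classes
(`FakeDaemon`, `Node`, `NodeFactory` here are illustrative, not the
repository's exact code):

    # Minimal sketch: a factory-created node remembers whether it is
    # allowed to fail, and teardown only raises when a clean exit was
    # expected. FakeDaemon is an illustrative stand-in for the real daemon.

    class FakeDaemon(object):
        def __init__(self, returncode=0):
            self.returncode = returncode

        def stop(self):
            return self.returncode

    class Node(object):
        def __init__(self, daemon, may_fail=False):
            self.daemon = daemon
            self.may_fail = may_fail

        def stop(self):
            rc = self.daemon.stop()
            # A non-zero exit code is only an error if this node was
            # expected to shut down cleanly.
            if rc != 0 and not self.may_fail:
                raise ValueError("Node did not exit cleanly, rc={}".format(rc))
            return rc

    class NodeFactory(object):
        def __init__(self):
            self.nodes = []

        def get_node(self, may_fail=False):
            # The flag is set once, at creation time, instead of being
            # toggled mid-test via an allow_failure() call.
            node = Node(FakeDaemon(), may_fail=may_fail)
            self.nodes.append(node)
            return node
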
@@ -87,7 +87,7 @@ class NodeFactory(object):
         self.nodes = []
         self.executor = executor
 
-    def get_node(self, disconnect=None, options=None):
+    def get_node(self, disconnect=None, options=None, may_fail=False):
         node_id = self.next_id
         self.next_id += 1
 
@@ -107,7 +107,7 @@ class NodeFactory(object):
         daemon.cmd_line.append(options)
         rpc = LightningRpc(socket_path, self.executor)
 
-        node = utils.LightningNode(daemon, rpc, bitcoind, self.executor)
+        node = utils.LightningNode(daemon, rpc, bitcoind, self.executor, may_fail=may_fail)
         self.nodes.append(node)
         if VALGRIND:
             node.daemon.cmd_line = [
@@ -164,7 +164,7 @@ class BaseLightningDTests(unittest.TestCase):
         return 1 if errors else 0
 
     def getCrashLog(self, node):
-        if node.known_fail:
+        if node.may_fail:
            return None, None
        try:
            crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
@@ -731,7 +731,7 @@ class LightningDTests(BaseLightningDTests):
     def test_penalty_inhtlc(self):
         """Test penalty transaction with an incoming HTLC"""
         # We suppress each one after first commit; HTLC gets added not fulfilled.
-        l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
+        l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'], may_fail=True)
         l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
 
         l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
@@ -770,7 +770,6 @@ class LightningDTests(BaseLightningDTests):
 
         l2.daemon.wait_for_log('-> ONCHAIND_CHEATED')
         # FIXME: l1 should try to stumble along!
-        l1.allow_failure()
 
         # l2 should spend all of the outputs (except to-us).
         # Could happen in any order, depending on commitment tx.
@@ -790,7 +789,7 @@ class LightningDTests(BaseLightningDTests):
     def test_penalty_outhtlc(self):
         """Test penalty transaction with an outgoing HTLC"""
         # First we need to get funds to l2, so suppress after second.
-        l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED*3'])
+        l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED*3'], may_fail=True)
         l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED*3'])
 
         l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
@@ -832,7 +831,6 @@ class LightningDTests(BaseLightningDTests):
 
         l2.daemon.wait_for_log('-> ONCHAIND_CHEATED')
         # FIXME: l1 should try to stumble along!
-        l1.allow_failure()
 
         # l2 should spend all of the outputs (except to-us).
         # Could happen in any order, depending on commitment tx.
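
In the tests, the mid-test `allow_failure()` call is replaced by declaring
the expectation up front. Condensed before/after, taken from the hunks above:

    # Before: flag toggled mid-test, after the cheat was detected.
    l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
    # ... test drives l1 into a penalty scenario ...
    l1.allow_failure()

    # After: the expectation is declared once, at node creation.
    l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'],
                                    may_fail=True)
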
@@ -252,12 +252,12 @@ class LightningD(TailableProc):
         return self.proc.returncode
 
 class LightningNode(object):
-    def __init__(self, daemon, rpc, btc, executor):
+    def __init__(self, daemon, rpc, btc, executor, may_fail=False):
         self.rpc = rpc
         self.daemon = daemon
         self.bitcoin = btc
         self.executor = executor
-        self.known_fail = False
+        self.may_fail = may_fail
 
     # Use batch if you're doing more than one async.
     def connect(self, remote_node, capacity, async=False):
@@ -323,12 +323,6 @@ class LightningNode(object):
         db.close()
         return result
 
-    # FIXME: we should flag daemon on startup, suppress error
-    def allow_failure(self):
-        """Note that a daemon has (deliberately) crashed, so we don't fail
-        on cleanup"""
-        self.known_fail = True
-
     def stop(self, timeout=10):
         """ Attempt to do a clean shutdown, but kill if it hangs
         """
@@ -346,7 +340,7 @@ class LightningNode(object):
         if rc is None:
             rc = self.daemon.stop()
 
-        if rc != 0:
+        if rc != 0 and not self.may_fail:
             raise ValueError("Node did not exit cleanly, rc={}".format(rc))
         else:
             return rc
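
With this in place, a test that deliberately crashes a node opts in once at
creation time, and teardown tolerates the crash. A hypothetical test body
(names follow the diff above; the crash scenario itself is elided):

    def test_deliberate_crash(self):
        # Opt in at creation: this node is allowed to exit non-cleanly.
        l1 = self.node_factory.get_node(may_fail=True)
        # ... drive l1 into a deliberate crash ...
        # Teardown: stop() returns the exit code instead of raising,
        # because may_fail suppresses the ValueError.
        rc = l1.stop()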