Mirror of https://github.com/aljazceru/plugins.git (synced 2025-12-19 14:14:20 +01:00)
Caching writes was causing us issues if `lightningd`'s startup didn't complete: the writes would happen on the live DB, but we'd forget about them. Since the backup.lock file lives in the same directory the plugin is running in, we no longer have to defer the writes, which hugely simplifies our logic.

Fixes #155

Changelog-Fixed: backup: The plugin no longer loses sync if startup is interrupted
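The following is a minimal, hypothetical sketch (not the plugin's actual code) of the write-through behaviour the commit describes: because backup.lock is held next to the database and guarantees exclusive access, each db_write hook call can be forwarded to the backup backend immediately instead of being cached until lightningd finishes starting up. The names Backend, StreamingBackup and on_db_write are illustrative assumptions, not the plugin's real API.

# Sketch only: stream db_write changes straight through instead of caching them.
# Class and method names are illustrative, not taken from the plugin.

class Backend:
    """Hypothetical destination for incremental database changes."""

    def add_change(self, data_version: int, writes: list) -> None:
        raise NotImplementedError


class StreamingBackup:
    def __init__(self, backend: Backend):
        self.backend = backend

    def on_db_write(self, data_version: int, writes: list) -> dict:
        # Write through immediately: with backup.lock held there is no
        # concurrent writer, so there is no need to defer (and possibly
        # lose) changes while lightningd is still starting up.
        self.backend.add_change(data_version, writes)
        return {"result": "continue"}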
192 lines · 6.3 KiB · Python
from flaky import flaky
from pyln.client import RpcError
from pyln.testing.fixtures import *
import os
import pytest
import subprocess


plugin_dir = os.path.dirname(__file__)
plugin_path = os.path.join(plugin_dir, "backup.py")
cli_path = os.path.join(os.path.dirname(__file__), "backup-cli")

# For the transition period we require deprecated_apis to be true
deprecated_apis = True


def test_start(node_factory, directory):
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    subprocess.check_call([cli_path, "init", bpath, bdest])
    opts = {
        'plugin': plugin_path,
        'allow-deprecated-apis': deprecated_apis,
    }
    l1 = node_factory.get_node(options=opts, cleandir=False)
    plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']]
    assert("backup.py" in plugins)

    # Restart the node a couple of times, to check that we can resume normally
    for i in range(5):
        l1.restart()
        plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']]
        assert("backup.py" in plugins)


def test_start_no_init(node_factory, directory):
    """The plugin should refuse to start if we haven't initialized the backup."""
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    opts = {
        'plugin': plugin_path,
    }
    l1 = node_factory.get_node(
        options=opts, cleandir=False, may_fail=True, start=False
    )

    with pytest.raises(ValueError):
        # The way we detect a failure to start is when we attempt to connect
        # to the RPC.
        l1.start()
    assert(l1.daemon.is_in_log(
        r'Could not find backup.lock in the lightning-dir'
    ))


def test_init_not_empty(node_factory, directory):
    """We want to add backups to an existing lightning node.

    backup-cli init should start the backup with an initial snapshot.
    """
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    l1 = node_factory.get_node()
    l1.stop()

    out = subprocess.check_output([cli_path, "init", bpath, bdest])
    assert(b'Found an existing database' in out)
    assert(b'Successfully written initial snapshot' in out)

    # Now restart and add the plugin
    l1.daemon.opts['plugin'] = plugin_path
    l1.daemon.opts['allow-deprecated-apis'] = deprecated_apis
    l1.start()
    assert(l1.daemon.is_in_log(r'plugin-backup.py: Versions match up'))


def test_tx_abort(node_factory, directory):
    """Simulate a crash between hook call and DB commit.

    We simulate this by updating the data_version var in the database before
    restarting the node. This desyncs the node from the backup, and restoring
    may not work (depending on which transaction was pretend-rolled-back), but
    continuing should work fine, since it can happen that we crash just
    in between the hook call and the DB transaction.
    """
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    subprocess.check_call([cli_path, "init", bpath, bdest])
    opts = {
        'plugin': plugin_path,
        'allow-deprecated-apis': deprecated_apis,
    }
    l1 = node_factory.get_node(options=opts, cleandir=False)
    l1.stop()

    print(l1.db.query("SELECT * FROM vars;"))

    # Now fudge the data_version:
    l1.db.execute("UPDATE vars SET intval = intval - 1 WHERE name = 'data_version'")

    print(l1.db.query("SELECT * FROM vars;"))

    l1.restart()
    assert(l1.daemon.is_in_log(r'Last changes not applied'))


@flaky
def test_failing_restore(node_factory, directory):
    """The node database has lost state, make sure we abort.

    We simulate a loss of transactions by manually resetting the data_version
    in the database back to n-2, which is non-recoverable.
    """
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    subprocess.check_call([cli_path, "init", bpath, bdest])
    opts = {
        'plugin': plugin_path,
        'allow-deprecated-apis': deprecated_apis,
    }

    def section(comment):
        print("=" * 25, comment, "=" * 25)

    section("Starting node for the first time")
    l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True)
    l1.stop()

    # Now fudge the data_version:
    section("Simulating a restore of an old version")
    l1.db.execute("UPDATE vars SET intval = intval - 2 WHERE name = 'data_version'")

    section("Restarting node, should fail")
    with pytest.raises(Exception):
        l1.start()

    l1.daemon.proc.wait()
    section("Verifying the node died with an error")
    assert(l1.daemon.is_in_log(r'lost some state') is not None)


def test_intermittent_backup(node_factory, directory):
    """Simulate intermittent use of the backup, or an old file backup."""
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    subprocess.check_call([cli_path, "init", bpath, bdest])
    opts = {
        'plugin': plugin_path,
        'allow-deprecated-apis': deprecated_apis,
    }
    l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True)

    # Now start without the plugin. This should work fine.
    del l1.daemon.opts['plugin']
    l1.restart()

    # Now restart adding the plugin again, and it should fail due to gaps in
    # the backup.
    l1.stop()
    with pytest.raises(Exception):
        l1.daemon.opts.update(opts)
        l1.start()

    l1.daemon.proc.wait()
    assert(l1.daemon.is_in_log(r'Backup is out of date') is not None)


def test_restore(node_factory, directory):
    bpath = os.path.join(directory, 'lightning-1', 'regtest')
    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
    os.makedirs(bpath)
    subprocess.check_call([cli_path, "init", bpath, bdest])
    opts = {
        'plugin': plugin_path,
        'allow-deprecated-apis': deprecated_apis,
    }
    l1 = node_factory.get_node(options=opts, cleandir=False)
    l1.stop()

    rdest = os.path.join(bpath, 'lightningd.sqlite.restore')
    subprocess.check_call([cli_path, "restore", bdest, rdest])