Mirror of https://github.com/aljazceru/plugins.git (synced 2026-01-06 23:04:19 +01:00)

Rename directory 'archive' to 'Unmaintained'
38
Unmaintained/autopilot/README.md
Normal file
@@ -0,0 +1,38 @@
# Autopilot

This is a version of Rene Pickhardt's [Autopilot library][lib] ported as a
Core-Lightning plugin.

> :warning: This plugin is still being ported and may not yet reflect
> the entire functionality. :construction:

## Command line options

The plugin exposes the following new command line options (an example
configuration follows the list):

- `--autopilot-percent`: What percentage of funds should be under the
  autopilot's control? You may not want the autopilot to manage all of your
  funds, in case you still want to open a channel manually. This parameter
  limits the amount the plugin will use to manage its own channels. The default
  value is 75% of available funds.
- `--autopilot-num-channels`: How many channels should the autopilot aim for?
  The default is 10 channels overall, including any manually opened channels.
- `--autopilot-min-channel-size-msat`: Minimum channel size to open. The
  plugin will never open channels smaller than this amount. The default value is
  100000000msat = 1 mBTC.
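
These options can be set like any other `lightningd` option, for example in the
configuration file. A minimal sketch (the path and values are illustrative only):

```
plugin=/path/to/autopilot.py
autopilot-percent=50
autopilot-num-channels=12
autopilot-min-channel-size-msat=200000000msat
```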

## JSON-RPC methods

The plugin also exposes the following method:

- `autopilot-run-once`: lets the plugin inspect the current state of
  channels and, if required, search for candidate peers to open new
  channels with. The optional argument `dryrun` runs the recommendation
  but does not actually connect to the peers or open channels.
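
For example, a dry run can be invoked from the command line roughly like this
(older `lightning-cli` versions may need the `-k` flag to pass the keyword
argument):

```bash
lightning-cli autopilot-run-once dryrun=true
```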

At the time of writing, the recommendations may take considerable time and
consume a lot of CPU cycles due to the use of multiple algorithms that are not
tuned to the network's size.


[lib]: https://github.com/ElementsProject/lightning/pull/1888
0
Unmaintained/autopilot/__init__.py
Normal file
221
Unmaintained/autopilot/autopilot.py
Executable file
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from bech32 import bech32_decode, convertbits
|
||||
from lib_autopilot import Autopilot, Strategy
|
||||
from pyln.client import Millisatoshi, Plugin, RpcError
|
||||
import random
|
||||
import threading
|
||||
import math
|
||||
import networkx as nx
|
||||
import dns.resolver
|
||||
import time
|
||||
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
|
||||
class CLightning_autopilot(Autopilot):
|
||||
|
||||
def __init__(self, rpc):
|
||||
self.__rpc_interface = rpc
|
||||
|
||||
plugin.log("No input specified download graph from peers")
|
||||
G = self.__download_graph()
|
||||
Autopilot.__init__(self, G)
|
||||
|
||||
def __get_seed_keys(self):
|
||||
"""
|
||||
retrieve the nodeids of the ln seed nodes from lseed.bitcoinstats.com
|
||||
"""
|
||||
domain = "lseed.bitcoinstats.com"
|
||||
srv_records = dns.resolver.resolve(domain, "SRV")
|
||||
res = []
|
||||
for srv in srv_records:
|
||||
bech32 = str(srv.target).rstrip(".").split(".")[0]
|
||||
data = bech32_decode(bech32)[1]
|
||||
decoded = convertbits(data, 5, 4)
|
||||
res.append("".join(
|
||||
['{:1x}'.format(integer) for integer in decoded])[:-1])
|
||||
return res
|
||||
|
||||
def __connect_to_seeds(self):
|
||||
"""
|
||||
sets up peering connection to seed nodes of the lightning network
|
||||
|
||||
This is necessary in case the node operating the autopilot has never
|
||||
been connected to the lightning network.
|
||||
"""
|
||||
seed_keys = self.__get_seed_keys()
|
||||
random.shuffle(seed_keys)
|
||||
for nodeid in seed_keys:
|
||||
try:
|
||||
plugin.log(f"peering with node: {nodeid}")
|
||||
self.__rpc_interface.connect(nodeid)
|
||||
# FIXME: better strategy than sleep(2) for building up
|
||||
time.sleep(2)
|
||||
except RpcError as e:
|
||||
plugin.log(f"Unable to connect to node: {nodeid} {str(e)}", 'warn')
|
||||
|
||||
def __download_graph(self):
|
||||
"""
Downloads a local copy of the node's view of the lightning network

This copy is retrieved via listnodes and listchannels RPC calls and will
thus be incomplete as peering might not be ready yet.
"""
|
||||
|
||||
# FIXME: it is a real problem that we don't know how many nodes there
# could be. In particular, billion-node networks will outgrow memory
|
||||
G = nx.Graph()
|
||||
plugin.log("Instantiated networkx graph to store the lightning network")
|
||||
|
||||
nodes = []
|
||||
plugin.log("Attempt RPC-call to download nodes from the lightning network")
|
||||
try:
|
||||
while len(nodes) == 0:
|
||||
peers = self.__rpc_interface.listpeers()["peers"]
|
||||
if len(peers) < 1:
|
||||
self.__connect_to_seeds()
|
||||
nodes = self.__rpc_interface.listnodes()["nodes"]
|
||||
except ValueError as e:
|
||||
plugin.log("Node list could not be retrieved from the peers of the lightning network", 'error')
|
||||
raise e
|
||||
|
||||
for node in nodes:
|
||||
G.add_node(node["nodeid"], **node)
|
||||
|
||||
plugin.log(f"Number of nodes found and added to the local networkx graph: {len(nodes)}")
|
||||
|
||||
channels = {}
|
||||
try:
|
||||
plugin.log("Attempt RPC-call to download channels from the lightning network")
|
||||
channels = self.__rpc_interface.listchannels()["channels"]
|
||||
plugin.log(f"Number of retrieved channels: {len(channels)}")
|
||||
except ValueError:
|
||||
plugin.log("Channel list could not be retrieved from the peers of the lightning network")
|
||||
return False
|
||||
|
||||
for channel in channels:
|
||||
G.add_edge(
|
||||
channel["source"],
|
||||
channel["destination"],
|
||||
**channel)
|
||||
|
||||
return G
|
||||
|
||||
def connect(self, candidates, balance=1000000, dryrun=False):
|
||||
pdf = self.calculate_statistics(candidates)
|
||||
connection_dict = self.calculate_proposed_channel_capacities(pdf, balance)
|
||||
messages = []
|
||||
for nodeid, fraction in connection_dict.items():
|
||||
try:
|
||||
satoshis = min(math.ceil(int(balance) * float(fraction)), 16777215)
|
||||
messages.append(f"Try to open channel with a capacity of {satoshis} to node {nodeid}")
|
||||
plugin.log(messages[-1])
|
||||
if not dryrun:
|
||||
self.__rpc_interface.connect(nodeid)
|
||||
self.__rpc_interface.fundchannel(nodeid, satoshis, None, True, 0)
|
||||
except ValueError as e:
|
||||
messages.append(f"Could not open a channel to {nodeid} with capacity of {satoshis}. Error: {str(e)}")
|
||||
plugin.log(messages[-1], 'error')
|
||||
return messages
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(configuration, options, plugin):
|
||||
plugin.num_channels = int(options['autopilot-num-channels'])
|
||||
plugin.percent = int(options['autopilot-percent'])
|
||||
plugin.min_capacity_msat = Millisatoshi(options['autopilot-min-channel-size-msat'])
|
||||
plugin.initialized = threading.Event()
|
||||
plugin.autopilot = None
|
||||
plugin.initerror = None
|
||||
plugin.log('Initialized autopilot function')
|
||||
|
||||
def initialize_autopilot():
|
||||
try:
|
||||
plugin.autopilot = CLightning_autopilot(plugin.rpc)
|
||||
except Exception as e:
|
||||
plugin.initerror = e
|
||||
plugin.initialized.set()
|
||||
|
||||
# Load the autopilot in the background and have it notify
|
||||
# dependents once we're finished.
|
||||
threading.Thread(target=initialize_autopilot, daemon=True).start()
|
||||
|
||||
|
||||
@plugin.method('autopilot-run-once')
|
||||
def run_once(plugin, dryrun=False):
|
||||
"""
|
||||
Run the autopilot manually one time.
|
||||
|
||||
The argument 'dryrun' can be set to True in order to just output what would
|
||||
be done without actually opening any channels.
|
||||
"""
|
||||
# Let's start by inspecting the current state of the node
|
||||
funds = plugin.rpc.listfunds()
|
||||
awaiting_lockin_msat = Millisatoshi(sum([o['our_amount_msat'] for o in funds['channels'] if o['state'] == 'CHANNELD_AWAITING_LOCKIN']))
|
||||
onchain_msat = Millisatoshi(sum([o['amount_msat'] for o in funds['outputs'] if o['status'] == 'confirmed'])) - awaiting_lockin_msat
|
||||
channels = funds['channels']
|
||||
available_funds = onchain_msat / 100.0 * plugin.percent
|
||||
|
||||
# Now we can look whether and how we'd like to open new channels. This
|
||||
# depends on available funds and the number of channels we were configured
|
||||
# to open
|
||||
|
||||
if available_funds < plugin.min_capacity_msat:
|
||||
message = f"Too low available funds: {available_funds} < {plugin.min_capacity_msat}"
|
||||
plugin.log(message)
|
||||
return message
|
||||
|
||||
if len(channels) >= plugin.num_channels:
|
||||
message = f"Already have {len(channels)} channels. Aim is for {plugin.num_channels}."
|
||||
plugin.log(message)
|
||||
return message
|
||||
|
||||
num_channels = min(
|
||||
int(available_funds / plugin.min_capacity_msat),
|
||||
plugin.num_channels - len(channels)
|
||||
)
|
||||
|
||||
# Each channel will have this capacity
|
||||
channel_capacity = available_funds / num_channels
|
||||
|
||||
plugin.log(f"I'd like to open {num_channels} new channels with {channel_capacity} satoshis each")
|
||||
|
||||
plugin.initialized.wait()
|
||||
if plugin.initerror:
|
||||
message = f"Error: autopilot had initialization errors: {str(plugin.initerror)}"
|
||||
plugin.log(message, 'error')
|
||||
return message
|
||||
|
||||
candidates = plugin.autopilot.find_candidates(
|
||||
num_channels,
|
||||
strategy=Strategy.DIVERSE,
|
||||
percentile=0.5
|
||||
)
|
||||
return plugin.autopilot.connect(candidates, available_funds / 1000, dryrun=dryrun)
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'autopilot-percent',
|
||||
'75',
|
||||
'What percentage of funds should be under the autopilots control?'
|
||||
)
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'autopilot-num-channels',
|
||||
'10',
|
||||
'How many channels should the autopilot aim for?'
|
||||
)
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'autopilot-min-channel-size-msat',
|
||||
'100000000msat',
|
||||
'Minimum channel size to open.',
|
||||
'string'
|
||||
)
|
||||
|
||||
|
||||
plugin.run()
|
||||
86
Unmaintained/autopilot/bech32.py
Normal file
@@ -0,0 +1,86 @@
|
||||
# Copyright (c) 2017 Pieter Wuille
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
|
||||
"""subset of the reference implementation for Bech32 addresses."""
|
||||
|
||||
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
|
||||
|
||||
|
||||
def bech32_polymod(values):
|
||||
"""Internal function that computes the Bech32 checksum."""
|
||||
generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
|
||||
chk = 1
|
||||
for value in values:
|
||||
top = chk >> 25
|
||||
chk = (chk & 0x1ffffff) << 5 ^ value
|
||||
for i in range(5):
|
||||
chk ^= generator[i] if ((top >> i) & 1) else 0
|
||||
return chk
|
||||
|
||||
|
||||
def bech32_hrp_expand(hrp):
|
||||
"""Expand the HRP into values for checksum computation."""
|
||||
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
|
||||
|
||||
|
||||
def bech32_verify_checksum(hrp, data):
|
||||
"""Verify a checksum given HRP and converted data characters."""
|
||||
return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1
|
||||
|
||||
|
||||
def bech32_decode(bech):
|
||||
"""Validate a Bech32 string, and determine HRP and data."""
|
||||
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
|
||||
(bech.lower() != bech and bech.upper() != bech)):
|
||||
return (None, None)
|
||||
bech = bech.lower()
|
||||
pos = bech.rfind('1')
|
||||
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
|
||||
return (None, None)
|
||||
if not all(x in CHARSET for x in bech[pos + 1:]):
|
||||
return (None, None)
|
||||
hrp = bech[:pos]
|
||||
data = [CHARSET.find(x) for x in bech[pos + 1:]]
|
||||
if not bech32_verify_checksum(hrp, data):
|
||||
return (None, None)
|
||||
return (hrp, data[:-6])
|
||||
|
||||
|
||||
def convertbits(data, frombits, tobits, pad=True):
|
||||
"""General power-of-2 base conversion."""
|
||||
acc = 0
|
||||
bits = 0
|
||||
ret = []
|
||||
maxv = (1 << tobits) - 1
|
||||
max_acc = (1 << (frombits + tobits - 1)) - 1
|
||||
for value in data:
|
||||
if value < 0 or (value >> frombits):
|
||||
return None
|
||||
acc = ((acc << frombits) | value) & max_acc
|
||||
bits += frombits
|
||||
while bits >= tobits:
|
||||
bits -= tobits
|
||||
ret.append((acc >> bits) & maxv)
|
||||
if pad:
|
||||
if bits:
|
||||
ret.append((acc << (tobits - bits)) & maxv)
|
||||
elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
|
||||
return None
|
||||
return ret
|
||||
114
Unmaintained/autopilot/c-lightning-autopilot.py
Normal file
@@ -0,0 +1,114 @@
|
||||
'''
|
||||
Created on 04.09.2018
|
||||
|
||||
@author: rpickhardt
|
||||
|
||||
This software is a command line tool and Core-Lightning wrapper for lib_autopilot
|
||||
|
||||
You need to have a Core-Lightning node running in order to utilize this program.
|
||||
Also you need lib_autopilot. You can run
|
||||
|
||||
python3 c-lightning-autopilot.py --help
|
||||
|
||||
in order to get all the command line options
|
||||
|
||||
usage: c-lightning-autopilot.py [-h] [-b BALANCE] [-c CHANNELS]
|
||||
[-r PATH_TO_RPC_INTERFACE]
|
||||
[-s {diverse,merge}] [-p PERCENTILE_CUTOFF]
|
||||
[-d] [-i INPUT]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-b BALANCE, --balance BALANCE
|
||||
use specified number of satoshis to open all channels
|
||||
-c CHANNELS, --channels CHANNELS
|
||||
opens specified amount of channels
|
||||
-r PATH_TO_RPC_INTERFACE, --path_to_rpc_interface PATH_TO_RPC_INTERFACE
|
||||
specifies the path to the rpc_interface
|
||||
-s {diverse,merge}, --strategy {diverse,merge}
|
||||
defines the strategy
|
||||
-p PERCENTILE_CUTOFF, --percentile_cutoff PERCENTILE_CUTOFF
|
||||
only uses the top percentile of each probability
|
||||
distribution
|
||||
-d, --dont_store don't store the network on the hard drive
|
||||
-i INPUT, --input INPUT
|
||||
points to a pickle file
|
||||

a good example call of the program could look like this:

python3 c-lightning-autopilot.py -s diverse -c 30 -b 10000000

This call would use up to 10'000'000 satoshi to create 30 channels which are
generated by using the diverse strategy to mix the 4 heuristics.

Currently the software does not check if sufficient funds are available
or if a channel already exists.
'''
|
||||
|
||||
from os.path import expanduser
|
||||
import argparse
|
||||
import logging
|
||||
import math
|
||||
import pickle
|
||||
import sys
|
||||
|
||||
from pyln.client import LightningRpc
|
||||
import dns.resolver
|
||||
|
||||
from bech32 import bech32_decode, CHARSET, convertbits
|
||||
from lib_autopilot import Autopilot
|
||||
from lib_autopilot import Strategy
|
||||
import networkx as nx
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("-b", "--balance",
|
||||
help="use specified number of satoshis to open all channels")
|
||||
parser.add_argument("-c", "--channels",
|
||||
help="opens specified amount of channels")
|
||||
# FIXME: add the following command line option
|
||||
# parser.add_argument("-m", "--maxchannels",
|
||||
# help="opens channels as long as maxchannels is not reached")
|
||||
parser.add_argument("-r", "--path_to_rpc_interface",
|
||||
help="specifies the path to the rpc_interface")
|
||||
parser.add_argument("-s", "--strategy", choices=[Strategy.DIVERSE, Strategy.MERGE],
|
||||
help="defines the strategy ")
|
||||
parser.add_argument("-p", "--percentile_cutoff",
|
||||
help="only uses the top percentile of each probability distribution")
|
||||
parser.add_argument("-d", "--dont_store", action='store_true',
|
||||
help="don't store the network on the hard drive")
|
||||
parser.add_argument("-i", "--input",
|
||||
help="points to a pickle file")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# FIXME: find ln-dir from lightningd.
|
||||
path = expanduser("~/.lightning/lightning-rpc")
|
||||
if args.path_to_rpc_interface is not None:
|
||||
path = expanduser(args.path_to_rpc_interface)
|
||||
|
||||
balance = 1000000
|
||||
if args.balance is not None:
|
||||
# FIXME: parser.argument does not accept type = int
|
||||
balance = int(args.balance)
|
||||
|
||||
num_channels = 21
|
||||
if args.channels is not None:
|
||||
# FIXME: parser.argument does not accept type = int
|
||||
num_channels = int(args.channels)
|
||||
|
||||
percentile = None
|
||||
if args.percentile_cutoff is not None:
|
||||
# FIXME: parser.argument does not accept type = float
|
||||
percentile = float(args.percentile_cutoff)
|
||||
|
||||
autopilot = CLightning_autopilot(path, input=args.input,
|
||||
dont_store=args.dont_store)
|
||||
|
||||
candidates = autopilot.find_candidates(num_channels,
|
||||
strategy=args.strategy,
|
||||
percentile=percentile)
|
||||
|
||||
autopilot.connect(candidates, balance)
|
||||
print("Autopilot finished. We hope it did a good job for you (and the lightning network). Thanks for using it.")
|
||||
425
Unmaintained/autopilot/lib_autopilot.py
Normal file
@@ -0,0 +1,425 @@
|
||||
'''
|
||||
Created on 26.08.2018
|
||||
|
||||
@author: rpickhardt
|
||||

lib_autopilot is a library which, based on a networkx graph, tries to
predict which channels should be added for a new node on the network. The
long-term goal is to generate a lightning network with good topological properties.

This library currently uses 4 heuristics to select channels and supports
two strategies for combining those heuristics:
1.) Diverse: tries to get nodes from every distribution
2.) Merge: builds the mixture distribution of the 4 heuristics

The library also estimates how much of the funds should be used for every newly
added channel. This is achieved by looking at the average channel capacity
of the suggested channel partners. A probability distribution which is
proportional to those capacities is created and smoothed with the uniform
distribution.

The 4 heuristics for channel partner suggestion are:

1.) Random: following the Erdős-Rényi model, nodes are drawn from a uniform
distribution
2.) Central: nodes are sampled from a distribution proportional to the
betweenness centrality of nodes
3.) Decrease Diameter: nodes are sampled from a distribution which
favors badly connected nodes
4.) Richness: nodes with high liquidity are taken and sampled from a
uniform distribution of those

The library is supposed to be extended by a simulation framework which can
be used to evaluate which strategies are useful in the long term. For this,
heavy computations (like centrality measures) might have to be reimplemented
in a more dynamic way.

Also it is important to understand that this program is not optimized to run
efficiently on large-scale graphs with more than 100k nodes or on densely
connected graphs.

The program needs the following dependencies:
pip install networkx numpy
'''
|
||||
"""
|
||||
ideas:
|
||||
* should we respect our own channel balances?
|
||||
* respect node life time / uptime? or time of channels?
|
||||
* include more statistics of the network:
|
||||
* allow autopilots of various nodes to exchange some information
|
||||
* exchange algorithms if the network grows.
|
||||
* include better handling for duplicates and existing channels
|
||||
* cap number of channels for well connected nodes.
|
||||
* channel balance of automatic channels should not be more than 50% of
|
||||
cummulative channel balance of destination node
|
||||
|
||||
|
||||
next steps:
|
||||
* test if the rankings from the heuristics are statistically independent
|
||||
* evaluate / simulate which method produces graphs with desirable properties
|
||||
"""
|
||||
|
||||
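# Example usage (a minimal sketch; the node ids below are made up). The edge
# attribute "satoshis" and the node attribute "alias" are what the heuristics
# and calculate_statistics() expect to find on the graph:
#
#   import networkx as nx
#   G = nx.Graph()
#   G.add_node("02aa...", alias="alice")
#   G.add_node("03bb...", alias="bob")
#   G.add_edge("02aa...", "03bb...", satoshis=1_000_000)
#
#   pilot = Autopilot(G)
#   candidates = pilot.find_candidates(21, strategy=Strategy.DIVERSE, percentile=0.5)
#   pdf = pilot.calculate_statistics(candidates)
#   pdf = pilot.calculate_proposed_channel_capacities(pdf, balance=1_000_000)
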
from operator import itemgetter
|
||||
import logging
|
||||
import math
|
||||
import pickle
|
||||
import networkx as nx
|
||||
import numpy as np
|
||||
|
||||
|
||||
class Strategy:
|
||||
# define constants. Never changed as they are part of the API
|
||||
DIVERSE = "diverse"
|
||||
MERGE = "merge"
|
||||
|
||||
|
||||
class Autopilot():
|
||||
|
||||
def __init__(self, G):
|
||||
self.__add_logger()
|
||||
self.G = G
|
||||
|
||||
def __add_logger(self):
|
||||
""" initiates the logging service for this class """
|
||||
# FIXME: adapt to the settings that are proper for you
|
||||
self.__logger = logging.getLogger('lib-autopilot')
|
||||
self.__logger.setLevel(logging.INFO)
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(logging.INFO)
|
||||
formatter = logging.Formatter(
|
||||
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
ch.setFormatter(formatter)
|
||||
self.__logger.addHandler(ch)
|
||||
|
||||
def __sample_from_pdf(self, pdf, k=21):
|
||||
"""
|
||||
helper function to quickly sample from a pdf encoded in a dictionary
|
||||
"""
|
||||
if type(k) is not int:
|
||||
raise TypeError("__sample_from: k must be an integer variable")
|
||||
if k < 0 or k > 21000:
|
||||
raise ValueError("__sample_from: k must be between 0 and 21000")
|
||||
|
||||
keys, v = zip(*list(pdf.items()))
|
||||
if k >= len(keys):
|
||||
return keys
|
||||
res = np.random.choice(keys, k, replace=False, p=v)
|
||||
return res
|
||||
|
||||
def __sample_from_percentile(self, pdf, percentile=0.5, num_items=21):
|
||||
"""
|
||||
only look at the most likely items and sample from those
|
||||
"""
|
||||
if not percentile:
|
||||
return self.__sample_from_pdf(pdf, num_items)
|
||||
|
||||
if type(percentile) is not float:
|
||||
raise TypeError("percentile must be a floating point variable")
|
||||
if percentile < 0 or percentile > 1:
|
||||
raise ValueError("percentile must be btween 0 and 1")
|
||||
|
||||
cumsum = 1 # Avoid division by 0
|
||||
used_pdf = {}
|
||||
for n, value in sorted(
|
||||
pdf.items(), key=itemgetter(1), reverse=True):
|
||||
cumsum += value
|
||||
used_pdf[n] = value
|
||||
if cumsum > percentile:
|
||||
break
|
||||
|
||||
used_pdf = {k: v / cumsum for k, v in used_pdf.items()}
|
||||
return self.__sample_from_pdf(used_pdf, num_items)
|
||||
|
||||
def __get_uniform_pdf(self):
|
||||
"""
|
||||
Generates a uniform distribution of all nodes in the graph
|
||||
|
||||
In opposite to other methods there are no arguments for smoothing
|
||||
or skewing since this would not do anything to the uniform
|
||||
distribution
|
||||
"""
|
||||
pdf = {n: 1 for n in self.G.nodes()}
|
||||
length = len(pdf)
|
||||
return {k: v / length for k, v in pdf.items()}
|
||||
|
||||
def __get_centrality_pdf(self, skew=False, smooth=False):
|
||||
"""
produces a probability distribution which is proportional to nodes' betweenness centrality scores

the betweenness centrality counts on how many shortest paths a node lies.
Connecting to those nodes will most likely make them even more central;
however, it is good for the node running this operation, as this node
itself gets a position in the network which is close to central nodes

this distribution can be skewed and smoothed
"""
|
||||
self.__logger.info(
|
||||
"CENTRALITY_PDF: Try to generate a PDF proportional to centrality scores")
|
||||
pdf = {}
|
||||
cumsum = 0
|
||||
for n, score in nx.betweenness_centrality(self.G).items():
|
||||
pdf[n] = score
|
||||
cumsum += score
|
||||
|
||||
# renormalize result
|
||||
pdf = {k: v / (cumsum + 1) for k, v in pdf.items()}
|
||||
self.__logger.info(
|
||||
"CENTRALITY_PDF: Generated pdf")
|
||||
|
||||
if skew and smooth:
|
||||
self.__logger.info(
|
||||
"CENTRALITY_PDF: Won't skew and smooth distribution ignore both")
|
||||
smooth = False
|
||||
skew = False
|
||||
return self.__manipulate_pdf(pdf, skew, smooth)
|
||||
|
||||
def __get_rich_nodes_pdf(self, skew=False, smooth=False):
|
||||
"""
Get a PDF proportional to the cumulative capacity of nodes

The probability density function is calculated by looking at the
cumulative capacity of all channels one node is part of.

The method can skew the pdf by taking the squares of the
sums of capacities after deriving a pdf. If one wishes, the method
can also be smoothed by taking the mixture distribution with the
uniform distribution.

Skewing and smoothing are controlled via the arguments skew and smooth
"""
|
||||
self.__logger.info(
|
||||
"RICH_PDF: Try to retrieve a PDF proportional to capacities")
|
||||
|
||||
rich_nodes = {}
|
||||
network_capacity = 1 # Avoid division by 0
|
||||
candidates = []
|
||||
for n in self.G.nodes():
|
||||
total_capacity = sum(
|
||||
self.G.get_edge_data(
|
||||
n, m)["satoshis"] for m in self.G.neighbors(n))
|
||||
network_capacity += total_capacity
|
||||
rich_nodes[n] = total_capacity
|
||||
|
||||
rich_nodes = {k: v / network_capacity for k, v in rich_nodes.items()}
|
||||
|
||||
self.__logger.info(
|
||||
"RICH_PDF: Generated a PDF proportional to capacities")
|
||||
|
||||
if skew and smooth:
|
||||
self.__logger.info(
|
||||
"RICH_PDF: Can't skew and smooth distribution ignore both")
|
||||
smooth = False
|
||||
skew = False
|
||||
|
||||
return self.__manipulate_pdf(rich_nodes, skew, smooth)
|
||||
|
||||
def __get_long_path_pdf(self, skew=True, smooth=False):
|
||||
"""
A probability distribution in which badly connected nodes are likely

This method looks at all-pairs shortest paths, takes the sum of all
path lengths for each node and derives a probability distribution
from the sums. The idea of this method is to find nodes which are
increasing the diameter of the network.

The method will by default skew the pdf by taking the squares of the
sums of path lengths before deriving a pdf. If one wishes, the method
can also be smoothed by taking the mixture distribution with the
uniform distribution.

Skewing and smoothing are controlled via the arguments skew and smooth
"""
|
||||
if skew and smooth:
|
||||
self.__logger.info(
|
||||
"DECREASE DIAMETER: Can't skew and smooth distribution ignore smoothing")
|
||||
smooth = False
|
||||
|
||||
path_pdf = {}
|
||||
self.__logger.info(
|
||||
"DECREASE DIAMETER: Generating probability density function")
|
||||
|
||||
all_pair_shortest_path_lengths = nx.shortest_path_length(self.G)
|
||||
|
||||
for node, paths in all_pair_shortest_path_lengths:
|
||||
path_sum = sum(length for _, length in paths.items())
|
||||
path_pdf[node] = path_sum
|
||||
|
||||
s = sum(path_pdf.values())
|
||||
path_pdf = {k: v / (s + 1) for k, v in path_pdf.items()}
|
||||
self.__logger.info(
|
||||
"DECREASE DIAMETER: probability density function created")
|
||||
|
||||
path_pdf = self.__manipulate_pdf(path_pdf, skew, smooth)
|
||||
|
||||
return path_pdf
|
||||
|
||||
def __manipulate_pdf(self, pdf, skew=True, smooth=False):
|
||||
"""
|
||||
helper function to skew or smooth a probability distribution
|
||||
|
||||
skewing is achieved by taking the squares of probabilities and
|
||||
re normalize
|
||||
|
||||
smoothing is achieved by taking the mixture distribution with the
|
||||
uniform distribution
|
||||
|
||||
smoothing and skewing are not inverse to each other but should also
|
||||
not happen at the same time. The method will however not prevent this
|
||||
"""
|
||||
if not skew and not smooth: # nothing to do
|
||||
return pdf
|
||||
length = len(pdf)
|
||||
if skew:
|
||||
self.__logger.info(
|
||||
"manipulate_pdf: Skewing the probability density function")
|
||||
pdf = {k: v**2 for k, v in pdf.items()}
|
||||
s = sum(pdf.values())
|
||||
pdf = {k: v / (s+1) for k, v in pdf.items()}
|
||||
|
||||
if smooth:
|
||||
self.__logger.info(
|
||||
"manipulate_pdf: Smoothing the probability density function")
|
||||
pdf = {k: 0.5 * v + 0.5 / length for k, v in pdf.items()}
|
||||
|
||||
return pdf
|
||||
|
||||
def __create_pdfs(self):
|
||||
res = {}
|
||||
res["path"] = self.__get_long_path_pdf()
|
||||
res["centrality"] = self.__get_centrality_pdf()
|
||||
res["rich"] = self.__get_rich_nodes_pdf()
|
||||
res["uniform"] = self.__get_uniform_pdf()
|
||||
return res
|
||||
|
||||
def calculate_statistics(self, candidates):
|
||||
"""
|
||||
computes statistics of the candidate set about connectivity, wealth
|
||||
and returns a probability density function (pdf) which encodes which
|
||||
percentage of the funds should be used for each channel with each
|
||||
candidate node
|
||||
|
||||
the pdf is proportional to the average balance of each candidate and
|
||||
smoothed with a uniform distribution currently the smoothing is just a
|
||||
weighted arithmetic mean with a weight of 0.3 for the uniform
|
||||
distribution.
|
||||
"""
|
||||
pdf = {}
|
||||
for candidate in candidates:
|
||||
neighbors = list(self.G.neighbors(candidate))
|
||||
capacity = sum([self.G.get_edge_data(candidate, n)
|
||||
["satoshis"] for n in neighbors])
|
||||
average = capacity / (1 + len(neighbors))
|
||||
pdf[candidate] = average
|
||||
cumsum = max(1, sum(pdf.values()))
|
||||
pdf = {k: v / cumsum for k, v in pdf.items()}
|
||||
w = 0.7
|
||||
print("percentage smoothed percentage capacity numchannels alias")
|
||||
print("----------------------------------------------------------------------")
|
||||
res_pdf = {}
|
||||
for k, v in pdf.items():
|
||||
neighbors = list(self.G.neighbors(k))
|
||||
capacity = sum([self.G.get_edge_data(k, n)["satoshis"]
|
||||
for n in neighbors])
|
||||
name = k
|
||||
if "alias" in self.G.nodes[k]:
|
||||
name = self.G.nodes[k]["alias"]
|
||||
print("{:12.2f} ".format(100 * v),
|
||||
"{:12.2f} ".format(
|
||||
100 * (w * v + (1 - w) / len(candidates))),
|
||||
"{:10} {:10} ".format(capacity,
|
||||
len(neighbors)),
|
||||
name)
|
||||
res_pdf[k] = (w * v + (1 - w) / len(candidates))
|
||||
return res_pdf
|
||||
|
||||
def calculate_proposed_channel_capacities(self, pdf, balance=1000000):
|
||||
minimal_channel_balance = 20000  # lnd uses 20k satoshi which seems reasonable
|
||||
|
||||
min_probability = min(pdf.values())
|
||||
needed_total_balance = math.ceil(
|
||||
minimal_channel_balance / min_probability)
|
||||
self.__logger.info(
|
||||
"Need at least a balance of {} satoshi to open {} channels".format(
|
||||
needed_total_balance, len(pdf)))
|
||||
while int(needed_total_balance) > int(balance) and len(pdf) > 1:
|
||||
min_val = min(pdf.values())
|
||||
k = [k for k, v in pdf.items() if v == min_val][0]
|
||||
self.__logger.info(
|
||||
"Not enough balance to open {} channels. Remove node: {} and rebalance pdf for channel balances".format(
|
||||
len(pdf), k))
|
||||
del pdf[k]
|
||||
|
||||
s = sum(pdf.values())
|
||||
pdf = {k: v / s for k, v in pdf.items()}
|
||||
|
||||
min_probability = min(pdf.values())
|
||||
needed_total_balance = math.ceil(
|
||||
minimal_channel_balance / min_probability)
|
||||
self.__logger.info(
|
||||
"Need at least a balance of {} satoshi to open {} channels".format(
|
||||
needed_total_balance, len(pdf)))
|
||||
|
||||
return pdf
|
||||
|
||||
def find_candidates(self, num_items=21, strategy=Strategy.DIVERSE,
|
||||
percentile=None):
|
||||
self.__logger.info("running the autopilot on a graph with {} nodes and {} edges.".format(
|
||||
len(self.G.nodes()), len(self.G.edges())))
|
||||
"""
|
||||
Generates candidates with several strategies
|
||||
"""
|
||||
sub_k = math.ceil(num_items / 4)
|
||||
self.__logger.info(
|
||||
"GENERATE CANDIDATES: Try to generate up to {} nodes with 4 strategies: (random, central, network Improvement, liquidity)".format(num_items))
|
||||
# FIXME: should remember from where nodes are known
|
||||
|
||||
res = self.__create_pdfs()
|
||||
|
||||
candidats = set()
|
||||
# FIXME: Run simulations to decide the following problem:
|
||||
"""
|
||||
we can either do a global sampling by merging all probability
|
||||
distributions and sample once from them or we can sample from
|
||||
each probability distribution and merge the results. These processes
|
||||
are obviously not commutative and we need to check which one seems
|
||||
more reasonable.
|
||||
My (renepickhardt) gut feeling says several samples which are
merged gives the best of all worlds, where the other method would
probably result in something that is either pretty uniform or
dominated by one very skewed distribution. As mentioned, this needs
|
||||
to be tested
|
||||
"""
|
||||
if strategy == Strategy.DIVERSE:
|
||||
for strategy, pdf in res.items():
|
||||
tmp = self.__sample_from_percentile(pdf, percentile, sub_k)
|
||||
candidats = candidats.union(set(tmp))
|
||||
|
||||
elif strategy == Strategy.MERGE:
|
||||
merged = {}
|
||||
denominator = len(res)
|
||||
for pdf in res.values():
|
||||
for k, v in pdf.items():
|
||||
if k not in merged:
|
||||
merged[k] = v / denominator
|
||||
else:
|
||||
merged[k] += v / denominator
|
||||
candidats = self.__sample_from_percentile(merged, percentile,
|
||||
num_items)
|
||||
"""
|
||||
following code prints a list of candidates for debugging
|
||||
for k in res:
|
||||
if "alias" in self.G.nodes[key[k]]:
|
||||
print(pdf[key[k]], self.G.nodes[key[k]]["alias"])
|
||||
"""
|
||||
|
||||
if len(candidats) > num_items:
|
||||
candidats = np.random.choice(list(candidats), num_items, replace=False)
|
||||
|
||||
self.__logger.info(
|
||||
"GENERATE CANDIDATES: Found {} nodes with which channel creation is suggested".format(
|
||||
len(candidats)))
|
||||
return candidats
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
print("This lib needs to be given a network graph so you need to create a wrapper")
|
||||
30
Unmaintained/autopilot/pyproject.toml
Normal file
@@ -0,0 +1,30 @@
|
||||
[tool.poetry]
|
||||
name = "cln-autopilot"
|
||||
version = "0.1.0"
|
||||
description = "Automatically manage lightning channels (OUTDATED)"
|
||||
authors = ["Rene Pickhardt <@renepickhardt>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.7"
|
||||
pyln-client = "0.12.1"
|
||||
dnspython = "^2.2.0"
|
||||
numpy = [
|
||||
{ version = "^1.24", python = ">=3.8,<3.12" },
|
||||
{ version = "^1.19", python = "<3.8" }
|
||||
]
|
||||
networkx = [
|
||||
{ version = "^2.8", python = ">=3.8" },
|
||||
{ version = "^2.6", python = ">=3.7,<3.8" }
|
||||
]
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
pyln-testing = "0.12.1"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
pytest-rerunfailures = "^10.3"
|
||||
pytest-timeout = "^2.1.0"
|
||||
pytest-xdist = "^3.1.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
28
Unmaintained/autopilot/test_autopilot.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import os
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
import unittest
|
||||
|
||||
CI = os.environ.get('CI') in ('True', 'true')
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "autopilot.py")
|
||||
plugin_opt = {'plugin': plugin_path}
|
||||
|
||||
|
||||
def test_starts(node_factory):
|
||||
l1 = node_factory.get_node(allow_broken_log=True)
|
||||
# Test dynamically
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.stop()
|
||||
# Then statically
|
||||
l1.daemon.opts["plugin"] = plugin_path
|
||||
l1.start()
|
||||
|
||||
|
||||
@unittest.skipIf(CI, "Test autopilot is hanging on DNS request")
|
||||
def test_main(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts=plugin_opt)
|
||||
# just call main function
|
||||
res = l1.rpc.autopilot_run_once(dryrun=True)
|
||||
l1.daemon.wait_for_log("I'd like to open [0-9]+ new channels with [0-9]+msat satoshis each")
|
||||
82
Unmaintained/backup/README.md
Normal file
@@ -0,0 +1,82 @@
# A simple and reliable backup plugin

**This version only supports the default SQLite3 database**

This plugin maintains a clean database backup in another location. It uses
the `db_write` hook to make sure you always have a backup that is not missing
any state updates and is not potentially harmful.

Related info about backup solutions: https://github.com/ElementsProject/lightning/blob/master/doc/BACKUP.md

## Installation

There are some Python dependencies. You can install them using `poetry`:

```bash
poetry install
```


## Setup

Before the backup plugin can be used it has to be initialized once. The following
command creates `/mnt/external/location/file.bkp` as the backup file and references
it in a `backup.lock` file in the lightning directory; the lock file stores the
plugin's internal state and makes sure no two instances use the same backup.
(Make sure to stop your Lightning node before running this command.)

```bash
poetry run ./backup-cli init --lightning-dir ~/.lightning/bitcoin file:///mnt/external/location/file.bkp
```

Notes:
- If you are not using the default lightning directory you'll need to
  change `~/.lightning/bitcoin` in the command line to point to that
  directory instead.
- You should use some non-local SSH or NFS mount as the destination,
  otherwise any failure of the disk may result in both the original
  as well as the backup being corrupted.
- Currently only the `file:///` URL scheme is supported.

## IMPORTANT note about hsm_secret

**You need to secure `~/.lightning/bitcoin/hsm_secret` once! This
file will not change, but without this file the database backup will be
unusable!**

Make sure it has user read-only permissions, otherwise `lightningd` will refuse
to work: `chmod 0400 hsm_secret`


## Running

In order to tell `lightningd` to use the plugin you can either pass the
startup option `--plugin /path/to/backup.py`, place the plugin (or a
symlink to it) in the lightning plugin directory (`~/.lightning/plugins`), or
add it to the `lightningd` configuration (`important-plugin=/path/to/backup.py`).
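
For example, the configuration-file variant could look like this (the path is a
placeholder for wherever the plugin actually lives):

```
# in ~/.lightning/config
important-plugin=/path/to/backup.py
```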

On daemon startup the plugin will check the integrity of the existing backup
and complain if there is a version mismatch.


## Performing backup compaction

A backup compaction incorporates the incremental updates into a single snapshot.
This reduces the size of the backup file and the time needed to
restore the backup. It can be done through the plugin command `backup-compact`:

```
lightning-cli backup-compact
```

Be aware that this can take a long time depending on the size of the backup
and I/O speeds, and the daemon will not be reachable while it runs.

## Restoring a backup

If things got really messed up and you need to reinstall Core-Lightning, you can
restore the database backup using the `backup-cli` utility:

```bash
./backup-cli restore file:///mnt/external/location/file.bkp ~/.lightning/bitcoin/lightningd.sqlite3
```
125
Unmaintained/backup/backend.py
Normal file
@@ -0,0 +1,125 @@
|
||||
from collections import namedtuple
|
||||
import os
|
||||
import re
|
||||
from typing import Iterator
|
||||
|
||||
import sqlite3
|
||||
from tqdm import tqdm
|
||||
|
||||
# A 'transaction' that was proposed by Core-Lightning and that needs saving to the
|
||||
# backup. `version` is the `data_version` of the database **after** `transaction`
|
||||
# has been applied. A 'snapshot' represents a complete copy of the database.
|
||||
# This is used by the plugin from time to time to allow the backend to compress
|
||||
# the changelog and forms a new basis for the backup.
|
||||
# If `Change` contains a snapshot and a transaction, they apply in that order.
|
||||
Change = namedtuple('Change', ['version', 'snapshot', 'transaction'])
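# For illustration only (hypothetical values): an incremental change carrying
# SQL statements, and a change carrying a full snapshot.
#
#   incremental = Change(version=42, snapshot=None,
#                        transaction=["UPDATE vars SET intval=42 WHERE name='data_version'"])
#   full = Change(version=43,
#                 snapshot=open('lightningd.sqlite3', 'rb').read(),
#                 transaction=None)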
|
||||
|
||||
|
||||
class Backend(object):
|
||||
def __init__(self, destination: str):
|
||||
"""Read the metadata from the destination and prepare any necessary resources.
|
||||
|
||||
After this call the following members must be initialized:
|
||||
|
||||
- backend.version: the last data version we wrote to the backend
|
||||
- backend.prev_version: the previous data version in case we need to
|
||||
roll back the last one
|
||||
"""
|
||||
self.version = None
|
||||
self.prev_version = None
|
||||
raise NotImplementedError
|
||||
|
||||
def add_change(self, change: Change) -> bool:
|
||||
"""Add a single change to the backend.
|
||||
|
||||
This call should always make sure that the change has been correctly
|
||||
written and flushed before returning.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def initialize(self) -> bool:
|
||||
"""Set up any resources needed by this backend.
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def stream_changes(self) -> Iterator[Change]:
|
||||
"""Retrieve changes from the backend in order to perform a restore.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def rewind(self) -> bool:
|
||||
"""Remove the last change that was added to the backup
|
||||
|
||||
Because the transaction is reported to the backup plugin before it is
|
||||
being committed to the database it can happen that we get notified
|
||||
about a transaction but then `lightningd` is stopped and the
|
||||
transaction is not committed. This means the backup includes an
|
||||
extraneous transaction which needs to be removed. A backend must allow
|
||||
a single rewind operation, and should fail additional calls to rewind
|
||||
(we may have at most one pending transaction not being committed at
|
||||
any time).
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def compact(self):
|
||||
"""Apply some incremental changes to the snapshot to reduce our size.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def _db_open(self, dest: str) -> sqlite3.Connection:
|
||||
db = sqlite3.connect(dest)
|
||||
db.execute("PRAGMA foreign_keys = 1")
|
||||
return db
|
||||
|
||||
def _restore_snapshot(self, snapshot: bytes, dest: str):
|
||||
if os.path.exists(dest):
|
||||
os.unlink(dest)
|
||||
with open(dest, 'wb') as f:
|
||||
f.write(snapshot)
|
||||
self.db = self._db_open(dest)
|
||||
|
||||
def _rewrite_stmt(self, stmt: str) -> str:
|
||||
"""We had a stmt expansion bug in Core-Lightning, this replicates the fix.
|
||||
|
||||
We were expanding statements incorrectly, missing some
|
||||
whitespace between a param and the `WHERE` keyword. This
|
||||
re-inserts the space.
|
||||
|
||||
"""
|
||||
stmt = re.sub(r'reserved_til=([0-9]+)WHERE', r'reserved_til=\1 WHERE', stmt)
|
||||
stmt = re.sub(r'peer_id=([0-9]+)WHERE channels.id=', r'peer_id=\1 WHERE channels.id=', stmt)
|
||||
return stmt
|
||||
|
||||
def _restore_transaction(self, tx: Iterator[str]):
|
||||
assert self.db
|
||||
cur = self.db.cursor()
|
||||
for q in tx:
|
||||
q = self._rewrite_stmt(q)
|
||||
cur.execute(q)
|
||||
|
||||
def restore(self, dest: str, remove_existing: bool = False):
|
||||
"""Restore the backup in this backend to its former glory.
|
||||
|
||||
If `dest` is a directory, we assume the default database filename:
|
||||
lightningd.sqlite3
|
||||
"""
|
||||
if os.path.isdir(dest):
|
||||
dest = os.path.join(dest, "lightningd.sqlite3")
|
||||
if os.path.exists(dest):
|
||||
if not remove_existing:
|
||||
raise ValueError(
|
||||
"Destination for backup restore exists: {dest}".format(
|
||||
dest=dest
|
||||
)
|
||||
)
|
||||
os.unlink(dest)
|
||||
|
||||
self.db = self._db_open(dest)
|
||||
for c in tqdm(self.stream_changes(), total=self.version_count):
|
||||
if c.snapshot is not None:
|
||||
self._restore_snapshot(c.snapshot, dest)
|
||||
if c.transaction is not None:
|
||||
self._restore_transaction(c.transaction)
|
||||
self.db.commit()
|
||||
33
Unmaintained/backup/backends.py
Normal file
@@ -0,0 +1,33 @@
|
||||
'''Create a backend instance based on URI scheme dispatch.'''
|
||||
from typing import Mapping, Type
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from backend import Backend
|
||||
from socketbackend import SocketBackend
|
||||
from filebackend import FileBackend
|
||||
|
||||
|
||||
def resolve_backend_class(backend_url):
|
||||
|
||||
backend_map: Mapping[str, Type[Backend]] = {
|
||||
'file': FileBackend,
|
||||
'socket': SocketBackend,
|
||||
}
|
||||
p = urlparse(backend_url)
|
||||
backend_cl = backend_map.get(p.scheme, None)
|
||||
return backend_cl
|
||||
|
||||
|
||||
def get_backend(destination, create=False, require_init=False):
|
||||
backend_cl = resolve_backend_class(destination)
|
||||
if backend_cl is None:
|
||||
raise ValueError("No backend implementation found for {destination}".format(
|
||||
destination=destination,
|
||||
))
|
||||
backend = backend_cl(destination, create=create)
|
||||
initialized = backend.initialize()
|
||||
if require_init and not initialized:
|
||||
kill("Could not initialize the backup {}, please use 'backup-cli' to initialize the backup first.".format(destination))
|
||||
assert backend.version is not None
|
||||
assert backend.prev_version is not None
|
||||
return backend
|
||||
100
Unmaintained/backup/backup-cli
Executable file
@@ -0,0 +1,100 @@
|
||||
#!/usr/bin/env python3
|
||||
from backends import get_backend
|
||||
from backend import Change
|
||||
from server import SocketServer, setup_server_logging
|
||||
|
||||
import os
|
||||
import click
|
||||
import json
|
||||
import logging
|
||||
import sqlite3
|
||||
import sys
|
||||
|
||||
root = logging.getLogger()
|
||||
root.setLevel(logging.INFO)
|
||||
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
handler.setLevel(logging.DEBUG)
|
||||
formatter = logging.Formatter('%(message)s')
|
||||
handler.setFormatter(formatter)
|
||||
root.addHandler(handler)
|
||||
|
||||
@click.command()
|
||||
@click.argument("backend-url")
|
||||
@click.option('--lightning-dir', type=click.Path(exists=True), default=None, help='Use an existing lightning directory (default: initialize an empty backup).')
|
||||
def init(lightning_dir, backend_url):
|
||||
destination = backend_url
|
||||
backend = get_backend(destination, create=True)
|
||||
|
||||
if lightning_dir is not None:
|
||||
lock_file = os.path.join(lightning_dir, "backup.lock")
|
||||
db_file = os.path.join(lightning_dir, "lightningd.sqlite3")
|
||||
|
||||
with open(lock_file, "w") as f:
|
||||
f.write(json.dumps({
|
||||
'backend_url': destination,
|
||||
}))
|
||||
|
||||
data_version = 0
|
||||
if os.path.exists(db_file):
|
||||
print("Found an existing database at {db_file}, initializing the backup with a snapshot".format(db_file=db_file))
|
||||
# Peek into the DB to see if we have a data_version to start from
|
||||
db = sqlite3.connect(db_file)
|
||||
cur = db.cursor()
|
||||
rows = cur.execute("SELECT intval FROM vars WHERE name = 'data_version'")
|
||||
data_version = rows.fetchone()[0]
|
||||
|
||||
snapshot = Change(
|
||||
version=data_version,
|
||||
snapshot=open(db_file, 'rb').read(),
|
||||
transaction=None
|
||||
)
|
||||
if not backend.add_change(snapshot):
|
||||
print("Could not write snapshot to backend")
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("Successfully written initial snapshot to {destination}".format(destination=destination))
|
||||
else:
|
||||
print("Database does not exist yet, created an empty backup file")
|
||||
|
||||
print("Initialized backup backend {destination}, you can now start Core-Lightning".format(
|
||||
destination=destination,
|
||||
))
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.argument("backend-url")
|
||||
@click.argument("restore-destination")
|
||||
def restore(backend_url, restore_destination):
|
||||
destination = backend_url
|
||||
backend = get_backend(destination)
|
||||
backend.restore(restore_destination)
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.argument("backend-url")
|
||||
@click.argument("addr")
|
||||
@click.option('--log-mode', type=click.Choice(['plain', 'systemd'], case_sensitive=False), default='plain', help='Debug log mode, defaults to plain')
|
||||
@click.option('--log-level', type=click.Choice(['debug', 'info', 'notice', 'warning', 'error', 'critical'], case_sensitive=False), default='info', help='Debug log level, defaults to info')
|
||||
def server(backend_url, addr, log_mode, log_level):
|
||||
backend = get_backend(backend_url)
|
||||
addr, port = addr.split(':')
|
||||
port = int(port)
|
||||
|
||||
setup_server_logging(log_mode, log_level)
|
||||
|
||||
server = SocketServer((addr, port), backend)
|
||||
server.run()
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli():
|
||||
pass
|
||||
|
||||
|
||||
cli.add_command(init)
|
||||
cli.add_command(restore)
|
||||
cli.add_command(server)
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
144
Unmaintained/backup/backup.py
Executable file
@@ -0,0 +1,144 @@
|
||||
#!/usr/bin/env python3
|
||||
from pyln.client import Plugin
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import psutil
|
||||
|
||||
from backend import Change
|
||||
from backends import get_backend
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
root = logging.getLogger()
|
||||
root.setLevel(logging.INFO)
|
||||
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
handler.setLevel(logging.DEBUG)
|
||||
formatter = logging.Formatter('%(message)s')
|
||||
handler.setFormatter(formatter)
|
||||
root.addHandler(handler)
|
||||
|
||||
|
||||
def check_first_write(plugin, data_version):
|
||||
"""Verify that we are up-to-date and Core-Lightning didn't forget writes.
|
||||
|
||||
We may be at most 1 write off:
|
||||
|
||||
- Core-Lightning and backup are at the same version (happy case)
- Core-Lightning is 1 write behind: it must have crashed between calling the
hook and committing the DB transaction.
- Core-Lightning is one or more writes ahead: either we lost some writes, or
Core-Lightning was running without the plugin at some point -> crash!
- Core-Lightning is more than 1 write behind: Core-Lightning had a lobotomy, or
was restored from an old backup -> crash!
"""
|
||||
backend = plugin.backend
|
||||
|
||||
logging.info("Comparing backup version {} versus first write version {}".format(
|
||||
backend.version, data_version
|
||||
))
|
||||
|
||||
if backend.version == data_version - 1:
|
||||
logging.info("Versions match up")
|
||||
return True
|
||||
|
||||
elif backend.prev_version == data_version - 1 and plugin.backend.rewind():
|
||||
logging.info("Last changes not applied, rewinding non-committed transaction")
|
||||
return True
|
||||
|
||||
elif backend.prev_version > data_version - 1:
|
||||
kill("Core-Lightning seems to have lost some state (failed restore?). Emergency shutdown.")
|
||||
|
||||
else:
|
||||
kill("Backup is out of date, we cannot continue safely. Emergency shutdown.")
|
||||
|
||||
|
||||
@plugin.hook('db_write')
|
||||
def on_db_write(writes, data_version, plugin, **kwargs):
|
||||
change = Change(data_version, None, writes)
|
||||
if not plugin.initialized:
|
||||
assert check_first_write(plugin, change.version)
|
||||
plugin.initialized = True
|
||||
|
||||
if plugin.backend.add_change(change):
|
||||
return {"result": "continue"}
|
||||
else:
|
||||
kill("Could not append DB change to the backup. Need to shutdown!")
|
||||
|
||||
|
||||
@plugin.method("backup-compact")
|
||||
def compact(plugin):
|
||||
"""Perform a backup compaction.
|
||||
|
||||
Synchronously restores the DB from the backup, initializes a new
|
||||
backup from the restored DB, and then swaps out the backup
|
||||
file. This can be used to reduce the backup file's size as well as
|
||||
speeding up an eventual recovery by rolling in the incremental
|
||||
changes into the snapshot.
|
||||
|
||||
"""
|
||||
return plugin.backend.compact()
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def on_init(options, **kwargs):
|
||||
dest = options.get('backup-destination', 'null')
|
||||
if dest != 'null':
|
||||
plugin.log(
|
||||
"The `--backup-destination` option is deprecated and will be "
|
||||
"removed in future versions of the backup plugin. Please remove "
|
||||
"it from your configuration. The destination is now determined by "
|
||||
"the `backup.lock` file in the lightning directory",
|
||||
level="warn"
|
||||
)
|
||||
|
||||
# IMPORTANT NOTE
|
||||
# Putting RPC stuff in init() like the following can cause deadlocks!
|
||||
# See: https://github.com/lightningd/plugins/issues/209
|
||||
# configs = plugin.rpc.listconfigs()
|
||||
# if not configs['wallet'].startswith('sqlite3'):
|
||||
# kill("The backup plugin only works with the sqlite3 database.")
|
||||
|
||||
|
||||
def kill(message: str):
|
||||
plugin.log(message)
|
||||
time.sleep(1)
|
||||
# Search for lightningd in my ancestor processes:
|
||||
procs = [p for p in psutil.Process(os.getpid()).parents()]
|
||||
for p in procs:
|
||||
if p.name() != 'lightningd':
|
||||
continue
|
||||
plugin.log("Killing process {name} ({pid})".format(
|
||||
name=p.name(),
|
||||
pid=p.pid
|
||||
))
|
||||
p.kill()
|
||||
|
||||
# Sleep forever, just in case the master doesn't die on us...
|
||||
while True:
|
||||
time.sleep(30)
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'backup-destination', None,
|
||||
'UNUSED. Kept for backward compatibility only. Please update your configuration to remove this option.'
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Did we perform the first write check?
|
||||
plugin.initialized = False
|
||||
if not os.path.exists("backup.lock"):
|
||||
kill("Could not find backup.lock in the lightning-dir")
|
||||
|
||||
try:
|
||||
d = json.load(open("backup.lock", 'r'))
|
||||
destination = d['backend_url']
|
||||
plugin.backend = get_backend(destination, require_init=True)
|
||||
plugin.run()
|
||||
except Exception:
|
||||
logging.exception('Exception while initializing backup plugin')
|
||||
kill('Exception while initializing plugin, terminating lightningd')
|
||||
211
Unmaintained/backup/filebackend.py
Normal file
@@ -0,0 +1,211 @@
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import shutil
|
||||
import tempfile
|
||||
from typing import Iterator
|
||||
from urllib.parse import urlparse
|
||||
from backend import Backend, Change
|
||||
|
||||
|
||||
class FileBackend(Backend):
|
||||
def __init__(self, destination: str, create: bool):
|
||||
self.version = None
|
||||
self.prev_version = None
|
||||
self.destination = destination
|
||||
self.offsets = [0, 0]
|
||||
self.version_count = 0
|
||||
self.url = urlparse(self.destination)
|
||||
|
||||
if os.path.exists(self.url.path) and create:
|
||||
raise ValueError("Attempted to create a FileBackend, but file already exists.")
|
||||
if not os.path.exists(self.url.path) and not create:
|
||||
raise ValueError("Attempted to open a FileBackend but file doesn't already exists, use `backup-cli init` to initialize it first.")
|
||||
if create:
|
||||
# Initialize a new backup file
|
||||
self.version, self.prev_version = 0, 0
|
||||
self.offsets = [512, 0]
|
||||
self.version_count = 0
|
||||
self.write_metadata()
|
||||
|
||||
def initialize(self) -> bool:
|
||||
return self.read_metadata()
|
||||
|
||||
def write_metadata(self):
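# 512-byte header layout (big-endian): file format version u32, version u32,
# offsets[0] u64, prev_version u32, offsets[1] u64, version_count u64,
# zero-padded to 512 bytes.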
|
||||
blob = struct.pack("!IIQIQQ", 0x01, self.version, self.offsets[0],
|
||||
self.prev_version, self.offsets[1],
|
||||
self.version_count)
|
||||
|
||||
# Pad the header
|
||||
blob += b'\x00' * (512 - len(blob))
|
||||
mode = "rb+" if os.path.exists(self.url.path) else "wb+"
|
||||
|
||||
with open(self.url.path, mode) as f:
|
||||
f.seek(0)
|
||||
f.write(blob)
|
||||
f.flush()
|
||||
|
||||
def read_metadata(self):
|
||||
with open(self.url.path, 'rb') as f:
|
||||
blob = f.read(512)
|
||||
if len(blob) != 512:
|
||||
logging.warn("Corrupt FileBackend header, expected 512 bytes, got {} bytes".format(len(blob)))
|
||||
return False
|
||||
|
||||
file_version, = struct.unpack_from("!I", blob)
|
||||
if file_version != 1:
|
||||
logging.warn("Unknown FileBackend version {}".format(file_version))
|
||||
return False
|
||||
|
||||
self.version, self.offsets[0], self.prev_version, self.offsets[1], self.version_count, = struct.unpack_from("!IQIQQ", blob, offset=4)
|
||||
|
||||
return True
|
||||
|
||||
def add_change(self, entry: Change) -> bool:
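# Each appended record is: length u32, version u32, typ u8 (0x01 transaction,
# 0x02 snapshot), then the payload; hence the fixed 9-byte overhead below.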
|
||||
typ = b'\x01' if entry.snapshot is None else b'\x02'
|
||||
if typ == b'\x01':
|
||||
payload = b'\x00'.join([t.encode('UTF-8') for t in entry.transaction])
|
||||
elif typ == b'\x02':
|
||||
payload = entry.snapshot
|
||||
|
||||
length = struct.pack("!I", len(payload))
|
||||
version = struct.pack("!I", entry.version)
|
||||
with open(self.url.path, 'ab') as f:
|
||||
f.seek(self.offsets[0])
|
||||
f.write(length)
|
||||
f.write(version)
|
||||
f.write(typ)
|
||||
f.write(payload)
|
||||
self.prev_version, self.offsets[1] = self.version, self.offsets[0]
|
||||
self.version = entry.version
|
||||
self.offsets[0] += 9 + len(payload)
|
||||
self.version_count += 1
|
||||
self.write_metadata()
|
||||
|
||||
return True
|
||||
|
||||
def rewind(self):
|
||||
# After rewinding we set offsets[1] and prev_version to 0 (best effort
|
||||
# result). If either of these are set to 0 we have two consecutive
|
||||
# rewinds which cannot be safely done (we'd be rewinding more than the
|
||||
# one in-flight transaction).
|
||||
if self.offsets[1] == 0 or self.prev_version == 0:
|
||||
logging.warn("Cannot rewind multiple times.")
|
||||
return False
|
||||
|
||||
self.version, self.offsets[0] = self.prev_version, self.offsets[1]
|
||||
self.prev_version, self.offsets[1] = 0, 0
|
||||
return True
|
||||
|
||||
def stream_changes(self) -> Iterator[Change]:
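# Replay all records in order: skip the 512-byte header, then read
# (length, version, typ)-prefixed entries until the head version is reached.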
|
||||
self.read_metadata()
|
||||
version = -1
|
||||
with open(self.url.path, 'rb') as f:
|
||||
# Skip the header
|
||||
f.seek(512)
|
||||
while version < self.version:
|
||||
length, version, typ = struct.unpack("!IIb", f.read(9))
|
||||
payload = f.read(length)
|
||||
if typ == 1:
|
||||
yield Change(
|
||||
version=version,
|
||||
snapshot=None,
|
||||
transaction=[t.decode('UTF-8') for t in payload.split(b'\x00')]
|
||||
)
|
||||
elif typ == 2:
|
||||
yield Change(version=version, snapshot=payload, transaction=None)
|
||||
else:
|
||||
raise ValueError("Unknown FileBackend entry type {}".format(typ))
|
||||
|
||||
if version != self.version:
|
||||
raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
|
||||
assert version == self.version
|
||||
|
||||
def compact(self):
|
||||
stop = self.version # Stop one version short of the head when compacting
|
||||
tmp = tempfile.TemporaryDirectory()
|
||||
backupdir, clonename = os.path.split(self.url.path)
|
||||
|
||||
# Path of the backup clone that we're trying to build up. We
|
||||
# are trying to put this right next to the original backup, to
|
||||
# maximize the chances of both being on the same FS, which
|
||||
# makes the move below atomic.
|
||||
clonepath = os.path.join(backupdir, clonename + ".compacting")
|
||||
|
||||
# Location we extract the snapshot to and then apply
|
||||
# incremental changes.
|
||||
snapshotpath = os.path.join(tmp.name, "lightningd.sqlite3")
|
||||
|
||||
stats = {
|
||||
'before': {
|
||||
'backupsize': os.stat(self.url.path).st_size,
|
||||
'version_count': self.version_count,
|
||||
},
|
||||
}
|
||||
|
||||
print("Starting compaction: stats={}".format(stats))
|
||||
self.db = self._db_open(snapshotpath)
|
||||
|
||||
for change in self.stream_changes():
|
||||
if change.version == stop:
|
||||
break
|
||||
|
||||
if change.snapshot is not None:
|
||||
self._restore_snapshot(change.snapshot, snapshotpath)
|
||||
|
||||
if change.transaction is not None:
|
||||
self._restore_transaction(change.transaction)
|
||||
self.db.commit()
|
||||
|
||||
# If this assertion fails we are in a degenerate state: we
|
||||
# have less than two changes in the backup (starting
|
||||
# Core-Lightning alone produces 6 changes), and compacting an
|
||||
# almost empty backup is not useful.
|
||||
assert change is not None
|
||||
|
||||
# Remember `change`, it's the rewindable change we need to
|
||||
# stash on top of the new snapshot.
|
||||
clone = FileBackend(clonepath, create=True)
|
||||
clone.offsets = [512, 0]
|
||||
|
||||
# We are about to add the snapshot n-1 on top of n-2 (init),
|
||||
# followed by the last change for n on top of
|
||||
# n-1. prev_version trails that by one.
|
||||
clone.version = change.version - 2
|
||||
clone.prev_version = clone.version - 1
|
||||
clone.version_count = 0
|
||||
clone.write_metadata()
|
||||
|
||||
snapshot = Change(
|
||||
version=change.version - 1,
|
||||
snapshot=open(snapshotpath, 'rb').read(),
|
||||
transaction=None
|
||||
)
|
||||
print("Adding intial snapshot with {} bytes for version {}".format(
|
||||
len(snapshot.snapshot),
|
||||
snapshot.version
|
||||
))
|
||||
clone.add_change(snapshot)
|
||||
|
||||
assert clone.version == change.version - 1
|
||||
assert clone.prev_version == change.version - 2
|
||||
clone.add_change(change)
|
||||
|
||||
assert self.version == clone.version
|
||||
assert self.prev_version == clone.prev_version
|
||||
|
||||
stats['after'] = {
|
||||
'version_count': clone.version_count,
|
||||
'backupsize': os.stat(clonepath).st_size,
|
||||
}
|
||||
|
||||
print("Compacted {} changes, saving {} bytes, swapping backups".format(
|
||||
stats['before']['version_count'] - stats['after']['version_count'],
|
||||
stats['before']['backupsize'] - stats['after']['backupsize'],
|
||||
))
|
||||
shutil.move(clonepath, self.url.path)
|
||||
|
||||
# Re-initialize ourselves so we have the correct metadata
|
||||
self.read_metadata()
|
||||
|
||||
return stats
|
||||
1083
Unmaintained/backup/poetry.lock
generated
Normal file
1083
Unmaintained/backup/poetry.lock
generated
Normal file
File diff suppressed because it is too large
77
Unmaintained/backup/protocol.py
Normal file
77
Unmaintained/backup/protocol.py
Normal file
@@ -0,0 +1,77 @@
|
||||
'''
|
||||
Socket-based remote backup protocol. This is used to create a connection to a backup backend, and send it incremental database updates.
|
||||
'''
|
||||
import socket
|
||||
import struct
|
||||
from typing import Tuple
|
||||
import zlib
|
||||
|
||||
from backend import Change
|
||||
|
||||
|
||||
class PacketType:
|
||||
CHANGE = 0x01
|
||||
SNAPSHOT = 0x02
|
||||
REWIND = 0x03
|
||||
REQ_METADATA = 0x04
|
||||
RESTORE = 0x05
|
||||
ACK = 0x06
|
||||
NACK = 0x07
|
||||
METADATA = 0x08
|
||||
DONE = 0x09
|
||||
COMPACT = 0x0a
|
||||
COMPACT_RES = 0x0b
|
||||
|
||||
|
||||
PKT_CHANGE_TYPES = {PacketType.CHANGE, PacketType.SNAPSHOT}
|
||||
|
||||
|
||||
def recvall(sock: socket.socket, n: int) -> bytearray:
|
||||
'''Receive exactly n bytes from a socket.'''
|
||||
buf = bytearray(n)
|
||||
view = memoryview(buf)
|
||||
ptr = 0
|
||||
while ptr < n:
|
||||
count = sock.recv_into(view[ptr:])
|
||||
if count == 0:
|
||||
raise IOError('Premature end of stream')
|
||||
ptr += count
|
||||
return buf
|
||||
|
||||
|
||||
def send_packet(sock: socket.socket, typ: int, payload: bytes) -> None:
|
||||
sock.sendall(struct.pack('!BI', typ, len(payload)))
|
||||
sock.sendall(payload)
|
||||
|
||||
|
||||
def recv_packet(sock: socket.socket) -> Tuple[int, bytes]:
|
||||
(typ, length) = struct.unpack('!BI', recvall(sock, 5))
|
||||
payload = recvall(sock, length)
|
||||
return (typ, payload)
|
||||
|
||||
|
||||
def change_from_packet(typ, payload):
|
||||
'''Convert a network packet to a Change object.'''
|
||||
if typ == PacketType.CHANGE:
|
||||
(version, ) = struct.unpack('!I', payload[0:4])
|
||||
payload = zlib.decompress(payload[4:])
|
||||
return Change(version=version, snapshot=None,
|
||||
transaction=[t.decode('UTF-8') for t in payload.split(b'\x00')])
|
||||
elif typ == PacketType.SNAPSHOT:
|
||||
(version, ) = struct.unpack('!I', payload[0:4])
|
||||
payload = zlib.decompress(payload[4:])
|
||||
return Change(version=version, snapshot=payload, transaction=None)
|
||||
raise ValueError('Not a change (typ {})'.format(typ))
|
||||
|
||||
|
||||
def packet_from_change(entry):
|
||||
'''Convert a Change object to a network packet.'''
|
||||
if entry.snapshot is None:
|
||||
typ = PacketType.CHANGE
|
||||
payload = b'\x00'.join([t.encode('UTF-8') for t in entry.transaction])
|
||||
else:
|
||||
typ = PacketType.SNAPSHOT
|
||||
payload = entry.snapshot
|
||||
|
||||
version = struct.pack("!I", entry.version)
|
||||
return typ, version + zlib.compress(payload)
|
||||
23
Unmaintained/backup/pyproject.toml
Normal file
23
Unmaintained/backup/pyproject.toml
Normal file
@@ -0,0 +1,23 @@
|
||||
[tool.poetry]
|
||||
name = "cln-backup"
|
||||
version = "0.1.0"
|
||||
description = "Keep your Core-Lightning node save by backing it up, in real-time (allows recovering without channel closures)."
|
||||
authors = ["Christian Decker <decker@blockstream.com>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.7"
|
||||
pyln-client = "0.12.1"
|
||||
click = "^8.0.4"
|
||||
tqdm = "^4.62.3"
|
||||
psutil = "5.9.4"
|
||||
flask = "2.2.2"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
pyln-testing = "0.12.1"
|
||||
flaky = "^3.7.0"
|
||||
pytest-timeout = "^2.1.0"
|
||||
pytest-xdist = "^3.1.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
197
Unmaintained/backup/remote.md
Normal file
197
Unmaintained/backup/remote.md
Normal file
@@ -0,0 +1,197 @@
|
||||
Remote backup backend for Core-Lightning
|
||||
=====================================
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
The purpose of this backend is to allow hassle-free incremental remote backups of a Core-Lightning
|
||||
daemon's state.
|
||||
|
||||
The remote backup system consists of two parts:
|
||||
|
||||
- A `backup.py` plugin backend that listens for changes to Core-Lightning's database and communicates them
|
||||
to a remote server.
|
||||
|
||||
- A server daemon that receives changes from the backup backend and communicates with a local backup backend
|
||||
to store them. The server side does not need to be running Core-Lightning, nor have it installed.
|
||||
|
||||
### URL scheme
|
||||
|
||||
The backend URL format is `socket:<host>:<port>[?<param>=<value>[&...]]`. For example `socket:127.0.0.1:1234`. To supply an IPv6
|
||||
address use the bracketed syntax `socket:[::1]:1234`.
|
||||
|
||||
The only currently accepted `<param>` is `proxy`. This can be used to connect to the backup server through a proxy. See [Usage with Tor](#usage-with-tor).
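As a rough sketch, the plugin's own `socketbackend.parse_socket_url` helper (included in this directory) splits such a URL into a target and an optional proxy; run from the plugin directory:

```python
from socketbackend import parse_socket_url

# Direct connection
info = parse_socket_url('socket:127.0.0.1:1234')
assert info.target.host == '127.0.0.1' and info.target.port == 1234

# Connection through a SOCKS5 proxy (e.g. Tor)
info = parse_socket_url('socket:backupserver.onion:8700?proxy=socks5:127.0.0.1:9050')
assert info.proxytarget.host == '127.0.0.1' and info.proxytarget.port == 9050
```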
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
First initialize an empty file backend on the server side, then start the server:
|
||||
|
||||
```bash
|
||||
backup-cli init file:///path/to/backup
|
||||
backup-cli server file:///path/to/backup 127.0.0.1:8700
|
||||
```
|
||||
|
||||
On the client side:
|
||||
|
||||
```bash
|
||||
# Make sure Core-Lightning is not running
|
||||
lightning-cli stop
|
||||
# Initialize the socket backend (this makes an initial snapshot, and creates a configuration file for the plugin)
|
||||
backup-cli init socket:127.0.0.1:8700 --lightning-dir "$HOME/.lightning/bitcoin"
|
||||
# Start Core-Lightning, with the backup plugin as an important plugin so that any issue with it stops the daemon
|
||||
lightningd ... \
|
||||
--important-plugin /path/to/plugins/backup/backup.py
|
||||
```
|
||||
|
||||
Usage with SSH
|
||||
--------------
|
||||
|
||||
The easiest way to connect the server and client if they are not running on the same host is with an SSH
|
||||
forward. For example, when connecting from another machine to the one running Core-Lightning use:
|
||||
|
||||
```bash
|
||||
ssh mylightninghost -R 8700:127.0.0.1:8700
|
||||
```
|
||||
|
||||
Or when it is the other way around:
|
||||
|
||||
```bash
|
||||
ssh backupserver -L 8700:127.0.0.1:8700
|
||||
```
|
||||
|
||||
Usage with Tor
|
||||
--------------
|
||||
|
||||
To use the backup plugin with Tor the Python module PySocks needs to be installed (`pip install --user pysocks`).
|
||||
|
||||
Assuming Tor's `SocksPort` is 9050, the following URL can be used to connect the backup plugin to a backup server over an onion service:
|
||||
|
||||
```
|
||||
socket:axz53......onion:8700?proxy=socks5:127.0.0.1:9050
|
||||
```
|
||||
|
||||
On the server side, manually define an onion service in `torrc` that forwards incoming connections to the local port, e.g.:
|
||||
|
||||
```
|
||||
HiddenServiceDir /var/lib/tor/lightning/
|
||||
HiddenServiceVersion 3
|
||||
HiddenServicePort 8700 127.0.0.1:8700
|
||||
```
|
||||
|
||||
Goals
|
||||
-----
|
||||
|
||||
- Hassle-free incremental remote backup of Core-Lightning's database over a simple TCP protocol.
|
||||
|
||||
- Safety. Core-Lightning will only proceed when the remote backend has acknowledged storing a change, and will halt when there is no connection to the backup server.
|
||||
|
||||
- Bandwidth efficiency. Updates can be really large, and SQL statements ought to be well compressible, so bandwidth is saved by performing zlib compression on the changes and snapshots.
|
||||
|
||||
Non-goals
|
||||
---------
|
||||
|
||||
- Encryption. This is out of scope: a VPN (say, a WireGuard connection), an SSH tunnel (ssh `-L` or `-R`), or even a Tor onion service is more flexible, avoids the pitfalls of custom cryptography code, and spares the user from learning yet another way to configure secure transport.
|
||||
|
||||
Protocol details
|
||||
================
|
||||
|
||||
A bidirectional TCP protocol is used to synchronize state between the client and server. It is documented here in case anyone wants to make a custom server implementation.
|
||||
|
||||
Packet format:
|
||||
|
||||
<typ u8> <length u32> <payload u8 * length...>
|
||||
|
||||
Every packet has a type and a 32-bit length. Defined packet types are:
|
||||
|
||||
0x01 CHANGE Change
|
||||
0x02 SNAPSHOT Snapshot
|
||||
0x03 REWIND Rewind a version (can only be done once)
|
||||
0x04 REQ_METADATA Request metadata
|
||||
0x05 RESTORE Request stream of changes to restore
|
||||
0x06 ACK Acknowledge change, snapshot or rewind
|
||||
0x07 NACK An error happened (e.g. rewind too far)
|
||||
0x08 METADATA Metadata response
|
||||
0x09 DONE Restore is complete
|
||||
0x0A COMPACT Do backup compaction
|
||||
0x0B COMPACT_RES Database compaction result
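For a custom client or server implementation, the framing boils down to a fixed 5-byte header followed by the payload. A minimal sketch (the plugin's own `protocol.py` implements the same framing):

```python
import socket
import struct

def recvall(sock: socket.socket, n: int) -> bytes:
    # Read exactly n bytes, raising on a premature end of stream
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise IOError('Premature end of stream')
        buf += chunk
    return buf

def write_packet(sock: socket.socket, typ: int, payload: bytes) -> None:
    # <typ u8> <length u32> <payload ...>, big-endian
    sock.sendall(struct.pack('!BI', typ, len(payload)) + payload)

def read_packet(sock: socket.socket):
    typ, length = struct.unpack('!BI', recvall(sock, 5))
    return typ, recvall(sock, length)
```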
|
||||
|
||||
CHANGE
|
||||
------
|
||||
|
||||
A database update.
|
||||
|
||||
Fields:
|
||||
|
||||
- version (u32)
|
||||
- a list of SQL statements to be executed for this update, encoded as UTF-8, separated by NULL bytes. The last statement will not be terminated with a NULL byte. (zlib compressed)
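A minimal sketch of how such a payload is assembled, mirroring `packet_from_change` in `protocol.py`:

```python
import struct
import zlib

def encode_change_payload(version: int, statements: list) -> bytes:
    # version (u32) followed by the zlib-compressed, NULL-separated statements
    body = b'\x00'.join(s.encode('UTF-8') for s in statements)
    return struct.pack('!I', version) + zlib.compress(body)
```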
|
||||
|
||||
SNAPSHOT
|
||||
--------
|
||||
|
||||
A full database snapshot, replacing the previous incremental backup.
|
||||
|
||||
Fields:
|
||||
|
||||
- version (u32)
|
||||
- a raw dump of the sqlite database (zlib compressed)
|
||||
|
||||
REQ_METADATA
|
||||
------------
|
||||
|
||||
Request metadata from server. The server should respond with a `METADATA` packet.
|
||||
|
||||
No fields.
|
||||
|
||||
RESTORE
|
||||
-------
|
||||
|
||||
Request a stream of changes to restore the database.
|
||||
|
||||
The server should respond with a stream of `CHANGE` and `SNAPSHOT` packets, finishing with a `DONE` packet.
|
||||
|
||||
Unlike when sending a change to backup, the client is not required to (but may) respond to these with `ACK`.
|
||||
|
||||
No fields.
|
||||
|
||||
ACK
|
||||
---
|
||||
|
||||
General success response. Acknowledges having processed a `CHANGE`, `SNAPSHOT` or `REWIND` packet.
|
||||
|
||||
Fields:
|
||||
|
||||
- new version (u32)
|
||||
|
||||
NACK
|
||||
----
|
||||
|
||||
Indicates an error processing the last packet.
|
||||
|
||||
No fields.
|
||||
|
||||
METADATA
|
||||
--------
|
||||
|
||||
Metadata response, sent as response to `REQ_METADATA`.
|
||||
|
||||
Fields:
|
||||
|
||||
- protocol (should be 0x01) (u32)
|
||||
- version (u32)
|
||||
- prev_version (u32)
|
||||
- version_count (u64)
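In Python this corresponds to a single big-endian unpack (this is what `socketbackend.py` does when it receives the packet):

```python
import struct

# payload is the METADATA packet body received over the socket
protocol, version, prev_version, version_count = struct.unpack('!IIIQ', payload)
```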
|
||||
|
||||
COMPACT
|
||||
--------
|
||||
|
||||
Do a database compaction. Sends `COMPACT_RES` on successful completion, `NACK` otherwise.
|
||||
|
||||
COMPACT_RES
|
||||
-----------
|
||||
|
||||
Result of a database compaction.
|
||||
|
||||
Fields:
|
||||
|
||||
- A UTF-8 encoded JSON data structure with statistics as returned by Backend.compact()
|
||||
129
Unmaintained/backup/server.py
Normal file
129
Unmaintained/backup/server.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import logging
|
||||
import socket
|
||||
import struct
|
||||
import json
|
||||
import sys
|
||||
from typing import Tuple
|
||||
|
||||
from backend import Backend
|
||||
from protocol import PacketType, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
|
||||
|
||||
|
||||
class SystemdHandler(logging.Handler):
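# Map Python log levels to the "<N>" syslog-style priority prefixes that
# systemd's journal parses from a service's output.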
|
||||
PREFIX = {
|
||||
# EMERG <0>
|
||||
# ALERT <1>
|
||||
logging.CRITICAL: "<2>",
|
||||
logging.ERROR: "<3>",
|
||||
logging.WARNING: "<4>",
|
||||
# NOTICE <5>
|
||||
logging.INFO: "<6>",
|
||||
logging.DEBUG: "<7>",
|
||||
logging.NOTSET: "<7>"
|
||||
}
|
||||
|
||||
def __init__(self, stream=sys.stdout):
|
||||
self.stream = stream
|
||||
logging.Handler.__init__(self)
|
||||
|
||||
def emit(self, record):
|
||||
try:
|
||||
msg = self.PREFIX[record.levelno] + self.format(record) + "\n"
|
||||
self.stream.write(msg)
|
||||
self.stream.flush()
|
||||
except Exception:
|
||||
self.handleError(record)
|
||||
|
||||
|
||||
def setup_server_logging(mode, level):
|
||||
root_logger = logging.getLogger()
|
||||
root_logger.setLevel(level.upper())
|
||||
mode = mode.lower()
|
||||
if mode == 'systemd':
|
||||
# replace handler with systemd one
|
||||
root_logger.handlers = []
|
||||
root_logger.addHandler(SystemdHandler())
|
||||
else:
|
||||
assert mode == 'plain'
|
||||
|
||||
|
||||
class SocketServer:
|
||||
def __init__(self, addr: Tuple[str, int], backend: Backend) -> None:
|
||||
self.backend = backend
|
||||
self.addr = addr
|
||||
self.bind = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
self.bind.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
self.bind.bind(addr)
|
||||
|
||||
def _send_packet(self, typ: int, payload: bytes) -> None:
|
||||
send_packet(self.sock, typ, payload)
|
||||
|
||||
def _recv_packet(self) -> Tuple[int, bytes]:
|
||||
return recv_packet(self.sock)
|
||||
|
||||
def _handle_conn(self, conn) -> None:
|
||||
# Can only handle one connection at a time
|
||||
logging.info('Servicing incoming connection')
|
||||
self.sock = conn
|
||||
while True:
|
||||
try:
|
||||
(typ, payload) = self._recv_packet()
|
||||
except IOError:
|
||||
logging.info('Connection closed')
|
||||
break
|
||||
if typ in PKT_CHANGE_TYPES:
|
||||
change = change_from_packet(typ, payload)
|
||||
if typ == PacketType.CHANGE:
|
||||
logging.debug('Received CHANGE {}'.format(change.version))
|
||||
else:
|
||||
logging.info('Received SNAPSHOT {}'.format(change.version))
|
||||
self.backend.add_change(change)
|
||||
self._send_packet(PacketType.ACK, struct.pack("!I", self.backend.version))
|
||||
elif typ == PacketType.REWIND:
|
||||
logging.info('Received REWIND')
|
||||
to_version, = struct.unpack('!I', payload)
|
||||
if to_version != self.backend.prev_version:
|
||||
logging.info('Cannot rewind to version {}'.format(to_version))
|
||||
self._send_packet(PacketType.NACK, struct.pack("!I", self.backend.version))
|
||||
else:
|
||||
self.backend.rewind()
|
||||
self._send_packet(PacketType.ACK, struct.pack("!I", self.backend.version))
|
||||
elif typ == PacketType.REQ_METADATA:
|
||||
logging.debug('Received REQ_METADATA')
|
||||
blob = struct.pack("!IIIQ", 0x01, self.backend.version,
|
||||
self.backend.prev_version,
|
||||
self.backend.version_count)
|
||||
self._send_packet(PacketType.METADATA, blob)
|
||||
elif typ == PacketType.RESTORE:
|
||||
logging.info('Received RESTORE')
|
||||
for change in self.backend.stream_changes():
|
||||
(typ, payload) = packet_from_change(change)
|
||||
self._send_packet(typ, payload)
|
||||
self._send_packet(PacketType.DONE, b'')
|
||||
elif typ == PacketType.COMPACT:
|
||||
logging.info('Received COMPACT')
|
||||
stats = self.backend.compact()
|
||||
self._send_packet(PacketType.COMPACT_RES, json.dumps(stats).encode())
|
||||
elif typ == PacketType.ACK:
|
||||
logging.debug('Received ACK')
|
||||
elif typ == PacketType.NACK:
|
||||
logging.debug('Received NACK')
|
||||
elif typ == PacketType.METADATA:
|
||||
logging.debug('Received METADATA')
|
||||
elif typ == PacketType.COMPACT_RES:
|
||||
logging.debug('Received COMPACT_RES')
|
||||
else:
|
||||
raise Exception('Unknown or unexpected packet type {}'.format(typ))
|
||||
self.conn = None
|
||||
|
||||
def run(self) -> None:
|
||||
self.bind.listen(1)
|
||||
logging.info('Waiting for connection on {}'.format(self.addr))
|
||||
while True:
|
||||
conn, _ = self.bind.accept()
|
||||
try:
|
||||
self._handle_conn(conn)
|
||||
except Exception:
|
||||
logging.exception('Got exception')
|
||||
finally:
|
||||
conn.close()
|
||||
231
Unmaintained/backup/socketbackend.py
Normal file
231
Unmaintained/backup/socketbackend.py
Normal file
@@ -0,0 +1,231 @@
|
||||
from collections import namedtuple
|
||||
import json
|
||||
import logging
|
||||
import socket
|
||||
import re
|
||||
import struct
|
||||
import time
|
||||
from typing import Tuple, Iterator
|
||||
from urllib.parse import urlparse, parse_qs
|
||||
|
||||
from backend import Backend, Change
|
||||
from protocol import PacketType, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
|
||||
|
||||
# Total number of reconnection tries
|
||||
RECONNECT_TRIES = 5
|
||||
|
||||
# Delay in seconds between reconnections (initial)
|
||||
RECONNECT_DELAY = 5
|
||||
|
||||
# Scale delay factor after each failure
|
||||
RECONNECT_DELAY_BACKOFF = 1.5
|
||||
|
||||
HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype'])
|
||||
SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])
|
||||
|
||||
# Network address type.
|
||||
|
||||
|
||||
class AddrType:
|
||||
IPv4 = 0
|
||||
IPv6 = 1
|
||||
NAME = 2
|
||||
|
||||
# Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor.
|
||||
|
||||
|
||||
class ProxyType:
|
||||
DIRECT = 0
|
||||
SOCKS5 = 1
|
||||
|
||||
|
||||
def parse_host_port(path: str) -> HostPortInfo:
|
||||
'''Parse a host:port pair.'''
|
||||
if path.startswith('['): # bracketed IPv6 address
|
||||
eidx = path.find(']')
|
||||
if eidx == -1:
|
||||
raise ValueError('Unterminated bracketed host address.')
|
||||
host = path[1:eidx]
|
||||
addrtype = AddrType.IPv6
|
||||
eidx += 1
|
||||
if eidx >= len(path) or path[eidx] != ':':
|
||||
raise ValueError('Port number missing.')
|
||||
eidx += 1
|
||||
else:
|
||||
eidx = path.find(':')
|
||||
if eidx == -1:
|
||||
raise ValueError('Port number missing.')
|
||||
host = path[0:eidx]
|
||||
if re.match(r'\d+\.\d+\.\d+\.\d+$', host): # matches IPv4 address format
|
||||
addrtype = AddrType.IPv4
|
||||
else:
|
||||
addrtype = AddrType.NAME
|
||||
eidx += 1
|
||||
|
||||
try:
|
||||
port = int(path[eidx:])
|
||||
except ValueError:
|
||||
raise ValueError('Invalid port number')
|
||||
|
||||
return HostPortInfo(host=host, port=port, addrtype=addrtype)
|
||||
|
||||
|
||||
def parse_socket_url(destination: str) -> SocketURLInfo:
|
||||
'''Parse a socket: URL to extract the information contained in it.'''
|
||||
url = urlparse(destination)
|
||||
if url.scheme != 'socket':
|
||||
raise ValueError('Scheme for socket backend must be socket:...')
|
||||
|
||||
target = parse_host_port(url.path)
|
||||
|
||||
proxytype = ProxyType.DIRECT
|
||||
proxytarget = None
|
||||
# parse query parameters
|
||||
# reject unknown query string keys (anything other than 'proxy')
|
||||
qs = parse_qs(url.query)
|
||||
for (key, values) in qs.items():
|
||||
if key == 'proxy': # proxy=socks5:127.0.0.1:9050
|
||||
if len(values) != 1:
|
||||
raise ValueError('Proxy can only have one value')
|
||||
|
||||
(ptype, ptarget) = values[0].split(':', 1)
|
||||
if ptype != 'socks5':
|
||||
raise ValueError('Unknown proxy type ' + ptype)
|
||||
|
||||
proxytype = ProxyType.SOCKS5
|
||||
proxytarget = parse_host_port(ptarget)
|
||||
else:
|
||||
raise ValueError('Unknown query string parameter ' + key)
|
||||
|
||||
return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget)
|
||||
|
||||
|
||||
class SocketBackend(Backend):
|
||||
def __init__(self, destination: str, create: bool):
|
||||
self.version = None
|
||||
self.prev_version = None
|
||||
self.destination = destination
|
||||
self.url = parse_socket_url(destination)
|
||||
self.connect()
|
||||
|
||||
def connect(self):
|
||||
if self.url.proxytype == ProxyType.DIRECT:
|
||||
if self.url.target.addrtype == AddrType.IPv6:
|
||||
self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
|
||||
else: # TODO NAME is assumed to be IPv4 for now
|
||||
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
else:
|
||||
assert self.url.proxytype == ProxyType.SOCKS5
|
||||
import socks
|
||||
self.sock = socks.socksocket()
|
||||
self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port)
|
||||
|
||||
logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format(
|
||||
self.url.target.host, self.url.target.port, self.url.target.addrtype,
|
||||
self.url.proxytype, self.url.proxytarget))
|
||||
self.sock.connect((self.url.target.host, self.url.target.port))
|
||||
logging.info('Connected to {}'.format(self.destination))
|
||||
|
||||
def _send_packet(self, typ: int, payload: bytes) -> None:
|
||||
send_packet(self.sock, typ, payload)
|
||||
|
||||
def _recv_packet(self) -> Tuple[int, bytes]:
|
||||
return recv_packet(self.sock)
|
||||
|
||||
def initialize(self) -> bool:
|
||||
'''
|
||||
Initialize the socket backend by requesting current metadata from the server.
|
||||
'''
|
||||
logging.info('Initializing backend')
|
||||
self._request_metadata()
|
||||
logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format(
|
||||
self.protocol, self.version, self.prev_version, self.version_count
|
||||
))
|
||||
return True
|
||||
|
||||
def _request_metadata(self) -> None:
|
||||
self._send_packet(PacketType.REQ_METADATA, b'')
|
||||
(typ, payload) = self._recv_packet()
|
||||
assert typ == PacketType.METADATA
|
||||
self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload)
|
||||
|
||||
def add_change(self, entry: Change) -> bool:
|
||||
typ, payload = packet_from_change(entry)
|
||||
|
||||
base_version = self.version
|
||||
retry = 0
|
||||
retry_delay = RECONNECT_DELAY
|
||||
need_connect = False
|
||||
while True: # Retry loop
|
||||
try:
|
||||
if need_connect:
|
||||
self.connect()
|
||||
# Request metadata, to know where we stand
|
||||
self._request_metadata()
|
||||
if self.version == entry.version:
|
||||
# If the current version at the server side matches the version of the
|
||||
# entry, the packet was successfully sent and processed and the error
|
||||
# happened afterward. Nothing left to do.
|
||||
return True
|
||||
elif base_version == self.version:
|
||||
# The other acceptable option is that the current version still matches
|
||||
# that on the server side. Then we retry.
|
||||
pass
|
||||
else:
|
||||
raise Exception('Unexpected backup version {} after reconnect'.format(self.version))
|
||||
|
||||
self._send_packet(typ, payload)
|
||||
# Wait for change to be acknowledged before continuing.
|
||||
(typ, _) = self._recv_packet()
|
||||
assert typ == PacketType.ACK
|
||||
except (BrokenPipeError, OSError):
|
||||
pass
|
||||
else:
|
||||
break
|
||||
|
||||
if retry == RECONNECT_TRIES:
|
||||
logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry))
|
||||
raise IOError('Connection was lost while sending change')
|
||||
|
||||
retry += 1
|
||||
logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay))
|
||||
time.sleep(retry_delay)
|
||||
retry_delay *= RECONNECT_DELAY_BACKOFF
|
||||
need_connect = True
|
||||
|
||||
self.prev_version = self.version
|
||||
self.version = entry.version
|
||||
return True
|
||||
|
||||
def rewind(self) -> bool:
|
||||
'''Rewind to previous version.'''
|
||||
version = struct.pack("!I", self.prev_version)
|
||||
self._send_packet(PacketType.REWIND, version)
|
||||
# Wait for change to be acknowledged before continuing.
|
||||
(typ, _) = self._recv_packet()
|
||||
assert typ == PacketType.ACK
|
||||
return True
|
||||
|
||||
def stream_changes(self) -> Iterator[Change]:
|
||||
self._send_packet(PacketType.RESTORE, b'')
|
||||
version = -1
|
||||
while True:
|
||||
(typ, payload) = self._recv_packet()
|
||||
if typ in PKT_CHANGE_TYPES:
|
||||
change = change_from_packet(typ, payload)
|
||||
version = change.version
|
||||
yield change
|
||||
elif typ == PacketType.DONE:
|
||||
break
|
||||
else:
|
||||
raise ValueError("Unknown entry type {}".format(typ))
|
||||
|
||||
if version != self.version:
|
||||
raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
|
||||
assert version == self.version
|
||||
|
||||
def compact(self):
|
||||
self._send_packet(PacketType.COMPACT, b'')
|
||||
(typ, payload) = self._recv_packet()
|
||||
assert typ == PacketType.COMPACT_RES
|
||||
return json.loads(payload.decode())
|
||||
335
Unmaintained/backup/test_backup.py
Normal file
335
Unmaintained/backup/test_backup.py
Normal file
@@ -0,0 +1,335 @@
|
||||
from backend import Backend
|
||||
import socketbackend
|
||||
from flaky import flaky
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
from pyln.testing.utils import sync_blockheight
|
||||
import os
|
||||
import pytest
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
|
||||
plugin_dir = os.path.dirname(__file__)
|
||||
plugin_path = os.path.join(plugin_dir, "backup.py")
|
||||
cli_path = os.path.join(os.path.dirname(__file__), "backup-cli")
|
||||
|
||||
# For the transition period we require deprecated_apis to be true
|
||||
deprecated_apis = True
|
||||
|
||||
|
||||
def test_start(node_factory, directory):
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']]
|
||||
assert "backup.py" in plugins
|
||||
|
||||
# Restart the node a couple of times, to check that we can resume normally
|
||||
for i in range(5):
|
||||
l1.restart()
|
||||
plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']]
|
||||
assert "backup.py" in plugins
|
||||
|
||||
|
||||
def test_start_no_init(node_factory, directory):
|
||||
"""The plugin should refuse to start if we haven't initialized the backup
|
||||
"""
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
os.makedirs(bpath)
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
}
|
||||
l1 = node_factory.get_node(
|
||||
options=opts, cleandir=False, may_fail=True, start=False
|
||||
)
|
||||
|
||||
with pytest.raises(TimeoutError):
|
||||
# The way we detect a failure to start is when start() runs into a
# timeout looking for 'Server started with public key'.
|
||||
l1.start()
|
||||
assert l1.daemon.is_in_log(r'Could not find backup.lock in the lightning-dir')
|
||||
|
||||
|
||||
def test_init_not_empty(node_factory, directory):
|
||||
"""We want to add backups to an existing lightning node.
|
||||
|
||||
backup-cli init should start the backup with an initial snapshot.
|
||||
"""
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
l1 = node_factory.get_node()
|
||||
l1.stop()
|
||||
|
||||
out = subprocess.check_output([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
assert b'Found an existing database' in out
|
||||
assert b'Successfully written initial snapshot' in out
|
||||
|
||||
# Now restart and add the plugin
|
||||
l1.daemon.opts['plugin'] = plugin_path
|
||||
l1.daemon.opts['allow-deprecated-apis'] = deprecated_apis
|
||||
l1.start()
|
||||
assert l1.daemon.is_in_log(r'plugin-backup.py: Versions match up')
|
||||
|
||||
|
||||
@flaky
|
||||
def test_tx_abort(node_factory, directory):
|
||||
"""Simulate a crash between hook call and DB commit.
|
||||
|
||||
We simulate this by updating the data_version var in the database before
|
||||
restarting the node. This desyncs the node from the backup, and restoring
|
||||
may not work (depending on which transaction was pretend-rolled-back), but
|
||||
continuing should work fine, since it can happen that we crash just
|
||||
in between the hook call and the DB transaction.
|
||||
|
||||
"""
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
l1.stop()
|
||||
|
||||
print(l1.db.query("SELECT * FROM vars;"))
|
||||
|
||||
# Now fudge the data_version:
|
||||
l1.db.execute("UPDATE vars SET intval = intval - 1 WHERE name = 'data_version'")
|
||||
|
||||
print(l1.db.query("SELECT * FROM vars;"))
|
||||
|
||||
l1.restart()
|
||||
assert l1.daemon.is_in_log(r'Last changes not applied')
|
||||
|
||||
|
||||
@flaky
|
||||
def test_failing_restore(node_factory, directory):
|
||||
"""The node database is having memory loss, make sure we abort.
|
||||
|
||||
We simulate a loss of transactions by manually resetting the data_version
|
||||
in the database back to n-2, which is non-recoverable.
|
||||
|
||||
"""
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
|
||||
def section(comment):
|
||||
print("=" * 25, comment, "=" * 25)
|
||||
|
||||
section("Starting node for the first time")
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True)
|
||||
l1.stop()
|
||||
|
||||
# Now fudge the data_version:
|
||||
section("Simulating a restore of an old version")
|
||||
l1.db.execute("UPDATE vars SET intval = intval - 2 WHERE name = 'data_version'")
|
||||
|
||||
section("Restarting node, should fail")
|
||||
with pytest.raises(Exception):
|
||||
l1.start()
|
||||
|
||||
l1.daemon.proc.wait()
|
||||
section("Verifying the node died with an error")
|
||||
assert l1.daemon.is_in_log(r'lost some state') is not None
|
||||
|
||||
|
||||
def test_intermittent_backup(node_factory, directory):
|
||||
"""Simulate intermittent use of the backup, or an old file backup.
|
||||
|
||||
"""
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True)
|
||||
|
||||
# Now start without the plugin. This should work fine.
|
||||
del l1.daemon.opts['plugin']
|
||||
l1.restart()
|
||||
|
||||
# Now restart adding the plugin again, and it should fail due to gaps in
|
||||
# the backup.
|
||||
l1.stop()
|
||||
with pytest.raises(Exception):
|
||||
l1.daemon.opts.update(opts)
|
||||
l1.start()
|
||||
|
||||
l1.daemon.proc.wait()
|
||||
assert l1.daemon.is_in_log(r'Backup is out of date') is not None
|
||||
|
||||
|
||||
def test_restore(node_factory, directory):
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
l1.stop()
|
||||
|
||||
rdest = os.path.join(bpath, 'lightningd.sqlite.restore')
|
||||
subprocess.check_call([cli_path, "restore", bdest, rdest])
|
||||
|
||||
|
||||
def test_restore_dir(node_factory, directory):
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
l1.stop()
|
||||
|
||||
# should raise error without remove_existing
|
||||
with pytest.raises(Exception):
|
||||
subprocess.check_call([cli_path, "restore", bdest, bpath])
|
||||
|
||||
# but succeed when we remove the sqlite3 dbfile before
|
||||
os.remove(os.path.join(bpath, "lightningd.sqlite3"))
|
||||
subprocess.check_call([cli_path, "restore", bdest, bpath])
|
||||
|
||||
|
||||
def test_warning(directory, node_factory):
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
'backup-destination': 'somewhere/over/the/rainbox',
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
l1.stop()
|
||||
|
||||
assert l1.daemon.is_in_log(r'The `--backup-destination` option is deprecated and will be removed in future versions of the backup plugin.')
|
||||
|
||||
|
||||
class DummyBackend(Backend):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
def test_rewrite():
|
||||
tests = [
|
||||
(
|
||||
r'UPDATE outputs SET status=123, reserved_til=1891733WHERE prev_out_tx=1 AND prev_out_index=2',
|
||||
r'UPDATE outputs SET status=123, reserved_til=1891733 WHERE prev_out_tx=1 AND prev_out_index=2',
|
||||
),
|
||||
]
|
||||
|
||||
b = DummyBackend()
|
||||
|
||||
for i, o in tests:
|
||||
assert b._rewrite_stmt(i) == o
|
||||
|
||||
|
||||
def test_restore_pre_4090(directory):
|
||||
"""The prev-4090-backup.dbak contains faulty expansions, fix em.
|
||||
"""
|
||||
bdest = 'file://' + os.path.join(os.path.dirname(__file__), 'tests', 'pre-4090-backup.dbak')
|
||||
rdest = os.path.join(directory, 'lightningd.sqlite.restore')
|
||||
subprocess.check_call([cli_path, "restore", bdest, rdest])
|
||||
|
||||
|
||||
def test_compact(bitcoind, directory, node_factory):
|
||||
bpath = os.path.join(directory, 'lightning-1', 'regtest')
|
||||
bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
|
||||
os.makedirs(bpath)
|
||||
subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])
|
||||
opts = {
|
||||
'plugin': plugin_path,
|
||||
'allow-deprecated-apis': deprecated_apis,
|
||||
}
|
||||
l1 = node_factory.get_node(options=opts, cleandir=False)
|
||||
l1.rpc.backup_compact()
|
||||
|
||||
tmp = tempfile.TemporaryDirectory()
|
||||
subprocess.check_call([cli_path, "restore", bdest, tmp.name])
|
||||
|
||||
# Trigger a couple more changes and the compact again.
|
||||
bitcoind.generate_block(100)
|
||||
sync_blockheight(bitcoind, [l1])
|
||||
|
||||
l1.rpc.backup_compact()
|
||||
tmp = tempfile.TemporaryDirectory()
|
||||
subprocess.check_call([cli_path, "restore", bdest, tmp.name])
|
||||
|
||||
|
||||
def test_parse_socket_url():
|
||||
with pytest.raises(ValueError):
|
||||
# fail: invalid url scheme
|
||||
socketbackend.parse_socket_url('none')
|
||||
# fail: no port number
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1')
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:')
|
||||
# fail: unbracketed IPv6
|
||||
socketbackend.parse_socket_url('socket:::1:1234')
|
||||
# fail: no port number IPv6
|
||||
socketbackend.parse_socket_url('socket:[::1]')
|
||||
socketbackend.parse_socket_url('socket:[::1]:')
|
||||
# fail: invalid port number
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:12bla')
|
||||
# fail: unrecognized query string key
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:1234?dummy=value')
|
||||
# fail: incomplete proxy spec
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5')
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5:')
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5:127.0.0.1:')
|
||||
# fail: unknown proxy scheme
|
||||
socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks6:127.0.0.1:9050')
|
||||
|
||||
# IPv4
|
||||
s = socketbackend.parse_socket_url('socket:127.0.0.1:1234')
|
||||
assert s.target.host == '127.0.0.1'
|
||||
assert s.target.port == 1234
|
||||
assert s.target.addrtype == socketbackend.AddrType.IPv4
|
||||
assert s.proxytype == socketbackend.ProxyType.DIRECT
|
||||
|
||||
# IPv6
|
||||
s = socketbackend.parse_socket_url('socket:[::1]:1235')
|
||||
assert s.target.host == '::1'
|
||||
assert s.target.port == 1235
|
||||
assert s.target.addrtype == socketbackend.AddrType.IPv6
|
||||
assert s.proxytype == socketbackend.ProxyType.DIRECT
|
||||
|
||||
# Hostname
|
||||
s = socketbackend.parse_socket_url('socket:backup.local:1236')
|
||||
assert s.target.host == 'backup.local'
|
||||
assert s.target.port == 1236
|
||||
assert s.target.addrtype == socketbackend.AddrType.NAME
|
||||
assert s.proxytype == socketbackend.ProxyType.DIRECT
|
||||
|
||||
# Tor
|
||||
s = socketbackend.parse_socket_url('socket:backupserver.onion:1234?proxy=socks5:127.0.0.1:9050')
|
||||
assert s.target.host == 'backupserver.onion'
|
||||
assert s.target.port == 1234
|
||||
assert s.target.addrtype == socketbackend.AddrType.NAME
|
||||
assert s.proxytype == socketbackend.ProxyType.SOCKS5
|
||||
assert s.proxytarget.host == '127.0.0.1'
|
||||
assert s.proxytarget.port == 9050
|
||||
assert s.proxytarget.addrtype == socketbackend.AddrType.IPv4
|
||||
BIN
Unmaintained/backup/tests/pre-4090-backup.dbak
Normal file
BIN
Unmaintained/backup/tests/pre-4090-backup.dbak
Normal file
Binary file not shown.
174
Unmaintained/commando/README.md
Normal file
174
Unmaintained/commando/README.md
Normal file
@@ -0,0 +1,174 @@
|
||||
# Commando plugin
|
||||
|
||||
Commando has been **included in Core Lightning as a first-class C plugin**.
|
||||
|
||||
It has been actively developed since then and has gained more features than are listed below.
|
||||
|
||||
Check out the latest updates on commando at:
|
||||
https://docs.corelightning.org/docs/commando &
|
||||
https://docs.corelightning.org/reference/lightning-commando
|
||||
|
||||
------------------------------------------------------------------------------------------------------
|
||||
|
||||
# Archived Commando python plugin
|
||||
|
||||
This plugin allows other nodes to send your node commands, and allows you
|
||||
to send them to other nodes. The nodes must be authorized, and must be
|
||||
directly connected.
|
||||
|
||||
Motto: Reckless? Try going commando!
|
||||
|
||||
## Installation
|
||||
|
||||
This plugin requires the runes library; and to use runes requires
|
||||
datastore support. You can either use a lightningd version after
|
||||
0.10.1, or the [datastore plugin](https://github.com/lightningd/plugins/blob/datastore/README.md).
|
||||
|
||||
For general plugin installation instructions see the repos main
|
||||
[README.md](https://github.com/lightningd/plugins/blob/master/README.md#Installation)
|
||||
|
||||
## Options and Commands
|
||||
|
||||
There are two configuration options, which can be specified multiple
|
||||
times:
|
||||
|
||||
* --commando-reader: a node id which can execute (most) `list` and `get` / `summary` commands
|
||||
* --commando-writer: a node id which can execute any commands.
|
||||
|
||||
You can use these for static access lists; no runes are necessary. You would
|
||||
normally put "commando-writer" (or "commando-reader") lines in your
|
||||
config file.
|
||||
|
||||
For quick testing, you can use this fairly awkward command to start the
|
||||
plugin dynamically, with a reader by node id:
|
||||
|
||||
lightning-cli plugin subcommand=start plugin=`pwd`/commando.py commando_reader=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518
|
||||
|
||||
|
||||
### Using Commando to Control A Node
|
||||
|
||||
Once the node has authorized you can run the `commando` command to send it a
|
||||
command, like this example which sends a `stop` message to 022d...
|
||||
|
||||
lightning-cli commando 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59 stop
|
||||
|
||||
For more advanced authorization, you can create **runes** which permit
|
||||
restricted access, and send them along with commands.
|
||||
|
||||
|
||||
### Creating Runes
|
||||
|
||||
If you have datastore support (see the [datastore
|
||||
plugin](https://github.com/lightningd/plugins/blob/datastore/README.md)),
|
||||
you can also create a "rune": anyone who has the rune can use it to
|
||||
execute the commands it allows.
|
||||
|
||||
- `commando-rune` by itself gives a "full access" rune.
|
||||
- `commando-rune restrictions=readonly` gives a rune which is restricted to get,
|
||||
list and summary commands.
|
||||
|
||||
For example, say we have peer
|
||||
0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62,
|
||||
and we want to allow peer
|
||||
0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518 to
|
||||
run read-only commands:
|
||||
|
||||
0336...$ lightning-cli commando-rune restrictions=readonly
|
||||
{
|
||||
"rune": "ZN7IkVe8S0fO7htvQ23mCMQ-QGzFTvn0OZPqucp881Vjb21tYW5kXmxpc3R8Y29tbWFuZF5nZXR8Y29tbWFuZD1zdW1tYXJ5JmNvbW1hbmQvZ2V0c2hhcmVkc2VjcmV0"
|
||||
}
|
||||
|
||||
We could hand that rune out to anyone we want to give read-only access.
|
||||
|
||||
### Using Runes
|
||||
|
||||
You use a rune a peer gives you with the `rune` parameter to `commando`, eg:
|
||||
|
||||
02be...$ lightning-cli commando 0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62 listchannels {} j2fEW43Y8Ie7d0oGt9pPxaIcl6RP6MjRGC1mgxKuUDxpZD0wMmJlODVlNzA4MjFlNmNjZjIxNDlmMWE3YmY1ZTM0ZDc3OTAwMGY3MjgxNTQ1MDhjYzkwNzJlNGU5MDE4MmNkZDI=
|
||||
|
||||
Or, using keyword parameters:
|
||||
|
||||
02be...$ lightning-cli commando peer_id=0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62 method=listchannels rune=j2fEW43Y8Ie7d0oGt9pPxaIcl6RP6MjRGC1mgxKuUDxpZD0wMmJlODVlNzA4MjFlNmNjZjIxNDlmMWE3YmY1ZTM0ZDc3OTAwMGY3MjgxNTQ1MDhjYzkwNzJlNGU5MDE4MmNkZDI=
|
||||
|
||||
It's more common to set your peer to persistently cache the rune as the default for whenever you issue a command, using `commando-cacherune`:
|
||||
|
||||
02be...$ lightning-cli commando 0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62 commando-cacherune {} j2fEW43Y8Ie7d0oGt9pPxaIcl6RP6MjRGC1mgxKuUDxpZD0wMmJlODVlNzA4MjFlNmNjZjIxNDlmMWE3YmY1ZTM0ZDc3OTAwMGY3MjgxNTQ1MDhjYzkwNzJlNGU5MDE4MmNkZDI=
|
||||
02be...$ lightning-cli commando 0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62 listpeers
|
||||
|
||||
|
||||
### Restricting Runes
|
||||
|
||||
There's a [runes library](https://github.com/rustyrussell/runes/): which lets you add restrictions, but for
|
||||
convenience the `commando-rune` can also add them, like so:
|
||||
|
||||
- `commando-rune RUNE RESTRICTION...`
|
||||
|
||||
Each RESTRICTION is a string: fieldname, followed by a condition, followed by a
|
||||
value. It can either be a single string, or an array of strings.
|
||||
|
||||
Valid fieldnames are:
|
||||
* **id**: what peer is allowed to use it.
|
||||
* **time**: time in seconds since 1970, as returned by `date +%s` or Python `int(time.time())`.
|
||||
* **version**: what version of c-lightning is running.
|
||||
* **command**: the command they are trying to run (e.g. "listpeers")
|
||||
* **parr0**..**parrN**: the parameters if specified using a JSON array
|
||||
* **pnameNAME**..: the parameters (by name) if specified using a JSON object ('-' and other punctuation are removed from NAME).
|
||||
|
||||
Conditions are listed in the [runes documentation](https://github.com/rustyrussell/runes/blob/v0.3.1/README.md#rune-language):
|
||||
|
||||
* `!`: Pass if field is missing (value ignored)
|
||||
* `=`: Pass if exists and exactly equals
|
||||
* `^`: Pass if exists and begins with
|
||||
* `$`: Pass if exists and ends with
|
||||
* `~`: Pass if exists and contains
|
||||
* `<`: Pass if exists, is a valid decimal (may be signed), and numerically less than
|
||||
* `>`: Pass if exists, is a valid decimal (may be signed), and numerically greater than
|
||||
* `}`: Pass if exists and lexicographically greater than (or longer)
* `{`: Pass if exists and lexicographically less than (or shorter)
|
||||
* `#`: Always pass: no condition, this is a comment.
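For illustration, this is roughly the check the receiving plugin performs (it mirrors `check_rune` in `commando.py`; the `MasterRune` constructor is assumed from the runes library, and the field values shown here are made up):

```python
import secrets
import time
import runes

# The node's secret master rune; handed-out runes are derived from it.
masterrune = runes.MasterRune(secrets.token_bytes(16))
runestr = masterrune.to_base64()  # in practice a restricted rune is handed out

# Fields built from the incoming request (see check_rune in commando.py)
fields = {
    'time': int(time.time()),
    'id': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518',
    'version': 'v0.10.1',  # whatever lightningd reports
    'method': 'listpeers',
    'pnameid': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518',
}
ok, reason = masterrune.check_with_reason(runestr, fields)
```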
|
||||
|
||||
Say we have peer
|
||||
0336efaa22b8ba77ae721a25d589e1c5f2486073dd2f041add32a23316150e8b62,
|
||||
and we want to allow peer
|
||||
0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518 to
|
||||
run listpeers on itself. This is actually three restrictions:
|
||||
|
||||
1. id=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518,
|
||||
since it must be the one initiating the command.
|
||||
2. method=listpeers, since that's the only command it can run.
|
||||
3. pnameid=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518 OR
|
||||
parr0=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518;
|
||||
we let them specify parameters by name or array, so allow both.
|
||||
|
||||
We can add these restrictions one at a time, or specify them all at
|
||||
once. By default, we start with the master rune, which has no
|
||||
restrictions:
|
||||
|
||||
0336...$ lightning-cli commando-rune restrictions='["id=02be85e70821e6ccf2149f1a7bf5e34d779000f728154508cc9072e4e90182cdd2","method=listpeers","pnameid=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518|parr0=0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518"]'
|
||||
{
|
||||
"rune": "-As0gqymadZpgTnm9fBoDVtrjPmpwPrKmCQWUcqlouJpZD0wMmJlODVlNzA4MjFlNmNjZjIxNDlmMWE3YmY1ZTM0ZDc3OTAwMGY3MjgxNTQ1MDhjYzkwNzJlNGU5MDE4MmNkZDImbWV0aG9kPWxpc3RwZWVycyZwbmFtZWlkPTAyNjZlNDU5OGQxZDNjNDE1ZjU3MmE4NDg4ODMwYjYwZjdlNzQ0ZWQ5MjM1ZWIwYjFiYTkzMjgzYjMxNWMwMzUxOHxwYXJyMD0wMjY2ZTQ1OThkMWQzYzQxNWY1NzJhODQ4ODgzMGI2MGY3ZTc0NGVkOTIzNWViMGIxYmE5MzI4M2IzMTVjMDM1MTg="
|
||||
}
|
||||
|
||||
We can publish this on Twitter and it doesn't matter, since it only
|
||||
works for that one peer.
|
||||
|
||||
|
||||
### Temporary Runes to Authorize Yourself
|
||||
|
||||
This creates a rune which can only be used to create another rune for
|
||||
a specific nodeid, for (as of this writing!) the next 60 seconds:
|
||||
|
||||
lightning-cli commando-rune restrictions='["method=commando-rune","pnamerestrictions^id=|parr1^id=","time<1627886935"]'
|
||||
|
||||
That rune only allows them to run "commando-rune" with an "id="
|
||||
restriction, within the given time; useful to place in a QR code to
|
||||
allow self-authorization.
|
||||
|
||||
|
||||
### Huge Commands and Responses
|
||||
|
||||
Commands larger than about 64k are split into multiple parts; command
|
||||
responses similarly. To avoid a Denial of Service, commands must be
|
||||
less than about 1MB in size: that's increased to 10MB if the peer has
|
||||
successfully used `commando-cacherune`.
|
||||
426
Unmaintained/commando/commando.py
Executable file
426
Unmaintained/commando/commando.py
Executable file
@@ -0,0 +1,426 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Commando is a plugin to allow one node to control another. You use
|
||||
"commando" to send commands, with 'method', 'params' and optional
|
||||
'rune' which authorizes it.
|
||||
|
||||
Additionally, you can use "commando-rune" to create/add restrictions to
|
||||
existing runes (you can also use the runes.py library).
|
||||
|
||||
Rather than handing a rune every time, peers can do "commando-cacherune"
|
||||
to make it the persistent default for their peer_id.
|
||||
|
||||
The formats are:
|
||||
|
||||
type:4C4D - execute this command (with more coming)
|
||||
type:4C4F - execute this command
|
||||
type:594B - reply (with more coming)
|
||||
type:594D - last reply
|
||||
|
||||
Each one is an 8 byte id (to link replies to command), followed by JSON.
|
||||
|
||||
"""
|
||||
from pyln.client import Plugin, RpcError # type: ignore
|
||||
import json
|
||||
import textwrap
|
||||
import time
|
||||
import random
|
||||
import secrets
|
||||
import string
|
||||
import runes # type: ignore
|
||||
import multiprocessing
|
||||
from typing import Dict, Tuple, Optional
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
# "YOLO"!
|
||||
COMMANDO_CMD_CONTINUES = 0x4c4d
|
||||
COMMANDO_CMD_TERM = 0x4c4f
|
||||
|
||||
# Replies are split across multiple CONTINUES, then TERM.
|
||||
COMMANDO_REPLY_CONTINUES = 0x594b
|
||||
COMMANDO_REPLY_TERM = 0x594d
|
||||
|
||||
|
||||
class CommandResponse:
|
||||
def __init__(self, req):
|
||||
self.buf = bytes()
|
||||
self.req = req
|
||||
|
||||
|
||||
class InReq:
|
||||
def __init__(self, idnum):
|
||||
self.idnum = idnum
|
||||
self.buf = b''
|
||||
self.discard = False
|
||||
|
||||
def append(self, data):
|
||||
if not self.discard:
|
||||
self.buf += data
|
||||
|
||||
def start_discard(self):
|
||||
self.buf = b''
|
||||
self.discard = True
|
||||
|
||||
|
||||
def split_cmd(cmdstr):
|
||||
"""Interprets JSON and method and params"""
|
||||
cmd = json.loads(cmdstr)
|
||||
|
||||
return cmd['method'], cmd.get('params', {}), cmd.get('rune')
|
||||
|
||||
|
||||
def send_msg(plugin, peer_id, msgtype, idnum, contents):
|
||||
"""Messages are form [8-byte-id][data]"""
|
||||
msg = (msgtype.to_bytes(2, 'big')
|
||||
+ idnum.to_bytes(8, 'big')
|
||||
+ bytes(contents, encoding='utf8'))
|
||||
plugin.rpc.call(plugin.msgcmd, {'node_id': peer_id, 'msg': msg.hex()})
|
||||
|
||||
|
||||
def send_msgs(plugin, peer_id, idnum, obj, msgtype_cont, msgtype_term):
|
||||
# We can only send 64k in a message, but there is 10 byte overhead
|
||||
# in the message header; 65000 is safe.
|
||||
parts = textwrap.wrap(json.dumps(obj), 65000)
|
||||
for p in parts[:-1]:
|
||||
send_msg(plugin, peer_id, msgtype_cont, idnum, p)
|
||||
|
||||
send_msg(plugin, peer_id, msgtype_term, idnum, parts[-1])
|
||||
|
||||
|
||||
def send_result(plugin, peer_id, idnum, res):
|
||||
send_msgs(plugin, peer_id, idnum, res,
|
||||
COMMANDO_REPLY_CONTINUES, COMMANDO_REPLY_TERM)
|
||||
|
||||
|
||||
def send_request(plugin, peer_id, idnum, req):
|
||||
send_msgs(plugin, peer_id, idnum, req,
|
||||
COMMANDO_CMD_CONTINUES, COMMANDO_CMD_TERM)
|
||||
|
||||
|
||||
def is_rune_valid(plugin, runestr) -> Tuple[Optional[runes.Rune], str]:
|
||||
"""Is this runestring valid, and authorized for us?"""
|
||||
try:
|
||||
rune = runes.Rune.from_base64(runestr)
|
||||
except: # noqa: E722
|
||||
return None, 'Malformed base64 string'
|
||||
|
||||
if not plugin.masterrune.is_rune_authorized(rune):
|
||||
return None, 'Invalid rune string'
|
||||
|
||||
return rune, ''
|
||||
|
||||
|
||||
def check_rune(plugin, node_id, runestr, command, params) -> Tuple[bool, str]:
|
||||
"""If we have a runestr, check it's valid and conditions met"""
|
||||
# If they don't specify a rune, we use any previous for this peer
|
||||
if runestr is None:
|
||||
runestr = plugin.peer_runes.get(node_id)
|
||||
if runestr is None:
|
||||
# Finally, try reader-writer lists
|
||||
if node_id in plugin.writers:
|
||||
runestr = plugin.masterrune.to_base64()
|
||||
elif node_id in plugin.readers:
|
||||
runestr = add_reader_restrictions(plugin.masterrune.copy())
|
||||
|
||||
if runestr is None:
|
||||
return False, 'No rune'
|
||||
|
||||
commando_dict = {'time': int(time.time()),
|
||||
'id': node_id,
|
||||
'version': plugin.version,
|
||||
'method': command}
|
||||
|
||||
# FIXME: This doesn't work well with complex params (it makes them str())
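# Illustrative mapping: params=['a', 'b'] adds {'parr0': 'a', 'parr1': 'b'};
# params={'source': '02aa...'} adds {'pnamesource': '02aa...'} (punctuation is
# stripped from field names), which is what restrictions such as
# 'pnamesource=...' or 'parr1=...' are matched against.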
|
||||
if isinstance(params, list):
|
||||
for i, p in enumerate(params):
|
||||
commando_dict['parr{}'.format(i)] = p
|
||||
else:
|
||||
for k, v in params.items():
|
||||
# Cannot have punctuation in fieldnames, so remove.
|
||||
for c in string.punctuation:
|
||||
k = k.replace(c, '')
|
||||
commando_dict['pname{}'.format(k)] = v
|
||||
|
||||
return plugin.masterrune.check_with_reason(runestr, commando_dict)
|
||||
|
||||
|
||||
def do_cacherune(plugin, peer_id, runestr):
|
||||
if not plugin.have_datastore:
|
||||
return {'error': 'No datastore available: try datastore.py?'}
|
||||
|
||||
if runestr is None:
|
||||
return {'error': 'No rune set?'}
|
||||
|
||||
rune, whynot = is_rune_valid(plugin, runestr)
|
||||
if not rune:
|
||||
return {'error': whynot}
|
||||
|
||||
plugin.peer_runes[peer_id] = runestr
|
||||
save_peer_rune(plugin, peer_id, runestr)
|
||||
return {'result': {'rune': runestr}}
|
||||
|
||||
|
||||
def command_run(plugin, peer_id, idnum, method, params):
|
||||
"""Function to run a command and write the result"""
|
||||
try:
|
||||
res = {'result': plugin.rpc.call(method, params)}
|
||||
except RpcError as e:
|
||||
res = {'error': e.error}
|
||||
send_result(plugin, peer_id, idnum, res)
|
||||
|
||||
|
||||
def try_command(plugin, peer_id, idnum, method, params, runestr):
|
||||
"""Run an arbitrary command and message back the result"""
|
||||
# You can always set your rune, even if *that rune* wouldn't
|
||||
# allow it!
|
||||
if method == 'commando-cacherune':
|
||||
res = do_cacherune(plugin, peer_id, runestr)
|
||||
else:
|
||||
ok, failstr = check_rune(plugin, peer_id, runestr, method, params)
|
||||
if not ok:
|
||||
res = {'error': 'Not authorized: ' + failstr}
|
||||
elif method in plugin.methods:
|
||||
# Don't try to call indirectly into ourselves; we deadlock!
|
||||
# But commando-rune is useful, so hardcode that.
|
||||
if method == "commando-rune":
|
||||
if isinstance(params, list):
|
||||
res = {'result': commando_rune(plugin, *params)}
|
||||
else:
|
||||
res = {'result': commando_rune(plugin, **params)}
|
||||
else:
|
||||
res = {'error': 'FIXME: Refusing to call inside ourselves'}
|
||||
else:
|
||||
# The subprocess does send_result itself: pyln-client doesn't
|
||||
# support async RPC yet.
|
||||
multiprocessing.Process(target=command_run,
|
||||
args=(plugin, peer_id, idnum, method, params)).start()
|
||||
return
|
||||
|
||||
send_result(plugin, peer_id, idnum, res)
|
||||
|
||||
|
||||
@plugin.async_hook('custommsg')
|
||||
def on_custommsg(peer_id, payload, plugin, request, **kwargs):
|
||||
pbytes = bytes.fromhex(payload)
|
||||
mtype = int.from_bytes(pbytes[:2], "big")
|
||||
idnum = int.from_bytes(pbytes[2:10], "big")
|
||||
data = pbytes[10:]
|
||||
|
||||
if mtype == COMMANDO_CMD_CONTINUES:
|
||||
if peer_id not in plugin.in_reqs or idnum != plugin.in_reqs[peer_id].idnum:
|
||||
plugin.in_reqs[peer_id] = InReq(idnum)
|
||||
plugin.in_reqs[peer_id].append(data)
|
||||
|
||||
# If you have cached a rune, give 10MB, otherwise 1MB.
|
||||
# We can have hundreds of these things...
|
||||
max_cmdlen = 1000000
|
||||
if peer_id in plugin.peer_runes:
|
||||
max_cmdlen *= 10
|
||||
|
||||
if len(plugin.in_reqs[peer_id].buf) > max_cmdlen:
|
||||
plugin.in_reqs[peer_id].start_discard()
|
||||
elif mtype == COMMANDO_CMD_TERM:
|
||||
# Prepend any prior data from COMMANDO_CMD_CONTINUES:
|
||||
if peer_id in plugin.in_reqs:
|
||||
data = plugin.in_reqs[peer_id].buf + data
|
||||
discard = plugin.in_reqs[peer_id].discard
|
||||
del plugin.in_reqs[peer_id]
|
||||
# Were we ignoring this for being too long? Error out now.
|
||||
if discard:
|
||||
send_result(plugin, peer_id, idnum,
|
||||
{'error': "Command too long"})
|
||||
request.set_result({'result': 'continue'})
|
||||
return
|
||||
|
||||
method, params, runestr = split_cmd(data)
|
||||
try_command(plugin, peer_id, idnum, method, params, runestr)
|
||||
elif mtype == COMMANDO_REPLY_CONTINUES:
|
||||
if idnum in plugin.out_reqs:
|
||||
plugin.out_reqs[idnum].buf += data
|
||||
elif mtype == COMMANDO_REPLY_TERM:
|
||||
if idnum in plugin.out_reqs:
|
||||
plugin.out_reqs[idnum].buf += data
|
||||
finished = plugin.out_reqs[idnum]
|
||||
del plugin.out_reqs[idnum]
|
||||
|
||||
try:
|
||||
ret = json.loads(finished.buf.decode())
|
||||
except Exception as e:
|
||||
# Bad response
|
||||
finished.req.set_exception(e)
|
||||
return {'result': 'continue'}
|
||||
|
||||
if 'error' in ret:
|
||||
# Pass through error
|
||||
finished.req.set_exception(RpcError('commando', {},
|
||||
ret['error']))
|
||||
else:
|
||||
# Pass through result
|
||||
finished.req.set_result(ret['result'])
|
||||
request.set_result({'result': 'continue'})
|
||||
|
||||
|
||||
@plugin.subscribe('disconnect')
|
||||
def on_disconnect(id, plugin, request, **kwargs):
|
||||
if id in plugin.in_reqs:
|
||||
del plugin.in_reqs[id]
|
||||
|
||||
|
||||
@plugin.async_method("commando")
|
||||
def commando(plugin, request, peer_id, method, params=None, rune=None):
|
||||
"""Send a command to node_id, and wait for a response"""
|
||||
res = {'method': method}
|
||||
if params:
|
||||
res['params'] = params
|
||||
if rune:
|
||||
res['rune'] = rune
|
||||
|
||||
while True:
|
||||
idnum = random.randint(0, 2**64 - 1)
|
||||
if idnum not in plugin.out_reqs:
|
||||
break
|
||||
|
||||
plugin.out_reqs[idnum] = CommandResponse(request)
|
||||
send_request(plugin, peer_id, idnum, res)
|
||||
|
||||
|
||||
@plugin.method("commando-cacherune")
|
||||
def commando_cacherune(plugin, rune):
|
||||
"""Sets the rune given to the persistent rune for this peer_id"""
|
||||
# This is intercepted by commando runner, above.
|
||||
raise RpcError('commando-cacherune', {},
|
||||
'Must be called as a remote commando call')
|
||||
|
||||
|
||||
def add_reader_restrictions(rune: runes.Rune) -> str:
|
||||
"""Let them execute list or get, but not getsharesecret!"""
|
||||
# Allow list*, get* or summary.
|
||||
rune.add_restriction(runes.Restriction.from_str('method^list'
|
||||
'|method^get'
|
||||
'|method=summary'))
|
||||
# But not getsharesecret!
|
||||
rune.add_restriction(runes.Restriction.from_str('method/getsharedsecret'))
|
||||
# And not listdatastore!
|
||||
rune.add_restriction(runes.Restriction.from_str('method/listdatastore'))
|
||||
return rune.to_base64()
|
||||
|
||||
|
||||
def save_peer_rune(plugin, peer_id, runestr) -> None:
|
||||
assert plugin.have_datastore
|
||||
plugin.rpc.datastore(key=['commando', 'peer_runes', peer_id],
|
||||
string=runestr,
|
||||
mode='create-or-replace')
|
||||
|
||||
|
||||
def load_peer_runes(plugin) -> Dict[str, str]:
|
||||
if not plugin.have_datastore:
|
||||
return {}
|
||||
|
||||
peer_runes = {}
|
||||
entries = plugin.rpc.listdatastore(key=['commando', 'peer_runes'])
|
||||
for entry in entries['datastore']:
|
||||
peer_runes[entry['key'][2]] = entry['string']
|
||||
return peer_runes
|
||||
|
||||
|
||||
@plugin.method("commando-rune")
|
||||
def commando_rune(plugin, rune=None, restrictions=[]):
|
||||
"""Create a rune, (or derive from {rune}) with the given
|
||||
{restrictions} array (or string), or 'readonly'"""
|
||||
if not plugin.have_datastore:
|
||||
raise RpcError('commando-rune', {},
|
||||
'No datastore available: try datastore.py?')
|
||||
if rune is None:
|
||||
this_rune = plugin.masterrune.copy()
|
||||
this_rune.add_restriction(runes.Restriction.unique_id(plugin.rune_counter))
|
||||
else:
|
||||
this_rune, whynot = is_rune_valid(plugin, rune)
|
||||
if this_rune is None:
|
||||
raise RpcError('commando-rune', {'rune': rune}, whynot)
|
||||
|
||||
if restrictions == 'readonly':
|
||||
add_reader_restrictions(this_rune)
|
||||
elif isinstance(restrictions, str):
|
||||
this_rune.add_restriction(runes.Restriction.from_str(restrictions))
|
||||
else:
|
||||
for r in restrictions:
|
||||
this_rune.add_restriction(runes.Restriction.from_str(r))
|
||||
|
||||
# Now we've succeeded, update rune_counter.
|
||||
if rune is None:
|
||||
plugin.rpc.datastore(key=['commando', 'rune_counter'],
|
||||
string=str(plugin.rune_counter + 1),
|
||||
mode='must-replace',
|
||||
generation=plugin.rune_counter_generation)
|
||||
plugin.rune_counter += 1
|
||||
plugin.rune_counter_generation += 1
|
||||
|
||||
return {'rune': this_rune.to_base64()}
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(options, configuration, plugin):
|
||||
plugin.out_reqs = {}
|
||||
plugin.in_reqs = {}
|
||||
plugin.writers = options['commando-writer']
|
||||
plugin.readers = options['commando-reader']
|
||||
plugin.version = plugin.rpc.getinfo()['version']
|
||||
|
||||
# dev-sendcustommsg was renamed to sendcustommsg for 0.10.1
|
||||
try:
|
||||
plugin.rpc.help('sendcustommsg')
|
||||
plugin.msgcmd = 'sendcustommsg'
|
||||
except RpcError:
|
||||
plugin.msgcmd = 'dev-sendcustommsg'
|
||||
|
||||
# Unfortunately, on startup it can take a while for
|
||||
# the datastore to be loaded (as it's actually a second plugin,
|
||||
# loaded by the first).
|
||||
end = time.time() + 10
|
||||
secret = None
|
||||
while time.time() < end:
|
||||
try:
|
||||
secret = plugin.rpc.listdatastore(['commando', 'secret'])['datastore']
|
||||
except RpcError:
|
||||
time.sleep(1)
|
||||
else:
|
||||
break
|
||||
|
||||
if secret is None:
|
||||
# Use a throwaway secret
|
||||
secret = secrets.token_bytes()
|
||||
plugin.have_datastore = False
|
||||
plugin.peer_runes = {}
|
||||
plugin.log("Initialized without rune support"
|
||||
" (needs datastore.py plugin)",
|
||||
level="info")
|
||||
else:
|
||||
plugin.have_datastore = True
|
||||
if secret == []:
|
||||
plugin.log("Creating initial rune secret", level='unusual')
|
||||
secret = secrets.token_bytes()
|
||||
plugin.rpc.datastore(key=['commando', 'secret'], hex=secret.hex())
|
||||
plugin.rune_counter = 0
|
||||
plugin.rune_counter_generation = 0
|
||||
plugin.rpc.datastore(key=['commando', 'rune_counter'], string=str(0))
|
||||
else:
|
||||
secret = bytes.fromhex(secret[0]['hex'])
|
||||
counter = plugin.rpc.listdatastore(['commando', 'rune_counter'])['datastore'][0]
|
||||
plugin.rune_counter = int(counter['string'])
|
||||
plugin.rune_counter_generation = int(counter['generation'])
|
||||
plugin.log("Initialized with rune support: {} runes so far".format(plugin.rune_counter),
|
||||
level="info")
|
||||
|
||||
plugin.masterrune = runes.MasterRune(secret)
|
||||
plugin.peer_runes = load_peer_runes(plugin)
|
||||
|
||||
|
||||
plugin.add_option('commando-writer',
|
||||
description="What nodeid can do all commands?",
|
||||
default=[],
|
||||
multi=True)
|
||||
plugin.add_option('commando-reader',
|
||||
description="What nodeid can do list/get/summary commands?",
|
||||
default=[],
|
||||
multi=True)
|
||||
plugin.run()
|
||||
2
Unmaintained/commando/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
runes>=0.4
|
||||
pyln-client>=0.10.1
|
||||
325
Unmaintained/commando/test_commando.py
Executable file
@@ -0,0 +1,325 @@
|
||||
import os
|
||||
from pyln.testing.fixtures import * # type: ignore
|
||||
from pyln.client import RpcError # type: ignore
|
||||
import pytest
|
||||
import json
|
||||
import runes # type: ignore
|
||||
import commando
|
||||
import time
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "commando.py")
|
||||
datastore_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
|
||||
"datastore", "datastore.py")
|
||||
|
||||
|
||||
def test_add_reader_restrictions():
|
||||
mrune = runes.MasterRune(bytes(32))
|
||||
runestr = commando.add_reader_restrictions(mrune.copy())
|
||||
assert mrune.check_with_reason(runestr, {'method': 'listfoo'}) == (True, '')
|
||||
assert mrune.check_with_reason(runestr, {'method': 'getfoo'}) == (True, '')
|
||||
assert mrune.check_with_reason(runestr, {'method': 'getsharedsecret'}) == (False, 'method: = getsharedsecret')
|
||||
assert mrune.check_with_reason(runestr, {'method': 'summary'}) == (True, '')
|
||||
assert mrune.check_with_reason(runestr, {'method': 'fail'}) == (False, 'method: does not start with list AND method: does not start with get AND method: != summary')
|
||||
|
||||
|
||||
def test_commando(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, fundchannel=True)
|
||||
|
||||
l1.rpc.plugin_start(plugin_path, commando_reader=l2.info['id'])
|
||||
l2.rpc.plugin_start(plugin_path)
|
||||
|
||||
# This works
|
||||
res = l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'listpeers'})
|
||||
assert len(res['peers']) == 1
|
||||
assert res['peers'][0]['id'] == l2.info['id']
|
||||
|
||||
res = l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'listpeers',
|
||||
'params': {'id': l2.info['id']}})
|
||||
assert len(res['peers']) == 1
|
||||
assert res['peers'][0]['id'] == l2.info['id']
|
||||
|
||||
# This fails
|
||||
with pytest.raises(RpcError, match='method: does not start with list AND method: does not start with get AND method: != summary'):
|
||||
l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'withdraw'})
|
||||
|
||||
# As a writer, anything goes.
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path, commando_writer=l2.info['id'])
|
||||
|
||||
with pytest.raises(RpcError, match='missing required parameter'):
|
||||
l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'withdraw'})
|
||||
|
||||
ret = l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'ping',
|
||||
'params': {'id': l2.info['id']}})
|
||||
assert 'totlen' in ret
|
||||
|
||||
# Now, this will go over a single message!
|
||||
ret = l2.rpc.call(method='commando',
|
||||
payload={'peer_id': l1.info['id'],
|
||||
'method': 'getlog',
|
||||
'params': {'level': 'io'}})
|
||||
|
||||
assert len(json.dumps(ret)) > 65535
|
||||
|
||||
|
||||
def test_commando_rune(node_factory):
|
||||
l1, l2, l3 = node_factory.line_graph(3, fundchannel=False,
|
||||
opts={'plugin': [plugin_path,
|
||||
datastore_path]})
|
||||
|
||||
l1.daemon.logsearch_start = 0
|
||||
l1.daemon.wait_for_log("Initialized with rune support")
|
||||
l2.daemon.logsearch_start = 0
|
||||
l2.daemon.wait_for_log("Initialized with rune support")
|
||||
l3.daemon.logsearch_start = 0
|
||||
l3.daemon.wait_for_log("Initialized with rune support")
|
||||
|
||||
wrune = l2.rpc.commando_rune()['rune']
|
||||
rrune = l2.rpc.commando_rune(restrictions='readonly')['rune']
|
||||
|
||||
# This works
|
||||
res = l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': rrune,
|
||||
'method': 'listpeers'})
|
||||
assert len(res['peers']) == 2
|
||||
|
||||
# This fails (no rune!)
|
||||
with pytest.raises(RpcError, match='Not authorized'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'withdraw'})
|
||||
|
||||
# This fails (ro rune!)
|
||||
with pytest.raises(RpcError, match='Not authorized'):
|
||||
res = l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': rrune,
|
||||
'method': 'withdraw'})
|
||||
|
||||
# This would succeed, except for the missing param
|
||||
with pytest.raises(RpcError, match='missing required parameter'):
|
||||
res = l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': wrune,
|
||||
'method': 'withdraw'})
|
||||
|
||||
# We can subrune and use that rune explicitly.
|
||||
lcrune = l2.rpc.commando_rune(rrune, 'method=listchannels')['rune']
|
||||
with pytest.raises(RpcError, match='Not authorized'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listpeers'})
|
||||
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels'})
|
||||
|
||||
# Only allow it to list l3's channels (by source, second param)
|
||||
lcrune = l2.rpc.commando_rune(rrune, ['method=listchannels',
|
||||
'pnamesource=' + l3.info['id']
|
||||
+ '|' + 'parr1=' + l3.info['id']])['rune']
|
||||
|
||||
# Needs rune!
|
||||
with pytest.raises(RpcError, match='Not authorized'):
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': [None, l3.info['id']]})
|
||||
# Command wrong
|
||||
with pytest.raises(RpcError, match='Not authorized.*method'):
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'withdraw'})
|
||||
|
||||
# Params missing
|
||||
with pytest.raises(RpcError, match='Not authorized.*missing'):
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels'})
|
||||
|
||||
# Param wrong (array)
|
||||
with pytest.raises(RpcError, match='Not authorized.*parr1'):
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels',
|
||||
'params': [None, l2.info['id']]})
|
||||
|
||||
# Param wrong (obj)
|
||||
with pytest.raises(RpcError, match='Not authorized.*pnamesource'):
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels',
|
||||
'params': {'source': l2.info['id']}})
|
||||
|
||||
# Param right (array)
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels',
|
||||
'params': [None, l3.info['id']]})
|
||||
|
||||
# Param right (obj)
|
||||
l3.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'rune': lcrune,
|
||||
'method': 'listchannels',
|
||||
'params': {'source': l3.info['id']}})
|
||||
|
||||
|
||||
def test_commando_cacherune(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, fundchannel=False,
|
||||
opts={'plugin': [plugin_path,
|
||||
datastore_path]})
|
||||
restrictions = ['method=listchannels',
|
||||
'pnamesource={id}|parr1={id}'.format(id=l1.info['id'])]
|
||||
lcrune = l2.rpc.commando_rune(restrictions=restrictions)['rune']
|
||||
|
||||
# You can't set it, it needs to be via commando!
|
||||
with pytest.raises(RpcError,
|
||||
match='Must be called as a remote commando call'):
|
||||
l1.rpc.commando_cacherune(lcrune)
|
||||
|
||||
l1.rpc.commando(peer_id=l2.info['id'],
|
||||
method='commando-cacherune',
|
||||
rune=lcrune)
|
||||
|
||||
# Param wrong (array)
|
||||
with pytest.raises(RpcError, match='Not authorized.*parr1'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': [None, l2.info['id']]})
|
||||
|
||||
# Param wrong (obj)
|
||||
with pytest.raises(RpcError, match='Not authorized.*pnamesource'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': {'source': l2.info['id']}})
|
||||
|
||||
# Param right (array)
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': [None, l1.info['id']]})
|
||||
|
||||
# Param right (obj)
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': {'source': l1.info['id']}})
|
||||
|
||||
# Still works after restart!
|
||||
l2.restart()
|
||||
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'params': {'source': l1.info['id']}})
|
||||
|
||||
|
||||
def test_rune_time(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, fundchannel=False,
|
||||
opts={'plugin': [plugin_path,
|
||||
datastore_path]})
|
||||
|
||||
rune = l1.rpc.commando_rune(restrictions=["method=commando-rune",
|
||||
"pnamerestrictions^id=|parr1^id=",
|
||||
"time<{}"
|
||||
.format(int(time.time()) + 15)])['rune']
|
||||
# l2 has to obey restrictions
|
||||
with pytest.raises(RpcError, match='Not authorized.*pnamerestrictions'):
|
||||
l2.rpc.commando(peer_id=l1.info['id'], method='commando-rune', rune=rune)
|
||||
|
||||
with pytest.raises(RpcError, match='Not authorized.*pnamerestrictions'):
|
||||
l2.rpc.commando(peer_id=l1.info['id'], method='commando-rune', rune=rune,
|
||||
params={'restrictions': 'id<{}'.format(l2.info['id'])})
|
||||
|
||||
# By name
|
||||
rune2 = l2.rpc.commando(peer_id=l1.info['id'],
|
||||
method='commando-rune',
|
||||
rune=rune,
|
||||
params={'restrictions': 'id={}'.format(l2.info['id'])})
|
||||
# By position
|
||||
rune2a = l2.rpc.commando(peer_id=l1.info['id'],
|
||||
method='commando-rune',
|
||||
rune=rune,
|
||||
params=[None, 'id={}'.format(l2.info['id'])])
|
||||
# r2a ID will be 1 greater than r2 ID
|
||||
r2 = runes.Rune.from_base64(rune2['rune'])
|
||||
r2a = runes.Rune.from_base64(rune2a['rune'])
|
||||
assert len(r2.restrictions) == len(r2a.restrictions)
|
||||
assert r2a.restrictions[0].alternatives == [runes.Alternative(r2.restrictions[0].alternatives[0].field,
|
||||
r2.restrictions[0].alternatives[0].cond,
|
||||
str(int(r2.restrictions[0].alternatives[0].value) + 1))]
|
||||
for r2_r, r2a_r in zip(r2.restrictions[1:], r2a.restrictions[1:]):
|
||||
assert r2_r == r2a_r
|
||||
|
||||
time.sleep(16)
|
||||
with pytest.raises(RpcError, match='Not authorized.*time'):
|
||||
l2.rpc.commando(peer_id=l1.info['id'],
|
||||
method='commando-rune',
|
||||
rune=rune,
|
||||
params={'restrictions': 'id={}'.format(l2.info['id'])})
|
||||
|
||||
|
||||
def test_readonly(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, fundchannel=False,
|
||||
opts={'plugin': [plugin_path,
|
||||
datastore_path]})
|
||||
rrune = l2.rpc.commando_rune(restrictions='readonly')['rune']
|
||||
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'listchannels',
|
||||
'rune': rrune,
|
||||
'params': {'source': l1.info['id']}})
|
||||
|
||||
with pytest.raises(RpcError, match='Not authorized.* = getsharedsecret'):
|
||||
l1.rpc.commando(peer_id=l2.info['id'],
|
||||
rune=rrune,
|
||||
method='getsharedsecret')
|
||||
|
||||
with pytest.raises(RpcError, match='Not authorized.* = listdatastore'):
|
||||
l1.rpc.commando(peer_id=l2.info['id'],
|
||||
rune=rrune,
|
||||
method='listdatastore')
|
||||
|
||||
|
||||
def test_megacmd(node_factory):
|
||||
l1, l2 = node_factory.line_graph(2, fundchannel=False,
|
||||
opts={'plugin': [plugin_path,
|
||||
datastore_path]})
|
||||
rrune = l2.rpc.commando_rune(restrictions='readonly')['rune']
|
||||
|
||||
# Proof that it got the rune: fails with "Unknown command" not "Not authorized"
|
||||
with pytest.raises(RpcError, match='Unknown command'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'get' + 'x' * 130000,
|
||||
'rune': rrune,
|
||||
'params': {}})
|
||||
|
||||
with pytest.raises(RpcError, match='Command too long'):
|
||||
l1.rpc.call(method='commando',
|
||||
payload={'peer_id': l2.info['id'],
|
||||
'method': 'get' + 'x' * 1100000,
|
||||
'rune': rrune,
|
||||
'params': {}})
|
||||
22
Unmaintained/donations/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
||||
Note: the modules in the ccan/ directory have their own licenses, but
|
||||
the rest of the code is covered by the following (BSD-MIT) license:
|
||||
|
||||
Copyright Rene Pickhardt 2018.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
56
Unmaintained/donations/README.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# Invoice Service (for Donations) plugin
|
||||
|
||||
This plugin enables c-lightning nodes to start one or several small webservers
via the command line on a specified port. The webserver is based on Flask and
exposes the invoice API call.
|
||||
|
||||
Therefore, people can query for an invoice which they can use to pay. The plugin
|
||||
can be started with `lightningd` by adding the following `--plugin` option
|
||||
(adjusting the path to wherever the plugins are actually stored):
|
||||
|
||||
```
|
||||
lightningd --plugin=/path/to/plugins/donations.py
|
||||
```
|
||||
|
||||
By default the plugin will automatically start a webserver serving the donations page on port `8088`.
|
||||
|
||||
|
||||
The following command line options are registered by the plugin and can be used to customize its behavior:
|
||||
|
||||
| Command line option | Description |
|
||||
|------------------------|---------------------------------------------------------------------|
|
||||
| `--donations-autostart` | Should the donation server start automatically? (default: `true`) |
| `--donations-web-port`  | Which port should the donation server listen to? (default: `8088`) |
|
||||
|
||||
|
||||
Once the plugin is active you can run `lightning-cli help donationserver` to
|
||||
learn about the command line API:
|
||||
|
||||
Controls a donationserver with `start`/`stop`/`restart`/`list` on `port`.
|
||||
|
||||
A simple HTTP server is created that can serve a donation webpage and allows
issuing invoices. The plugin takes one of the following three commands
{start/stop/restart} as the first argument. By default the plugin starts the
server on port 8088. This can however be changed with the port argument.
|
||||
|
||||
This means after starting `lightningd` together with the plugin you can run:
|
||||
`lightning-cli donationserver start` and access the server at
|
||||
http://localhost:8088/donation (in case you run your lightning node at
|
||||
`localhost`)
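
If you want to check programmatically whether a generated invoice has been paid,
you can poll the same endpoint the donation page's JavaScript uses
(`/is_invoice_paid/<label>`), which returns `waiting` until the invoice is paid.
A minimal sketch, assuming the `requests` package is installed and the server
runs on `localhost:8088` (the label below is a placeholder taken from the page
after submitting the form):

```python
import requests

label = "ln-plugin-donations-0.123456"   # placeholder label
status = requests.get("http://localhost:8088/is_invoice_paid/{}".format(label)).text
print("paid" if status != "waiting" else "still waiting")
```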
|
||||
|
||||
## About the plugin
|
||||
You can see a demo of the plugin on the [authors website][rene-donations]:
|
||||
|
||||
|
||||
This plugin was created and is maintained by Rene Pickhardt. Thus Rene Pickhardt
|
||||
is the copyright owner of this plugin. It shall serve as an educational resource
|
||||
on his [Youtube channel][rene-youtube].
|
||||
|
||||
The plugin is licensed like the rest of c-lightning under the BSD-MIT license
and comes without any warranty.
|
||||
|
||||
If you like my work feel free to support me on [patreon][rene-patreon].
|
||||
|
||||
[rene-donations]: https://ln.rene-pickhardt.de/donation
|
||||
[rene-patreon]: https://www.patreon.com/renepickhardt
|
||||
[rene-youtube]: https://www.youtube.com/user/RenePickhardt
|
||||
212
Unmaintained/donations/donations.py
Executable file
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
""" A small donation service so that users can request ln invoices
|
||||
|
||||
This plugin spins up a small flask server that provides a form to
|
||||
users who wish to donate some money to the owner of the lightning
|
||||
node. The server can run on an arbitrary port and returns an invoice.
|
||||
Also a list of previously paid invoices (only those that used this
|
||||
service) will be displayed. Displaying paid invoices could be made
|
||||
optionally in a future version.
|
||||
|
||||
Author: Rene Pickhardt (https://ln.rene-pickhardt.de)
|
||||
|
||||
you can see a demo of the plugin (and leave a tip) directly at:
|
||||
https://ln.rene-pickhardt.de/donation
|
||||
|
||||
LICENSE: MIT / APACHE
|
||||
"""
|
||||
import base64
|
||||
import multiprocessing
|
||||
import qrcode
|
||||
|
||||
|
||||
from flask import Flask, render_template
|
||||
from flask_bootstrap import Bootstrap
|
||||
from flask_wtf import FlaskForm
|
||||
from io import BytesIO
|
||||
from pyln.client import Plugin
|
||||
from random import random
|
||||
from wtforms import StringField, SubmitField, IntegerField
|
||||
from wtforms.validators import DataRequired, NumberRange
|
||||
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
|
||||
class DonationForm(FlaskForm):
|
||||
"""Form for donations """
|
||||
amount = IntegerField("Enter how many Satoshis you want to donate!",
|
||||
validators=[DataRequired(), NumberRange(min=1, max=16666666)])
|
||||
description = StringField("Leave a comment (displayed publicly)")
|
||||
submit = SubmitField('Donate')
|
||||
|
||||
|
||||
def make_base64_qr_code(bolt11):
|
||||
qr = qrcode.QRCode(
|
||||
version=1,
|
||||
error_correction=qrcode.constants.ERROR_CORRECT_H,
|
||||
box_size=4,
|
||||
border=4,
|
||||
)
|
||||
|
||||
qr.add_data(bolt11)
|
||||
qr.make(fit=True)
|
||||
img = qr.make_image()
|
||||
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="PNG")
|
||||
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
return img_str
|
||||
|
||||
|
||||
def ajax(label):
|
||||
global plugin
|
||||
msg = plugin.rpc.listinvoices(label)["invoices"][0]
|
||||
if msg["status"] == "paid":
|
||||
return "Your donation has been received and is well appricated."
|
||||
return "waiting"
|
||||
|
||||
|
||||
def donation_form():
|
||||
global plugin
|
||||
form = DonationForm()
|
||||
b11 = None
|
||||
qr = None
|
||||
label = None
|
||||
if form.validate_on_submit():
|
||||
amount = form.amount.data
|
||||
description = form.description.data
|
||||
label = "ln-plugin-donations-{}".format(random())
|
||||
invoice = plugin.rpc.invoice(int(amount) * 1000, label, description)
|
||||
b11 = invoice["bolt11"]
|
||||
qr = make_base64_qr_code(b11)
|
||||
|
||||
invoices = plugin.rpc.listinvoices()["invoices"]
|
||||
donations = []
|
||||
for invoice in invoices:
|
||||
if invoice["label"].startswith("ln-plugin-donations-"):
|
||||
# FIXME: change to paid after debugging
|
||||
if invoice["status"] == "paid":
|
||||
bolt11 = plugin.rpc.decodepay(invoice["bolt11"])
|
||||
satoshis = int(bolt11["msatoshi"]) // 1000
|
||||
description = bolt11["description"]
|
||||
ts = bolt11["created_at"]
|
||||
donations.append((ts, satoshis, description))
|
||||
|
||||
if b11 is not None:
|
||||
return render_template("donation.html", donations=sorted(donations, reverse=True), form=form, bolt11=b11, qr=qr, label=label)
|
||||
else:
|
||||
return render_template("donation.html", donations=sorted(donations, reverse=True), form=form)
|
||||
|
||||
|
||||
def worker(port):
|
||||
app = Flask(__name__)
|
||||
# FIXME: use hexlified hsm secret or something else
|
||||
app.config['SECRET_KEY'] = 'you-will-never-guess-this'
|
||||
app.add_url_rule('/donation', 'donation',
|
||||
donation_form, methods=["GET", "POST"])
|
||||
app.add_url_rule('/is_invoice_paid/<label>', 'ajax', ajax)
|
||||
Bootstrap(app)
|
||||
app.run(host="0.0.0.0", port=port)
|
||||
return
|
||||
|
||||
|
||||
jobs = {}
|
||||
|
||||
|
||||
def start_server(port):
|
||||
if port in jobs:
|
||||
return False, "server already running"
|
||||
|
||||
p = multiprocessing.Process(
|
||||
target=worker, args=[port], name="server on port {}".format(port))
|
||||
p.daemon = True
|
||||
|
||||
jobs[port] = p
|
||||
p.start()
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def stop_server(port):
|
||||
if port in jobs:
|
||||
jobs[port].terminate()
|
||||
jobs[port].join()
|
||||
del jobs[port]
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
@plugin.method('donationserver')
|
||||
def donationserver(command="start", port=8088):
|
||||
"""Starts a donationserver with {start/stop/restart} on {port}.
|
||||
|
||||
A simple HTTP server is created that can serve a donation webpage and
allows issuing invoices. The plugin takes one of the following three
commands {start/stop/restart} as the first argument. By default the plugin
starts the server on port 8088. This can however be changed with the port
argument.
|
||||
|
||||
"""
|
||||
commands = {"start", "stop", "restart", "list"}
|
||||
|
||||
# if command unknown make start our default command
|
||||
if command not in commands:
|
||||
command = "start"
|
||||
|
||||
# if port not an integer make 8088 as default
|
||||
try:
|
||||
port = int(port)
|
||||
except Exception:
|
||||
port = int(plugin.options['donations-web-port']['value'])
|
||||
|
||||
if command == "list":
|
||||
return "servers running on the following ports: {}".format(list(jobs.keys()))
|
||||
|
||||
if command == "start":
|
||||
if port in jobs:
|
||||
return "Server already running on port {}. Maybe restart the server?".format(port)
|
||||
suc = start_server(port)
|
||||
if suc:
|
||||
return "started server successfully on port {}".format(port)
|
||||
else:
|
||||
return "Could not start server on port {}".format(port)
|
||||
|
||||
if command == "stop":
|
||||
if stop_server(port):
|
||||
return "stopped server on port {}".format(port)
|
||||
else:
|
||||
return "could not stop the server on port {}".format(port)
|
||||
|
||||
if command == "restart":
|
||||
stop_server(port)
|
||||
suc = start_server(port)
|
||||
if suc:
|
||||
return "started server successfully on port {}".format(port)
|
||||
else:
|
||||
return "Could not start server on port {}".format(port)
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'donations-autostart',
|
||||
'true',
|
||||
'Should the donation server start automatically'
|
||||
)
|
||||
|
||||
plugin.add_option(
|
||||
'donations-web-port',
|
||||
'8088',
|
||||
'Which port should the donation server listen to?'
|
||||
)
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(options, configuration, plugin):
|
||||
port = int(options['donations-web-port'])
|
||||
|
||||
if options['donations-autostart'].lower() in ['true', '1']:
|
||||
start_server(port)
|
||||
|
||||
|
||||
plugin.run()
|
||||
1090
Unmaintained/donations/poetry.lock
generated
Normal file
File diff suppressed because it is too large
23
Unmaintained/donations/pyproject.toml
Normal file
@@ -0,0 +1,23 @@
|
||||
[tool.poetry]
|
||||
name = "cln-plugins-donations"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = ["Christian Decker <decker.christian@gmail.com>"]
|
||||
license = "MIT"
|
||||
readme = "README.md"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.7"
|
||||
qrcode = "6.1"
|
||||
flask = "2.0.3"
|
||||
pyln-client = "0.12.1"
|
||||
flask-bootstrap = "^3.3.7.1"
|
||||
flask-wtf = "0.15.1"
|
||||
wtforms = "2.3.3"
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
pyln-testing = "0.12.1"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
79
Unmaintained/donations/templates/donation.html
Normal file
@@ -0,0 +1,79 @@
|
||||
{% extends 'bootstrap/base.html' %}
|
||||
{% import 'bootstrap/wtf.html' as wtf %}
|
||||
|
||||
{% block title %}
|
||||
Lightning Donations
|
||||
{% endblock %}
|
||||
|
||||
{% block navbar %}
|
||||
<nav class="navbar navbar-default">
|
||||
... navigation bar here (see complete code on GitHub) ...
|
||||
</nav>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="container">
|
||||
<h1>Leave a donation to support my work!</h1>
|
||||
{% if bolt11 %}
|
||||
<div id="target_div">
|
||||
<div>
|
||||
<input type="text" value="{{bolt11}}" id="bolt11">
|
||||
<button onclick="copyFunction()">Copy invoice</button>
|
||||
</div>
|
||||
<div>
|
||||
<img src="data:image/png;base64,{{qr}}" />
|
||||
</div>
|
||||
</div>
|
||||
{% else %}
|
||||
{{ wtf.quick_form(form) }}
|
||||
{% endif %}
|
||||
<h2>Most recent donations & comments</h2>
|
||||
<ul>
|
||||
{% for item in donations %}
|
||||
<li>{{ item[1] }} Satoshi. Message: {{ item[2] }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
<p>The above texts come from a community of unknown users. If you think they violate your copyright, please <a href="https://www.rene-pickhardt.de/imprint" rel="nofollow">contact me</a> so that I can remove those comments. According to German law I am not responsible for copyright violations arising from user-generated content unless you notify me and I don't react.</p>
|
||||
|
||||
<hr>
|
||||
<p>
|
||||
This c-lightning invoice query service for donations and spontaneous payments is brought to you by
|
||||
<a href="https://ln.rene-pickhardt.de">Rene Pickhardt</a>.</p>
|
||||
|
||||
<p>
|
||||
If you want to learn more about the Lightning network (for beginners and for developers) check out
|
||||
<a href="https://www.youtube.com/user/RenePickhardt">his youtube channel</a>.
|
||||
</p>
|
||||
<p>
|
||||
Find the source code for this plugin at: <a href="https://github.com/ElementsProject/lightning/tree/master/contrib/plugins/donations">https://github.com/ElementsProject/lightning/tree/master/contrib/plugins/donations</a>
|
||||
</p>
|
||||
</div>
|
||||
{% endblock %}
|
||||
{% block scripts %}
|
||||
{{super()}}
|
||||
<script>
|
||||
var interval = null;
|
||||
$(document).on('ready',function(){
|
||||
interval = setInterval(updateDiv,3000);
|
||||
});
|
||||
|
||||
function updateDiv(){
|
||||
$.ajax({
|
||||
url: '/is_invoice_paid/{{label}}',
|
||||
success: function(data){
|
||||
if (data != "waiting") {
|
||||
var tc = document.getElementById("target_div");
|
||||
tc.innerHTML = data;
|
||||
clearInterval(interval);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function copyFunction() {
|
||||
document.getElementById("bolt11").select();
|
||||
document.execCommand("copy");
|
||||
alert("Copied invoice to clipboard.");
|
||||
}
|
||||
</script>
|
||||
{% endblock %}
|
||||
28
Unmaintained/donations/test_donations.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import os
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
from ephemeral_port_reserve import reserve # type: ignore
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "donations.py")
|
||||
|
||||
|
||||
def test_donation_starts(node_factory):
|
||||
l1 = node_factory.get_node(allow_warning=True)
|
||||
# Test dynamically
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.stop()
|
||||
# Then statically
|
||||
l1.daemon.opts["plugin"] = plugin_path
|
||||
l1.start()
|
||||
|
||||
|
||||
def test_donation_server(node_factory):
|
||||
pluginopt = {'plugin': plugin_path, 'donations-autostart': False}
|
||||
l1 = node_factory.get_node(options=pluginopt, allow_warning=True)
|
||||
port = reserve()
|
||||
l1.rpc.donationserver('start', port)
|
||||
l1.daemon.wait_for_log("plugin-donations.py:.*Serving Flask app 'donations'")
|
||||
l1.daemon.wait_for_log("plugin-donations.py:.*Running on all addresses")
|
||||
msg = l1.rpc.donationserver("stop", port)
|
||||
assert msg == f'stopped server on port {port}'
|
||||
120
Unmaintained/drain/README.md
Normal file
@@ -0,0 +1,120 @@
|
||||
# Drain and Fill Plugin
|
||||
|
||||
This plugin drains or fills up the capacity of one of your channels using
|
||||
circular payments to yourself. This can be useful for:
|
||||
|
||||
- liquidity management
|
||||
- cleaning up or reducing channels before closing
|
||||
- keeping capacity: pushing remaining balance into other lightning channels
|
||||
- reducing capacity: filling up a channel before closing to reduce capacity
|
||||
- accumulating dust before closing multiple channels
|
||||
- ...
|
||||
|
||||
## Installation
|
||||
|
||||
This plugin relies on the `pyln-client` library. As with most plugins you should
|
||||
be able to install dependencies with `pip`:
|
||||
|
||||
```bash
|
||||
pip3 install -r requirements.txt
|
||||
```
|
||||
|
||||
You might also need to specify the `--user` command line flag depending on
your environment. If you don't want this and the plugin has `pyln-client`
as its only dependency, you can also start `lightningd` with the `PYTHONPATH`
environment variable pointing to the `pyln-client` package of your `lightningd`
installation, for example:
|
||||
|
||||
```
|
||||
PYTHONPATH=/home/user/lightning.git/contrib/pyln-client lightningd --plugin=...
|
||||
```
|
||||
|
||||
## Startup
|
||||
|
||||
The plugin can be started with `lightningd` by adding the `--plugin` option.
|
||||
Remember that all `lightningd` plugins have to have executable permissions.
|
||||
|
||||
```
|
||||
lightningd --plugin=/path/to/plugin/drain.py
|
||||
```
|
||||
|
||||
Alternatively, you can also symlink or copy the plugins executable to the
|
||||
`.lightning/plugins` folder or the `plugins` folder of your Core-Lightning
|
||||
installation as executables within these directories will be loaded as plugins.
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
Once the plugin is active you can use it to `drain` a given percentage of
|
||||
liquidity (default 100%) on one of your channels by:
|
||||
|
||||
```
|
||||
lightning-cli drain scid [percentage] [chunks] [retry_for] [maxfeepercent] [exemptfee]
|
||||
```
|
||||
|
||||
The plugin also has a `fill` command that does exactly the opposite. You
|
||||
can use it to fill up a given percentage of liquidity (default 100%) on your
|
||||
side of a channel:
|
||||
|
||||
```
|
||||
lightning-cli fill scid [percentage] [chunks] [retry_for] [maxfeepercent] [exemptfee]
|
||||
```
|
||||
|
||||
Another useful command is `setbalance`, which will fill up or drain your side
of a channel's balance to a given total percentage (default 50%). It will do all
the math for you, so that you do not need to care about the current channel balance:
|
||||
|
||||
```
|
||||
lightning-cli setbalance scid [percentage] [chunks] [retry_for] [maxfeepercent] [exemptfee]
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
- The `scid` is the short_channel_id of the channel to drain or fill.
|
||||
- OPTIONAL: The `percentage` parameter tells the plugin how much of a channel's
  total capacity should be `drain`ed or `fill`ed (default: 100%).
  For the `setbalance` command this sets the target percentage and it defaults
  to 50% in this case. Resulting over- or under-capacity will be limited
  to 100% (full) or 0% (empty) automatically; a rough sketch of the arithmetic
  follows this list. Examples:
  - A 'drain 10' will send out 10% of the channel's total (not current) capacity.
  - A 'drain 100' will send out 100% of the channel's total capacity; the channel
    will be empty after this.
  - A 'fill 10' will increase your side of a channel's balance by 10% of the total.
  - A 'fill 100' will fill up your channel.
  - A 'setbalance' will balance out a channel.
  - A 'setbalance 70' will bring a channel into a state where your side holds
    70% of total capacity.
|
||||
- OPTIONAL: The `chunks` parameter tells the plugin to try breaking down the
|
||||
payment into several smaller ones. In this case it may happen that the
|
||||
operation will only be partially completed. The parameter's value is the
|
||||
number of chunks to use. Default: auto-detect based on capacities, max 16.
|
||||
- OPTIONAL: `retry_for` defines the number of seconds the plugin will retry to
|
||||
find a suitable route. Default: 60 seconds. Note: Applies for each chunk.
|
||||
- OPTIONAL: `maxfeepercent` is a percentage limit of the money to be paid in
|
||||
fees and defaults to 0.5.
|
||||
- OPTIONAL: The `exemptfee` option can be used for tiny payments which would be
|
||||
dominated by the fee leveraged by forwarding nodes. Setting `exemptfee`
|
||||
allows the `maxfeepercent` check to be skipped on fees that are smaller than
|
||||
exemptfee (default: 5000 millisatoshi).
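
As a rough sketch of the arithmetic behind `setbalance` (illustrative only;
reserves, fees and chunking are ignored, and this is not the plugin's exact
code), the target balance is simply `total * percentage / 100`, and the channel
is drained or filled by the difference:

```python
def setbalance_direction(total_sat, ours_sat, target_percent=50):
    """Return which operation and how many sats would move the channel to the target."""
    target = total_sat * target_percent / 100.0
    if ours_sat > target:
        return ("drain", ours_sat - target)   # push the surplus out
    return ("fill", target - ours_sat)        # pull the shortfall in

print(setbalance_direction(1_000_000, 800_000, 50))   # ('drain', 300000.0)
```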
|
||||
|
||||
|
||||
## Tips and Tricks
|
||||
|
||||
- To find the correct channel IDs, you can use the `summary` plugin which can
|
||||
be found [here](https://github.com/lightningd/plugins/tree/master/summary).
|
||||
- After some failed attempts, it may be worth checking the `lightningd` logs for
|
||||
further information.
|
||||
- Channels have a `channel_reserve_satoshis` value, which is usually 1% of the
|
||||
channel's total balance. Initially, this reserve may not be met, as only one
|
||||
side has funds; but the protocol ensures that there is always progress toward
|
||||
meeting this reserve, and once met, [it is maintained.](https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md#rationale)
|
||||
Therefore you cannot drain or fill a channel to be completely empty or full
  (for example, on a 1,000,000 sat channel roughly 10,000 sat stay locked in reserve on each side).
|
||||
|
||||
|
||||
## TODOs
|
||||
- fix: use hook instead of waitsendpay to prevent race conditions
|
||||
- fix: occasionally strange route errors. maybe try increasing chunks on route errors.
|
||||
- feat: set HTLC_FEE MIN/MAX/STP by feerate
|
||||
- chore: reconsider use of listchannels
|
||||
0
Unmaintained/drain/__init__.py
Normal file
23
Unmaintained/drain/clnutils.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import re
|
||||
|
||||
|
||||
def cln_parse_rpcversion(string):
|
||||
"""
|
||||
Parse cln version string to determine RPC version.
|
||||
|
||||
cln switched from a 'semver'-like `major.minor.sub[rcX][-mod]` scheme
to an Ubuntu-style `yy.mm[.patch][-mod]` scheme with version 22.11;
make sure we can read all of them (for the next 80 years).
|
||||
"""
|
||||
rpcversion = string
|
||||
if rpcversion.startswith('v'): # strip leading 'v'
|
||||
rpcversion = rpcversion[1:]
|
||||
if rpcversion.find('-') != -1: # strip mods
|
||||
rpcversion = rpcversion[:rpcversion.find('-')]
|
||||
if re.search('.*(rc[\\d]*)$', rpcversion): # strip release candidates
|
||||
rpcversion = rpcversion[:rpcversion.find('rc')]
|
||||
if rpcversion.count('.') == 1: # imply patch version 0 if not given
|
||||
rpcversion = rpcversion + '.0'
|
||||
|
||||
# split and convert numeric string parts to actual integers
|
||||
return list(map(int, rpcversion.split('.')))
|
||||
530
Unmaintained/drain/drain.py
Executable file
@@ -0,0 +1,530 @@
|
||||
#!/usr/bin/env python3
|
||||
from clnutils import cln_parse_rpcversion
|
||||
from pyln.client import Plugin, Millisatoshi, RpcError
|
||||
from utils import get_ours, wait_ours
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
|
||||
# When draining 100% we must account (not pay) for an additional HTLC fee.
|
||||
# Currently there is no way of getting the exact number before the fact,
|
||||
# so we use trial and error until it is high enough, or parse the exception text.
|
||||
HTLC_FEE_NUL = Millisatoshi('0sat')
|
||||
HTLC_FEE_STP = Millisatoshi('10sat')
|
||||
HTLC_FEE_MIN = Millisatoshi('100sat')
|
||||
HTLC_FEE_MAX = Millisatoshi('100000sat')
|
||||
HTLC_FEE_EST = Millisatoshi('3000sat')
|
||||
HTLC_FEE_PAT = re.compile("^.* HTLC fee: ([0-9]+sat).*$")
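# e.g. an error text like "... Capacity exceeded - HTLC fee: 7130sat ..." would be
# matched by HTLC_FEE_PAT, extracting '7130sat' as the next fee estimate to try
# (illustrative message; the exact wording comes from lightningd).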
|
||||
|
||||
|
||||
# The route msat helpers are needed because older versions of cln
|
||||
# had different msat/msatoshi fields with different types Millisatoshi/int
|
||||
def route_set_msat(r, msat):
|
||||
if plugin.rpcversion[0] == 0 and plugin.rpcversion[1] < 12:
|
||||
r[plugin.msatfield] = msat.millisatoshis
|
||||
r['amount_msat'] = Millisatoshi(msat)
|
||||
else:
|
||||
r[plugin.msatfield] = Millisatoshi(msat)
|
||||
|
||||
|
||||
def route_get_msat(r):
|
||||
return Millisatoshi(r[plugin.msatfield])
|
||||
|
||||
|
||||
def setup_routing_fees(payload, route, amount, substractfees: bool = False):
|
||||
delay = plugin.cltv_final
|
||||
|
||||
amount_iter = amount
|
||||
for r in reversed(route):
|
||||
route_set_msat(r, amount_iter)
|
||||
r['delay'] = delay
|
||||
channels = plugin.rpc.listchannels(r['channel'])
|
||||
ch = next(c for c in channels.get('channels') if c['destination'] == r['id'])
|
||||
fee = Millisatoshi(ch['base_fee_millisatoshi'])
|
||||
# BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
|
||||
fee += (amount_iter * ch['fee_per_millionth'] + 10**6 - 1) // 10**6 # integer math trick to round up
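# e.g. base_fee 1000msat, fee_per_millionth 10, amount 2345678msat:
# proportional part = ceil(2345678 * 10 / 1e6) = 24msat, so fee = 1024msat
# (illustrative numbers)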
|
||||
amount_iter += fee
|
||||
delay += ch['delay']
|
||||
|
||||
# amounts have to be calculated the other way around when fees are subtracted;
# the delay values were already accumulated by the loop above
|
||||
if substractfees:
|
||||
amount_iter = amount
|
||||
first = True
|
||||
for r in route:
|
||||
channels = plugin.rpc.listchannels(r['channel'])
|
||||
ch = next(c for c in channels.get('channels') if c['destination'] == r['id'])
|
||||
if not first:
|
||||
fee = Millisatoshi(ch['base_fee_millisatoshi'])
|
||||
# BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
|
||||
fee += (amount_iter * ch['fee_per_millionth'] + 10**6 - 1) // 10**6 # integer math trick to round up
|
||||
if fee > amount_iter:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot cover fees to %s %s' % (payload['command'], amount)})
|
||||
amount_iter -= fee
|
||||
first = False
|
||||
route_set_msat(r, amount_iter)
|
||||
|
||||
|
||||
# This raises an error when a channel is not normal or peer is not connected
|
||||
def get_channel(payload, peer_id, scid=None):
|
||||
if scid is None:
|
||||
scid = payload['scid']
|
||||
|
||||
# from versions 23 and onwards we have `listpeers` and `listpeerchannels`
|
||||
# if plugin.rpcversion[0] >= 23:
|
||||
if plugin.listpeerchannels: # FIXME: replace by rpcversion check (see above) once 23 is released
|
||||
channels = plugin.rpc.listpeerchannels(peer_id)["channels"]
|
||||
if len(channels) == 0:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot find channels for peer %s' % (peer_id)})
|
||||
try:
|
||||
channel = next(c for c in channels if 'short_channel_id' in c and c['short_channel_id'] == scid)
|
||||
except StopIteration:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot find channel for peer %s with scid %s' % (peer_id, scid)})
|
||||
if channel['state'] != "CHANNELD_NORMAL":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Channel %s: not in state CHANNELD_NORMAL, but: %s' % (scid, channel['state'])})
|
||||
if not channel['peer_connected']:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Channel %s: peer is not connected.' % scid})
|
||||
return channel
|
||||
|
||||
peers = plugin.rpc.listpeers(peer_id)['peers']
|
||||
if len(peers) == 0:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot find peer %s' % peer_id})
|
||||
try:
|
||||
channel = next(c for c in peers[0]['channels'] if 'short_channel_id' in c and c['short_channel_id'] == scid)
|
||||
except StopIteration:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot find channel %s for peer %s' % (scid, peer_id)})
|
||||
if channel['state'] != "CHANNELD_NORMAL":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, channel['state'])})
|
||||
if not peers[0]['connected']:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Channel %s peer is not connected.' % scid})
|
||||
return channel
|
||||
|
||||
|
||||
def spendable_from_scid(payload, scid=None, _raise=False):
|
||||
if scid is None:
|
||||
scid = payload['scid']
|
||||
|
||||
peer_id = peer_from_scid(payload, scid)
|
||||
try:
|
||||
channel = get_channel(payload, peer_id, scid)
|
||||
except RpcError as e:
|
||||
if _raise:
|
||||
raise e
|
||||
return Millisatoshi(0), Millisatoshi(0)
|
||||
|
||||
# we check amounts via gossip and not wallet funds, as it's more accurate
|
||||
our = Millisatoshi(channel['to_us_msat'])
|
||||
total = Millisatoshi(channel['total_msat'])
|
||||
our_reserve = Millisatoshi(channel['our_reserve_msat'])
|
||||
their_reserve = Millisatoshi(channel['their_reserve_msat'])
|
||||
their = total - our
|
||||
|
||||
# reserves maybe not filled up yet
|
||||
if our < our_reserve:
|
||||
our_reserve = our
|
||||
if their < their_reserve:
|
||||
their_reserve = their
|
||||
|
||||
spendable = channel['spendable_msat']
|
||||
receivable = channel.get('receivable_msat')
|
||||
|
||||
# receivable_msat was added with the 0.8.2 release, have a fallback
|
||||
if not receivable:
|
||||
receivable = their - their_reserve
|
||||
# we also need to subtract a possible commit tx fee
|
||||
if receivable >= HTLC_FEE_EST:
|
||||
receivable -= HTLC_FEE_EST
|
||||
return spendable, receivable
|
||||
|
||||
|
||||
def peer_from_scid(payload, scid=None):
|
||||
if scid is None:
|
||||
scid = payload['scid']
|
||||
|
||||
channels = plugin.rpc.listchannels(scid).get('channels')
|
||||
try:
|
||||
return next(c for c in channels if c['source'] == payload['my_id'])['destination']
|
||||
except StopIteration:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot find peer for channel: ' + scid})
|
||||
|
||||
|
||||
def find_worst_channel(route):
|
||||
if len(route) < 4:
|
||||
return None
|
||||
start_idx = 2
|
||||
worst = route[start_idx]
|
||||
worst_val = route_get_msat(route[start_idx - 1]) - route_get_msat(worst)
|
||||
for i in range(start_idx + 1, len(route) - 1):
|
||||
val = route_get_msat(route[i - 1]) - route_get_msat(route[i])
|
||||
if val > worst_val:
|
||||
worst = route[i]
|
||||
worst_val = val
|
||||
return worst
|
||||
|
||||
|
||||
def test_or_set_chunks(payload):
|
||||
scid = payload['scid']
|
||||
cmd = payload['command']
|
||||
spendable, receivable = spendable_from_scid(payload)
|
||||
total = spendable + receivable
|
||||
amount = Millisatoshi(int(int(total) * (0.01 * payload['percentage'])))
|
||||
|
||||
# if capacity exceeds, limit amount to full or empty channel
|
||||
if cmd == "drain" and amount > spendable:
|
||||
amount = spendable
|
||||
if cmd == "fill" and amount > receivable:
|
||||
amount = receivable
|
||||
if amount == Millisatoshi(0):
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot detect required chunks to perform operation. Amount would be 0msat.'})
|
||||
|
||||
# get all spendable/receivables for our channels
|
||||
channels = {}
|
||||
for channel in payload['mychannels']:
|
||||
if channel['short_channel_id'] == scid:
|
||||
continue
|
||||
try:
|
||||
spend, recv = spendable_from_scid(payload, channel['short_channel_id'], True)
|
||||
except RpcError:
|
||||
continue
|
||||
channels[channel['short_channel_id']] = {
|
||||
'spendable': spend,
|
||||
'receivable': recv,
|
||||
}
|
||||
if len(channels) == 0:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Not enough usable channels to perform cyclic routing.'})
|
||||
|
||||
# test if selected chunks fit into other channel capacities
|
||||
chunks = payload['chunks']
|
||||
if chunks > 0:
|
||||
chunksize = amount / chunks
|
||||
fit = 0
|
||||
for i in channels:
|
||||
channel = channels[i]
|
||||
if cmd == "drain":
|
||||
fit += int(channel['receivable']) // int(chunksize)
|
||||
if cmd == "fill":
|
||||
fit += int(channel['spendable']) // int(chunksize)
|
||||
if fit >= chunks:
|
||||
return
|
||||
if cmd == "drain":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Selected chunks (%d) will not fit incoming channel capacities.' % chunks})
|
||||
if cmd == "fill":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Selected chunks (%d) will not fit outgoing channel capacities.' % chunks})
|
||||
|
||||
# if chunks is 0 -> auto detect from 1 to 16 (max) chunks until amounts fit
|
||||
else:
|
||||
chunks = 0
|
||||
while chunks < 16:
|
||||
chunks += 1
|
||||
chunksize = amount / chunks
|
||||
fit = 0
|
||||
for i in channels:
|
||||
channel = channels[i]
|
||||
if cmd == "drain" and int(channel['receivable']) > 0:
|
||||
fit += int(channel['receivable']) // int(chunksize)
|
||||
if cmd == "fill" and int(channel['spendable']) > 0:
|
||||
fit += int(channel['spendable']) // int(chunksize)
|
||||
if fit >= chunks:
|
||||
payload['chunks'] = chunks
|
||||
return
|
||||
|
||||
if cmd == "drain":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot detect required chunks to perform operation. Incoming capacity problem.'})
|
||||
if cmd == "fill":
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot detect required chunks to perform operation. Outgoing capacity problem.'})
|
||||
|
||||
|
||||
def cleanup(payload, error=None):
|
||||
# delete all invoices and count how many went through
|
||||
successful_chunks = 0
|
||||
for label in payload['labels']:
|
||||
try:
|
||||
plugin.rpc.delinvoice(label, 'unpaid')
|
||||
except RpcError as e:
|
||||
# race condition: waitsendpay timed out, but invoice got paid
|
||||
if 'status is paid' in e.error.get('message', ""):
|
||||
successful_chunks += 1
|
||||
|
||||
if successful_chunks == payload['chunks']:
|
||||
return payload['success_msg']
|
||||
if successful_chunks > 0:
|
||||
error = RpcError(payload['command'], payload, {'message': 'Partially completed %d/%d chunks. Error: %s' % (successful_chunks, payload['chunks'], str(error))})
|
||||
if error is None:
|
||||
error = RpcError(payload['command'], payload, {'message': 'Command failed, no chunk succeeded.'})
|
||||
raise error
|
||||
|
||||
|
||||
def try_for_htlc_fee(payload, peer_id, amount, chunk, spendable_before):
|
||||
start_ts = int(time.time())
|
||||
remaining_secs = max(0, payload['start_ts'] + payload['retry_for'] - start_ts)
|
||||
remaining_chunks = payload['chunks'] - chunk
|
||||
retry_for = int(remaining_secs / remaining_chunks)
|
||||
my_id = payload['my_id']
|
||||
label = f"{payload['command']}-{uuid.uuid4()}"
|
||||
payload['labels'] += [label]
|
||||
description = "%s %s %s%s [%d/%d]" % (payload['command'], payload['scid'], payload['percentage'], '%', chunk + 1, payload['chunks'])
|
||||
invoice = plugin.rpc.invoice("any", label, description, retry_for + 60)
|
||||
payment_hash = invoice['payment_hash']
|
||||
# The requirement for payment_secret coincided with its addition to the invoice output.
|
||||
payment_secret = invoice.get('payment_secret')
|
||||
plugin.log("Invoice payment_hash: %s" % payment_hash)
|
||||
|
||||
# exclude selected channel to prevent unwanted shortcuts
|
||||
excludes = [f"{payload['scid']}/0", f"{payload['scid']}/1"]
|
||||
|
||||
# exclude local channels known to have too little capacity.
|
||||
for channel in payload['mychannels']:
|
||||
if channel['short_channel_id'] == payload['scid']:
|
||||
continue  # already added a few lines above
|
||||
spend, recv = spendable_from_scid(payload, channel['short_channel_id'])
|
||||
if payload['command'] == 'drain' and recv < amount or payload['command'] == 'fill' and spend < amount:
|
||||
excludes.append(f"{channel['short_channel_id']}/0")
|
||||
excludes.append(f"{channel['short_channel_id']}/1")
|
||||
|
||||
while int(time.time()) - start_ts < retry_for:
|
||||
if payload['command'] == 'drain':
|
||||
r = plugin.rpc.getroute(my_id, amount, fromid=peer_id, exclude=excludes,
|
||||
maxhops=6, riskfactor=10, cltv=9, fuzzpercent=0)
|
||||
route_out = {'id': peer_id, 'channel': payload['scid'], 'direction': int(my_id >= peer_id)}
|
||||
route = [route_out] + r['route']
|
||||
setup_routing_fees(payload, route, amount, True)
|
||||
if payload['command'] == 'fill':
|
||||
r = plugin.rpc.getroute(peer_id, amount, fromid=my_id, exclude=excludes,
|
||||
maxhops=6, riskfactor=10, cltv=9, fuzzpercent=0)
|
||||
route_in = {'id': my_id, 'channel': payload['scid'], 'direction': int(peer_id >= my_id)}
|
||||
route = r['route'] + [route_in]
|
||||
setup_routing_fees(payload, route, amount, False)
|
||||
|
||||
# check fee and exclude worst channel the next time
|
||||
# NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
|
||||
fees = route_get_msat(route[0]) - route_get_msat(route[-1])
|
||||
if fees > payload['exemptfee'] and int(fees) > int(amount) * payload['maxfeepercent'] / 100:
|
||||
worst_channel = find_worst_channel(route)
|
||||
if worst_channel is None:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Insufficient fee'})
|
||||
excludes.append(f"{worst_channel['channel']}/{worst_channel['direction']}")
|
||||
continue
|
||||
|
||||
plugin.log(f"[{chunk + 1}/{payload['chunks']}] Sending over "
|
||||
f"{len(route)} hops to {payload['command']} {amount} using "
|
||||
f"{fees} fees", 'debug')
|
||||
for r in route:
|
||||
plugin.log(" - %s %14s %s" % (r['id'], r['channel'], route_get_msat(r)), 'debug')
|
||||
|
||||
try:
|
||||
ours = get_ours(plugin, payload['scid'])
|
||||
plugin.rpc.sendpay(route, payment_hash, label, payment_secret=payment_secret)
|
||||
running_for = int(time.time()) - start_ts
|
||||
result = plugin.rpc.waitsendpay(payment_hash, max(retry_for - running_for, 0))
|
||||
if not result.get('status') == 'complete':
|
||||
return False # should not happen, but maybe API changes
|
||||
payload['success_msg'].append(f"{amount + fees}msat sent over {len(route)} "
|
||||
f"hops to {payload['command']} {amount}msat "
|
||||
f"[{chunk + 1}/{payload['chunks']}]")
|
||||
# we need to wait for HTLC to resolve, so remaining amounts
|
||||
# can be calculated correctly for the next chunk
|
||||
wait_ours(plugin, payload['scid'], ours)
|
||||
return True
|
||||
|
||||
except RpcError as e:
|
||||
erring_message = e.error.get('message', '')
|
||||
erring_channel = e.error.get('data', {}).get('erring_channel')
|
||||
erring_index = e.error.get('data', {}).get('erring_index')
|
||||
erring_direction = e.error.get('data', {}).get('erring_direction')
|
||||
|
||||
# detect exceeding of HTLC commitment fee
|
||||
if 'Capacity exceeded' in erring_message and erring_index == 0:
|
||||
match = HTLC_FEE_PAT.search(erring_message)
|
||||
if match: # new servers tell htlc_fee via exception (#2691)
|
||||
raise ValueError("htlc_fee is %s" % match.group(1))
|
||||
raise ValueError("htlc_fee unknown")
|
||||
|
||||
if erring_channel == payload['scid']:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Error with selected channel: %s' % erring_message})
|
||||
|
||||
plugin.log("RpcError: " + str(e))
|
||||
if erring_channel is not None and erring_direction is not None:
|
||||
excludes.append(f"{erring_channel}/{erring_direction}")
|
||||
|
||||
|
||||
def read_params(command: str, scid: str, percentage: float, chunks: int,
|
||||
retry_for: int, maxfeepercent: float, exemptfee: int):
|
||||
|
||||
# check parameters
|
||||
if command != 'drain' and command != 'fill' and command != 'setbalance':
|
||||
raise RpcError(command, {}, {'message': 'Invalid command. Must be "drain", "fill" or "setbalance"'})
|
||||
percentage = float(percentage)
|
||||
if percentage < 0 or percentage > 100 or command != 'setbalance' and percentage == 0.0:
|
||||
raise RpcError(command, {}, {'message': 'Percentage must be between 0 and 100'})
|
||||
if chunks < 0:
|
||||
raise RpcError(command, {}, {'message': 'Negative chunks do not make sense. Try a positive '
|
||||
'value or use 0 (default) for auto-detection.'})
|
||||
|
||||
# forge operation payload
|
||||
payload = {
|
||||
"command": command,
|
||||
"scid": scid,
|
||||
"percentage": percentage,
|
||||
"chunks": chunks,
|
||||
"retry_for": retry_for,
|
||||
"maxfeepercent": maxfeepercent,
|
||||
"exemptfee": exemptfee,
|
||||
"labels": [],
|
||||
"success_msg": [],
|
||||
}
|
||||
|
||||
# cache some often required data
|
||||
payload['my_id'] = plugin.getinfo.get('id')
|
||||
payload['start_ts'] = int(time.time())
|
||||
payload['mychannels'] = plugin.rpc.listchannels(source=payload['my_id']).get('channels')
|
||||
|
||||
# translate a 'setbalance' into respective drain or fill
|
||||
if command == 'setbalance':
|
||||
spendable, receivable = spendable_from_scid(payload)
|
||||
total = spendable + receivable
|
||||
target = Millisatoshi(int(int(total) * (0.01 * payload['percentage'])))
|
||||
if target == spendable:
|
||||
raise RpcError(payload['command'], payload, {'message': 'target already reached, nothing to do.'})
|
||||
if spendable > target:
|
||||
payload['command'] = 'drain'
|
||||
amount = spendable - target
|
||||
else:
|
||||
payload['command'] = 'fill'
|
||||
amount = target - spendable
|
||||
payload['percentage'] = 100.0 * int(amount) / int(total)
|
||||
if payload['percentage'] == 0.0:
|
||||
raise RpcError(command, payload, {'message': 'target already reached, nothing to do.'})
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def execute(payload: dict):
|
||||
peer_id = peer_from_scid(payload)
|
||||
get_channel(payload, peer_id) # ensures or raises error
|
||||
test_or_set_chunks(payload)
|
||||
plugin.log("%s %s %d%% %d chunks" % (payload['command'], payload['scid'], payload['percentage'], payload['chunks']))
|
||||
|
||||
# iterate over chunks, default is just one
|
||||
for chunk in range(payload['chunks']):
|
||||
# we discover remaining capacities for each chunk,
|
||||
# as fees from previous chunks affect reserves
|
||||
spendable, receivable = spendable_from_scid(payload)
|
||||
total = spendable + receivable
|
||||
amount = Millisatoshi(int(int(total) * (0.01 * payload['percentage'] / payload['chunks'])))
|
||||
if amount == Millisatoshi(0):
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot process chunk. Amount would be 0msat.'})
|
||||
|
||||
# if the amount exceeds capacity, limit it so the channel is fully emptied or filled
|
||||
if payload['command'] == "drain" and amount > spendable:
|
||||
amount = spendable
|
||||
if payload['command'] == "fill" and amount > receivable:
|
||||
amount = receivable
|
||||
|
||||
result = False
|
||||
try:
|
||||
# we need to try with different HTLC_FEE values
|
||||
# until we don't get a capacity error on the first hop
|
||||
htlc_fee = HTLC_FEE_NUL
|
||||
htlc_stp = HTLC_FEE_STP
|
||||
|
||||
while htlc_fee < HTLC_FEE_MAX and result is False:
|
||||
# When getting close to 100% we need to account for HTLC commitment fee
|
||||
if payload['command'] == 'drain' and spendable - amount <= htlc_fee:
|
||||
if amount < htlc_fee:
|
||||
raise RpcError(payload['command'], payload, {'message': 'channel too low to cover fees'})
|
||||
amount -= htlc_fee
|
||||
plugin.log("Trying... chunk:%s/%s spendable:%s receivable:%s htlc_fee:%s => amount:%s" % (chunk + 1, payload['chunks'], spendable, receivable, htlc_fee, amount))
|
||||
|
||||
try:
|
||||
result = try_for_htlc_fee(payload, peer_id, amount, chunk, spendable)
|
||||
except Exception as err:
|
||||
if "htlc_fee unknown" in str(err):
|
||||
if htlc_fee == HTLC_FEE_NUL:
|
||||
htlc_fee = HTLC_FEE_MIN - HTLC_FEE_STP
|
||||
htlc_fee += htlc_stp
|
||||
htlc_stp *= 1.1 # exponential increase steps
|
||||
plugin.log("Retrying with additional HTLC onchain fees: %s" % htlc_fee)
|
||||
continue
|
||||
if "htlc_fee is" in str(err):
|
||||
htlc_fee = Millisatoshi(str(err)[12:])
|
||||
plugin.log("Retrying with exact HTLC onchain fees: %s" % htlc_fee)
|
||||
continue
|
||||
raise err
|
||||
|
||||
# If result is still false, we tried the allowed htlc_fee range unsuccessfully
|
||||
if result is False:
|
||||
raise RpcError(payload['command'], payload, {'message': 'Cannot determine required htlc commitment fees.'})
|
||||
|
||||
except Exception as e:
|
||||
return cleanup(payload, e)
|
||||
|
||||
return cleanup(payload)
|
||||
|
||||
|
||||
@plugin.method("drain")
|
||||
def drain(plugin, scid: str, percentage: float = 100, chunks: int = 0, retry_for: int = 60,
|
||||
maxfeepercent: float = 0.5, exemptfee: Millisatoshi = Millisatoshi(5000)):
|
||||
"""Draining channel liquidity with circular payments.
|
||||
|
||||
Percentage defaults to 100, resulting in an empty channel.
|
||||
Chunks defaults to 0 (auto-detect).
|
||||
Use 'drain 10' to decrease a channel's total balance by 10%.
|
||||
"""
|
||||
payload = read_params('drain', scid, percentage, chunks, retry_for, maxfeepercent, exemptfee)
|
||||
return execute(payload)
|
||||
|
||||
|
||||
@plugin.method("fill")
|
||||
def fill(plugin, scid: str, percentage: float = 100, chunks: int = 0, retry_for: int = 60,
|
||||
maxfeepercent: float = 0.5, exemptfee: Millisatoshi = Millisatoshi(5000)):
|
||||
"""Filling channel liquidity with circular payments.
|
||||
|
||||
Percentage defaults to 100, resulting in a full channel.
|
||||
Chunks defaults to 0 (auto-detect).
|
||||
Use 'fill 10' to increase a channel's total balance by 10%.
|
||||
"""
|
||||
payload = read_params('fill', scid, percentage, chunks, retry_for, maxfeepercent, exemptfee)
|
||||
return execute(payload)
|
||||
|
||||
|
||||
@plugin.method("setbalance")
|
||||
def setbalance(plugin, scid: str, percentage: float = 50, chunks: int = 0, retry_for: int = 60,
|
||||
maxfeepercent: float = 0.5, exemptfee: Millisatoshi = Millisatoshi(5000)):
|
||||
"""Brings a channels own liquidity to X percent using circular payments.
|
||||
|
||||
Percentage defaults to 50, resulting in a balanced channel.
|
||||
Chunks defaults to 0 (auto-detect).
|
||||
Use 'setbalance 100' to fill a channel. Use 'setbalance 0' to drain a channel.
|
||||
"""
|
||||
payload = read_params('setbalance', scid, percentage, chunks, retry_for, maxfeepercent, exemptfee)
|
||||
return execute(payload)
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(options, configuration, plugin):
|
||||
rpchelp = plugin.rpc.help().get('help')
|
||||
# detect if server cli has moved `listpeers.channels[]` to `listpeerchannels`
|
||||
# See https://github.com/ElementsProject/lightning/pull/5825
|
||||
# TODO: replace by rpc version check once v23 is released
|
||||
plugin.listpeerchannels = False
|
||||
if len([c for c in rpchelp if c["command"].startswith("listpeerchannels ")]) != 0:
|
||||
plugin.listpeerchannels = True
|
||||
|
||||
# do all the stuff that needs to be done just once ...
|
||||
plugin.getinfo = plugin.rpc.getinfo()
|
||||
plugin.rpcversion = cln_parse_rpcversion(plugin.getinfo.get('version'))
|
||||
plugin.configs = plugin.rpc.listconfigs()
|
||||
plugin.cltv_final = plugin.configs.get('cltv-final')
|
||||
|
||||
# use getroute amount_msat/msatoshi field depending on version
|
||||
plugin.msatfield = 'amount_msat'
|
||||
if plugin.rpcversion[0] == 0 and plugin.rpcversion[1] < 12:
|
||||
plugin.msatfield = 'msatoshi'
|
||||
|
||||
plugin.log("Plugin drain.py initialized")
|
||||
|
||||
|
||||
plugin.run()
|
||||
1
Unmaintained/drain/requirements-dev.txt
Normal file
1
Unmaintained/drain/requirements-dev.txt
Normal file
@@ -0,0 +1 @@
|
||||
flaky==3.7.0
|
||||
1
Unmaintained/drain/requirements.txt
Normal file
1
Unmaintained/drain/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
pyln-client>=0.12
|
||||
348
Unmaintained/drain/test_drain.py
Normal file
348
Unmaintained/drain/test_drain.py
Normal file
@@ -0,0 +1,348 @@
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
from pyln.client import RpcError
|
||||
from .utils import get_ours, get_theirs, wait_ours, wait_for_all_htlcs
|
||||
import os
|
||||
import unittest
|
||||
import pytest
|
||||
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "drain.py")
|
||||
pluginopt = {'plugin': plugin_path}
|
||||
EXPERIMENTAL_FEATURES = int(os.environ.get("EXPERIMENTAL_FEATURES", "0"))
|
||||
|
||||
|
||||
def test_plugin_starts(node_factory):
|
||||
l1 = node_factory.get_node()
|
||||
# Test dynamically
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.stop()
|
||||
# Then statically
|
||||
l1.daemon.opts["plugin"] = plugin_path
|
||||
l1.start()
|
||||
|
||||
|
||||
def test_drain_and_refill(node_factory, bitcoind):
|
||||
# Scenario: first drain then refill
|
||||
#
|
||||
# SETUP: A basic circular setup to run drain and fill tests
|
||||
#
|
||||
# l1---l2
|
||||
# | |
|
||||
# l4---l3
|
||||
#
|
||||
|
||||
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
|
||||
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
|
||||
nodes = [l1, l2, l3, l4]
|
||||
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid23 = l2.get_channel_scid(l3)
|
||||
scid34 = l3.get_channel_scid(l4)
|
||||
l4.fundchannel(l1, 10**6)
|
||||
scid41 = l4.get_channel_scid(l1)
|
||||
|
||||
# disable fees to make circular line graph tests a lot easier
|
||||
for n in nodes:
|
||||
try:
|
||||
n.rpc.setchannel('all', 0, 0)
|
||||
except RpcError: # retry with deprecated command name
|
||||
n.rpc.setchannelfee('all', 0, 0)
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in [scid12, scid23, scid34, scid41]:
|
||||
n.wait_channel_active(scid)
|
||||
|
||||
# do some draining and filling
|
||||
ours_before = get_ours(l1, scid12)
|
||||
assert(l1.rpc.drain(scid12))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid12) < ours_before * 0.05) # account some reserves
|
||||
assert(get_theirs(l1, scid12) > ours_before * 0.95)
|
||||
|
||||
# Investigate: Older versions of cln forbade a 100% circular fill
|
||||
# The testcase for this was as follows:
|
||||
#
|
||||
# refill again with 100% should not be possible in a line_graph circle,
|
||||
# this is not because of ln routing fees (turned off) but because of
|
||||
# HTLC commitment tx fee margin that applies for the funder.
|
||||
# with pytest.raises(RpcError, match=r"Outgoing capacity problem"):
|
||||
# l1.rpc.fill(scid12)
|
||||
# If we only go for 99.9% or exactly 9741msat less, this must work.
|
||||
# assert(l1.rpc.fill(scid12, 99.9))
|
||||
|
||||
ours_before = get_ours(l1, scid12)
|
||||
theirs_before = get_theirs(l1, scid12)
|
||||
assert(l1.rpc.fill(scid12))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid12) > theirs_before * 0.95) # account some reserves
|
||||
assert(get_theirs(l1, scid12) < theirs_before * 0.05)
|
||||
|
||||
|
||||
def test_fill_and_drain(node_factory, bitcoind):
|
||||
# Scenario: first fill of an empty channel and drain afterwards.
|
||||
#
|
||||
# SETUP: A basic circular setup to run drain and fill tests
|
||||
#
|
||||
# l1---l2
|
||||
# | |
|
||||
# l4---l3
|
||||
#
|
||||
|
||||
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
|
||||
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
|
||||
nodes = [l1, l2, l3, l4]
|
||||
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid23 = l2.get_channel_scid(l3)
|
||||
scid34 = l3.get_channel_scid(l4)
|
||||
l4.fundchannel(l1, 10**6)
|
||||
scid41 = l4.get_channel_scid(l1)
|
||||
|
||||
# disable fees to make circular line graph tests a lot easier
|
||||
for n in nodes:
|
||||
try:
|
||||
n.rpc.setchannel('all', 0, 0)
|
||||
except RpcError: # retry with deprecated command name
|
||||
n.rpc.setchannelfee('all', 0, 0)
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in [scid12, scid23, scid34, scid41]:
|
||||
n.wait_channel_active(scid)
|
||||
|
||||
# for l2 to fill scid12, it needs to send on scid23, where its funder
|
||||
# commit tx fee applies, so doing 99.9% or exactly 9741msat less must work.
|
||||
ours_before = get_ours(l1, scid12)
|
||||
assert(l2.rpc.fill(scid12, 99.9))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid12) < ours_before * 0.05) # account some reserves
|
||||
|
||||
# note: fees are disabled, drain 100% must work,
|
||||
# as the fundee doesn't pay the commit tx fee
|
||||
theirs_before = get_theirs(l1, scid12)
|
||||
l2.rpc.drain(scid12)
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid12) < theirs_before * 0.05) # account some reserves
|
||||
|
||||
|
||||
def test_setbalance(node_factory, bitcoind):
|
||||
# SETUP: a basic circular setup to run setbalance tests
|
||||
#
|
||||
# l1---l2
|
||||
# | |
|
||||
# l4---l3
|
||||
#
|
||||
|
||||
l1, l2, l3, l4 = node_factory.line_graph(4, opts=pluginopt)
|
||||
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
|
||||
nodes = [l1, l2, l3, l4]
|
||||
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid23 = l2.get_channel_scid(l3)
|
||||
scid34 = l3.get_channel_scid(l4)
|
||||
l4.fundchannel(l1, 10**6)
|
||||
scid41 = l4.get_channel_scid(l1)
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in [scid12, scid23, scid34, scid41]:
|
||||
n.wait_channel_active(scid)
|
||||
|
||||
# test auto 50/50 balancing
|
||||
ours_before = get_ours(l1, scid12)
|
||||
assert(l1.rpc.setbalance(scid12))
|
||||
ours_after = wait_ours(l1, scid12, ours_before)
|
||||
# TODO: can we fix/change/improve this to be more precise?
|
||||
assert(ours_after < ours_before * 0.52)
|
||||
assert(ours_after > ours_before * 0.48)
|
||||
|
||||
# set and test some 70/30 specific balancing
|
||||
assert(l1.rpc.setbalance(scid12, 30))
|
||||
wait_for_all_htlcs(nodes)
|
||||
ours_after = get_ours(l1, scid12)
|
||||
assert(ours_after < ours_before * 0.34)
|
||||
assert(ours_after > ours_before * 0.27)
|
||||
|
||||
assert(l1.rpc.setbalance(scid12, 70))
|
||||
wait_for_all_htlcs(nodes)
|
||||
ours_after = get_ours(l1, scid12)
|
||||
assert(ours_after < ours_before * 0.73)
|
||||
assert(ours_after > ours_before * 0.67)
|
||||
|
||||
|
||||
# helper function that balances incoming capacity, so autodetection edge case
|
||||
# testing gets a lot simpler.
|
||||
def balance(node, node_a, scid_a, node_b, scid_b, node_c):
|
||||
msat_a = get_ours(node_a, scid_a)
|
||||
msat_b = get_ours(node_b, scid_b)
|
||||
if (msat_a > msat_b):
|
||||
node.pay(node_b, msat_a - msat_b)
|
||||
node_b.pay(node_c, msat_a - msat_b)
|
||||
if (msat_b > msat_a):
|
||||
node.pay(node_a, msat_b - msat_a)
|
||||
node_a.pay(node_c, msat_b - msat_a)
|
||||
wait_for_all_htlcs([node, node_a, node_b])
|
||||
|
||||
|
||||
def test_drain_chunks(node_factory, bitcoind):
|
||||
# SETUP: a small mesh that enables testing chunks
|
||||
#
|
||||
# l2-- --l3
|
||||
# | \ / |
|
||||
# | l1 |
|
||||
# | || |
|
||||
# | || |
|
||||
# o----l4----o
|
||||
#
|
||||
# In such a scenario we can distribute the funds in such a way
|
||||
# that only correct chunking allows rebalancing for l1
|
||||
#
|
||||
# FUNDING:
|
||||
# scid12: l1 -> l2 10**6
|
||||
# scid13: l1 -> l3 10**6
|
||||
# scid24: l2 -> l4 10**6
|
||||
# scid34: l3 -> l4 10**6
|
||||
# scid41: l4 -> l1 11**6 (~1,770,000 sat)
|
||||
|
||||
l1, l2, l3, l4 = node_factory.get_nodes(4, opts=pluginopt)
|
||||
l1.connect(l2)
|
||||
l1.connect(l3)
|
||||
l2.connect(l4)
|
||||
l3.connect(l4)
|
||||
l4.connect(l1)
|
||||
l1.fundchannel(l2, 10**6)
|
||||
l1.fundchannel(l3, 10**6)
|
||||
l2.fundchannel(l4, 10**6)
|
||||
l3.fundchannel(l4, 10**6)
|
||||
l4.fundchannel(l1, 11**6)
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid13 = l1.get_channel_scid(l3)
|
||||
scid24 = l2.get_channel_scid(l4)
|
||||
scid34 = l3.get_channel_scid(l4)
|
||||
scid41 = l4.get_channel_scid(l1)
|
||||
nodes = [l1, l2, l3, l4]
|
||||
scids = [scid12, scid13, scid24, scid34, scid41]
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in scids:
|
||||
n.wait_channel_active(scid)
|
||||
amount = get_ours(l4, scid41)
|
||||
|
||||
# drain in one chunk should be impossible and detected before doing anything
|
||||
with pytest.raises(RpcError, match=r"Selected chunks \(1\) will not fit incoming channel capacities."):
|
||||
l4.rpc.drain(scid41, 100, 1)
|
||||
|
||||
# using 3 chunks should also not be possible, as it would overfill one of the incoming channels
|
||||
with pytest.raises(RpcError, match=r"Selected chunks \(3\) will not fit incoming channel capacities."):
|
||||
l4.rpc.drain(scid41, 100, 3)
|
||||
|
||||
# test chunk autodetection and even chunks 2,4,6
|
||||
assert(l4.rpc.drain(scid41))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
balance(l1, l2, scid12, l3, scid13, l4)
|
||||
assert(l1.rpc.drain(scid41))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
assert(l4.rpc.drain(scid41, 100, 2))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
assert(l1.rpc.drain(scid41, 100, 2))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
assert(l4.rpc.drain(scid41, 100, 4))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
assert(l1.rpc.drain(scid41, 100, 4))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
assert(l4.rpc.drain(scid41, 100, 6))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
assert(l1.rpc.drain(scid41, 100, 6))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
|
||||
|
||||
def test_fill_chunks(node_factory, bitcoind):
|
||||
# SETUP: a small mesh that enables testing chunks
|
||||
#
|
||||
# l2-- --l3
|
||||
# | \ / |
|
||||
# | l1 |
|
||||
# | || |
|
||||
# | || |
|
||||
# o----l4----o
|
||||
#
|
||||
# In such a scenario we can distribute the funds in such a way
|
||||
# that only correct chunking allows rebalancing for l1
|
||||
#
|
||||
# FUNDING:
|
||||
# scid12: l1 -> l2 10**6
|
||||
# scid13: l1 -> l3 10**6
|
||||
# scid24: l2 -> l4 10**6
|
||||
# scid34: l3 -> l4 10**6
|
||||
# scid41: l4 -> l1 11**6 (~1,770,000 sat)
|
||||
|
||||
l1, l2, l3, l4 = node_factory.get_nodes(4, opts=pluginopt)
|
||||
l1.connect(l2)
|
||||
l1.connect(l3)
|
||||
l2.connect(l4)
|
||||
l3.connect(l4)
|
||||
l4.connect(l1)
|
||||
l1.fundchannel(l2, 10**6)
|
||||
l1.fundchannel(l3, 10**6)
|
||||
l2.fundchannel(l4, 10**6)
|
||||
l3.fundchannel(l4, 10**6)
|
||||
l4.fundchannel(l1, 11**6)
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid13 = l1.get_channel_scid(l3)
|
||||
scid24 = l2.get_channel_scid(l4)
|
||||
scid34 = l3.get_channel_scid(l4)
|
||||
scid41 = l4.get_channel_scid(l1)
|
||||
nodes = [l1, l2, l3, l4]
|
||||
scids = [scid12, scid13, scid24, scid34, scid41]
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in scids:
|
||||
n.wait_channel_active(scid)
|
||||
amount = get_ours(l4, scid41)
|
||||
|
||||
# fill in one chunk should be impossible and detected before doing anything
|
||||
with pytest.raises(RpcError, match=r"Selected chunks \(1\) will not fit outgoing channel capacities."):
|
||||
l1.rpc.fill(scid41, 100, 1)
|
||||
|
||||
# using 3 chunks should also not be possible, as it would overdrain one of the outgoing channels
|
||||
with pytest.raises(RpcError, match=r"Selected chunks \(3\) will not fit outgoing channel capacities."):
|
||||
print(l1.rpc.fill(scid41, 100, 3))
|
||||
|
||||
# test chunk autodetection and even chunks 2,4,6
|
||||
assert(l1.rpc.fill(scid41))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
balance(l1, l2, scid12, l3, scid13, l4)
|
||||
assert(l4.rpc.fill(scid41))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
assert(l1.rpc.fill(scid41, 100, 2))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
assert(l4.rpc.fill(scid41, 100, 2))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
assert(l1.rpc.fill(scid41, 100, 4))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_ours(l1, scid41) > amount * 0.9)
|
||||
assert(l4.rpc.fill(scid41, 100, 4))
|
||||
wait_for_all_htlcs(nodes)
|
||||
assert(get_theirs(l1, scid41) > amount * 0.9)
|
||||
41
Unmaintained/drain/utils.py
Normal file
41
Unmaintained/drain/utils.py
Normal file
@@ -0,0 +1,41 @@
|
||||
import time
|
||||
|
||||
TIMEOUT = 60
|
||||
|
||||
|
||||
# we need to have this pyln.testing.utils code duplication
|
||||
# as this also needs to be run without testing libs
|
||||
def wait_for(success, timeout=TIMEOUT):
|
||||
start_time = time.time()
|
||||
interval = 0.25
|
||||
while not success() and time.time() < start_time + timeout:
|
||||
time.sleep(interval)
|
||||
interval *= 2
|
||||
if interval > 5:
|
||||
interval = 5
|
||||
if time.time() > start_time + timeout:
|
||||
raise ValueError("Timeout waiting for {}", success)
|
||||
|
||||
|
||||
# waits for a bunch of nodes HTLCs to settle
|
||||
def wait_for_all_htlcs(nodes):
|
||||
for n in nodes:
|
||||
n.wait_for_htlcs()
|
||||
|
||||
|
||||
# returns our_amount_msat for a given node and scid
|
||||
def get_ours(node, scid):
|
||||
return [c for c in node.rpc.listfunds()['channels'] if c.get('short_channel_id') == scid][0]['our_amount_msat']
|
||||
|
||||
|
||||
# returns their_amount_msat for a given node and scid
|
||||
def get_theirs(node, scid):
|
||||
ours = get_ours(node, scid)
|
||||
total = [c for c in node.rpc.listfunds()['channels'] if c.get('short_channel_id') == scid][0]['amount_msat']
|
||||
return total - ours
|
||||
|
||||
|
||||
# these wait for the HTLC commit settlement to change our/their amounts
|
||||
def wait_ours(node, scid, ours_before):
|
||||
wait_for(lambda: ours_before != get_ours(node, scid))
|
||||
return get_ours(node, scid)
|
||||
9
Unmaintained/helpme/Makefile
Normal file
9
Unmaintained/helpme/Makefile
Normal file
@@ -0,0 +1,9 @@
|
||||
#! /usr/bin/make
|
||||
|
||||
check: flake8 pytest
|
||||
|
||||
flake8:
|
||||
flake8 --ignore=E501,W503 helpme.py
|
||||
|
||||
pytest:
|
||||
pytest helpme.py
|
||||
8
Unmaintained/helpme/README.md
Normal file
8
Unmaintained/helpme/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Helpme plugin for Core-Lightning
|
||||
|
||||
This plugin is designed to walk you through setting up a fresh
|
||||
Core-Lightning node, offering advice for common problems.
|
||||
|
||||
## Example Usage
|
||||
|
||||
`lightning-cli helpme` will describe various topics.
|
||||
1084
Unmaintained/helpme/helpme.py
Executable file
1084
Unmaintained/helpme/helpme.py
Executable file
File diff suppressed because it is too large
Load Diff
1
Unmaintained/helpme/requirements-dev.txt
Normal file
1
Unmaintained/helpme/requirements-dev.txt
Normal file
@@ -0,0 +1 @@
|
||||
pyln-testing ~= 0.12
|
||||
1
Unmaintained/helpme/requirements.txt
Normal file
1
Unmaintained/helpme/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
pyln-client>=0.7.3
|
||||
22
Unmaintained/helpme/test_helpme.py
Normal file
22
Unmaintained/helpme/test_helpme.py
Normal file
@@ -0,0 +1,22 @@
|
||||
import os
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "helpme.py")
|
||||
|
||||
|
||||
def test_helpme_starts(node_factory):
|
||||
l1 = node_factory.get_node()
|
||||
# Test dynamically
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.stop()
|
||||
# Then statically
|
||||
l1.daemon.opts["plugin"] = plugin_path
|
||||
l1.start()
|
||||
|
||||
|
||||
def test_main(node_factory):
|
||||
pluginopt = {'plugin': plugin_path}
|
||||
l1, l2 = node_factory.line_graph(2, opts=pluginopt)
|
||||
assert l1.rpc.helpme()
|
||||
93
Unmaintained/historian/README.org
Normal file
93
Unmaintained/historian/README.org
Normal file
@@ -0,0 +1,93 @@
|
||||
#+TITLE: historian: Archiving the Lightning Network
|
||||
|
||||
* About
|
||||
The historian plugin aims to provide tools to archive the Lightning
|
||||
Network gossip messages and to work with the archived messages.
|
||||
|
||||
The plugin tails the ~gossip_store~ file used by ~lightningd~ to
|
||||
persist gossip messages across restarts. The plugin monitors the file
|
||||
for changes and resumes reading messages whenever there is an
|
||||
actionable event (append, move, etc). When a new message is detected
|
||||
it is parsed, extracting the fields necessary for deduplication, and
|
||||
then stored in the database. The messages themselves are inserted
|
||||
verbatim in order to maintain their integrity and ensure signatures
|
||||
remain valid.
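
As a rough sketch of how this deduplication works (illustrative only; it reuses the helper classes from ~common.py~ and ~gossipd.py~ rather than the plugin's literal ingestion loop):

#+BEGIN_SRC python
  # Minimal sketch, assuming the ORM classes defined in common.py
  # (e.g. ChannelUpdate keyed by scid/direction/timestamp) and its
  # db_session helper are importable.
  from common import ChannelUpdate, db_session
  from gossipd import parse

  def store_raw_channel_update(raw: bytes, dsn=None):
      # decode the message to recover the key fields (scid, direction, timestamp)
      msg = parse(raw)
      with db_session(dsn) as session:
          # merge() inserts or overwrites by primary key, so replayed
          # messages are deduplicated while the raw bytes stay verbatim
          session.merge(ChannelUpdate.from_gossip(msg, raw))
#+END_SRC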
|
||||
|
||||
** Install the plugin
|
||||
There are two ways to install the plugin:
|
||||
|
||||
- Specify the path to ~historian.py~ with the ~--plugin~ or
|
||||
~--important-plugin~ options.
|
||||
- Add a symbolic link to ~historian.py~ in one of the directories
|
||||
specified as ~--plugin-dir~ or in
|
||||
~$HOME/.lightning/bitcoin/plugins~.
|
||||
|
||||
Notice that copying the entire directory into the plugin-dir will
|
||||
cause errors at startup. This is caused by ~lightningd~ attempting to
|
||||
start all executables in the plugin-dir, even the ~historian-cli~
|
||||
which is not a plugin. The errors are not dangerous, just annoying and
|
||||
may delay startup slightly.
|
||||
|
||||
If the plugin starts correctly you should be able to call
|
||||
~lightning-cli historian-stats~ and see that it is starting to store
|
||||
messages in the database.
|
||||
|
||||
** Command line
|
||||
The command line tool ~historian-cli~ can be used to manage the
|
||||
databases, manage backups and manage snapshots:
|
||||
|
||||
- A ~backup~ is a full dump of a range of messages from a database,
|
||||
it contains all node announcements and channel updates. Backups can
|
||||
be used to reconstruct the network at any time in the past.
|
||||
- A ~snapshot~ is a set of messages representing the latest known
|
||||
state of a node or a channel, i.e., it omits node announcements and
|
||||
channel updates that were later overwritten. Snapshots can be used
|
||||
to quickly and efficiently sync a node from a database.
|
||||
|
||||
The following commands are available:
|
||||
|
||||
- ~historian-cli db merge [source] [destination]~ iterates through
  messages in ~source~ and adds them to ~destination~ if they are not
  present yet.

- ~historian-cli backup create [destination]~ dumps all messages in
  the database into ~destination~.

- ~historian-cli backup read [source]~ hex-encodes each message and
  prints one per line.

- ~historian-cli snapshot full [destination]~ creates a new snapshot
  that includes all non-pruned channels, i.e., it starts from 2 weeks
  ago and collects channels, updates, and nodes.

- ~historian-cli snapshot incremental [destination] [since]~ creates a
  new snapshot that only includes changes since the ~since~ date.

- ~historian-cli snapshot read [source]~ hex-encodes each message and
  prints one per line.

- ~historian-cli snapshot load [source]~ connects to a lightning node
  over the P2P protocol and injects the messages from the snapshot.
  Useful for catching a node up on changes since the last sync.
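
For example, a typical session might look as follows (the file names
and the timestamp are illustrative placeholders):

#+BEGIN_EXAMPLE
  # dump the local database into a backup file
  historian-cli backup create gossip-backup.gsp

  # create an incremental snapshot of changes since a given date
  historian-cli snapshot incremental snapshot.gsp "2023-01-01 00:00:00"

  # merge another database into the default one
  historian-cli db merge sqlite:///other.sqlite3
#+END_EXAMPLE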
|
||||
|
||||
** File format
|
||||
The plugin writes all messages into a sqlite3 database in the same
|
||||
directory as the ~gossip_store~ file. There are three tables, one for
|
||||
each message type, with the ~raw~ column as the raw message, and a
|
||||
couple of fields to enable message deduplication.
|
||||
|
||||
All files generated and read by the ~historian-cli~ tool have four
|
||||
bytes of prefix ~GSP\x01~, indicating GSP file version 1, followed by
|
||||
the messages. Each message is prefixed by its size as a ~CompactSize~
|
||||
integer.
|
||||
|
||||
If you just want to iterate through messages in a file, there are the
|
||||
~historian-cli backup read~ and the ~historian-cli snapshot read~
|
||||
commands that will print each hex-encoded message on a line to
|
||||
~stdout~.
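
For instance, a minimal reader for this format could look like the
following sketch (assuming ~pyln-proto~ is installed; it mirrors what
the ~read~ commands do, without being the canonical implementation):

#+BEGIN_SRC python
  # Minimal sketch of reading a GSP v1 file and printing its messages.
  import sys
  from pyln.proto.primitives import varint_decode

  with open(sys.argv[1], 'rb') as f:
      header = f.read(4)
      assert header[:3] == b'GSP' and header[3] == 1, "not a GSP v1 file"
      while True:
          length = varint_decode(f)   # CompactSize length prefix
          if length is None:          # clean end of file
              break
          msg = f.read(length)
          if len(msg) != length:
              raise ValueError("incomplete read at end of file")
          print(msg.hex())            # one hex-encoded message per line
#+END_SRC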
|
||||
|
||||
* Projects
|
||||
These projects make use of the historian plugin:
|
||||
|
||||
- [[https://github.com/lnresearch/topology][lnresearch/topology]] uses the backups as dataset to enable research
|
||||
on the topological evolution of the network.
|
||||
56
Unmaintained/historian/cli/backup.py
Normal file
56
Unmaintained/historian/cli/backup.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import click
|
||||
from .common import db_session, split_gossip
|
||||
import os
|
||||
from pyln.proto.primitives import varint_decode, varint_encode
|
||||
|
||||
@click.group()
|
||||
def backup():
|
||||
pass
|
||||
|
||||
|
||||
@backup.command()
|
||||
@click.argument('destination', type=click.File('wb'))
|
||||
@click.option('--db', type=str, default=None)
|
||||
def create(destination, db):
|
||||
with db_session(db) as session:
|
||||
rows = session.execute("SELECT raw FROM channel_announcements")
|
||||
|
||||
# Write the header now that we know we'll be writing something.
|
||||
destination.write(b"GSP\x01")
|
||||
|
||||
for r in rows:
|
||||
varint_encode(len(r[0]), destination)
|
||||
destination.write(r[0])
|
||||
|
||||
rows = session.execute("SELECT raw FROM channel_updates ORDER BY timestamp ASC")
|
||||
for r in rows:
|
||||
varint_encode(len(r[0]), destination)
|
||||
destination.write(r[0])
|
||||
|
||||
rows = session.execute("SELECT raw FROM node_announcements ORDER BY timestamp ASC")
|
||||
for r in rows:
|
||||
varint_encode(len(r[0]), destination)
|
||||
destination.write(r[0])
|
||||
|
||||
destination.close()
|
||||
|
||||
@backup.command()
|
||||
@click.argument("source", type=click.File('rb'))
|
||||
def read(source):
|
||||
"""Load gossip messages from the specified source and print it to stdout
|
||||
|
||||
Prints the hex-encoded raw gossip message to stdout.
|
||||
"""
|
||||
header = source.read(4)
|
||||
if len(header) < 4:
|
||||
raise ValueError("Could not read header")
|
||||
|
||||
tag, version = header[0:3], header[3]
|
||||
if tag != b'GSP':
|
||||
raise ValueError(f"Header mismatch, expected GSP, got {repr(tag)}")
|
||||
|
||||
if version != 1:
|
||||
raise ValueError(f"Unsupported version {version}, only support up to version 1")
|
||||
|
||||
for m in split_gossip(source):
|
||||
print(m.hex())
|
||||
105
Unmaintained/historian/cli/common.py
Normal file
105
Unmaintained/historian/cli/common.py
Normal file
@@ -0,0 +1,105 @@
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from sqlalchemy import create_engine
|
||||
from contextlib import contextmanager
|
||||
import os
|
||||
from common import Base
|
||||
import io
|
||||
from pyln.proto.primitives import varint_decode
|
||||
from gossipd import parse
|
||||
import click
|
||||
import bz2
|
||||
|
||||
default_db = "sqlite:///$HOME/.lightning/bitcoin/historian.sqlite3"
|
||||
|
||||
|
||||
@contextmanager
|
||||
def db_session(dsn):
|
||||
"""Tiny contextmanager to facilitate sqlalchemy session management"""
|
||||
if dsn is None:
|
||||
dsn = default_db
|
||||
dsn = os.path.expandvars(dsn)
|
||||
engine = create_engine(dsn, echo=False)
|
||||
Base.metadata.create_all(engine)
|
||||
session_maker = sessionmaker(bind=engine)
|
||||
session = session_maker()
|
||||
try:
|
||||
yield session
|
||||
session.commit()
|
||||
except:
|
||||
session.rollback()
|
||||
raise
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
|
||||
def split_gossip(reader: io.BytesIO):
|
||||
while True:
|
||||
length = varint_decode(reader)
|
||||
if length is None:
|
||||
break
|
||||
|
||||
msg = reader.read(length)
|
||||
if len(msg) != length:
|
||||
raise ValueError("Incomplete read at end of file")
|
||||
|
||||
yield msg
|
||||
|
||||
|
||||
class GossipStream:
|
||||
def __init__(self, file_stream, filename, decode=True):
|
||||
self.stream = file_stream
|
||||
self.decode = decode
|
||||
self.filename = filename
|
||||
|
||||
# Read header
|
||||
header = self.stream.read(4)
|
||||
assert len(header) == 4
|
||||
assert header[:3] == b"GSP"
|
||||
assert header[3] == 1
|
||||
|
||||
def seek(self, offset):
|
||||
"""Allow skipping to a specific point in the stream.
|
||||
|
||||
The offset is denoted in bytes from the start, including the
|
||||
header, and matches the value of f.tell()
|
||||
"""
|
||||
self.stream.seek(offset, io.SEEK_SET)
|
||||
|
||||
def tell(self):
|
||||
"""Returns the absolute position in the stream.
|
||||
|
||||
Includes the header, and matches f.seek()
|
||||
"""
|
||||
return self.stream.tell()
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
pos = self.stream.tell()
|
||||
length = varint_decode(self.stream)
|
||||
|
||||
if length is None:
|
||||
raise StopIteration
|
||||
|
||||
msg = self.stream.read(length)
|
||||
if len(msg) != length:
|
||||
raise ValueError(
|
||||
"Error reading snapshot at {pos}: incomplete read of {length} bytes, only got {lmsg} bytes".format(
|
||||
pos=pos, length=length, lmsg=len(msg)
|
||||
)
|
||||
)
|
||||
if not self.decode:
|
||||
return msg
|
||||
|
||||
return parse(msg)
|
||||
|
||||
|
||||
class GossipFile(click.File):
|
||||
def __init__(self, decode=True):
|
||||
click.File.__init__(self)
|
||||
self.decode = decode
|
||||
|
||||
def convert(self, value, param, ctx):
|
||||
f = bz2.open(value, "rb") if value.endswith(".bz2") else open(value, "rb")
|
||||
return GossipStream(f, value, self.decode)
|
||||
60
Unmaintained/historian/cli/db.py
Normal file
60
Unmaintained/historian/cli/db.py
Normal file
@@ -0,0 +1,60 @@
|
||||
import click
|
||||
from common import NodeAnnouncement, ChannelAnnouncement, ChannelUpdate
|
||||
from tqdm import tqdm
|
||||
from gossipd import parse
|
||||
from cli.common import db_session, default_db
|
||||
|
||||
|
||||
@click.group()
|
||||
def db():
|
||||
pass
|
||||
|
||||
|
||||
@db.command()
|
||||
@click.argument('source', type=str)
|
||||
@click.argument('destination', type=str, default=default_db)
|
||||
def merge(source, destination):
|
||||
"""Merge two historian databases by copying from source to destination.
|
||||
"""
|
||||
|
||||
meta = {
|
||||
'channel_announcements': None,
|
||||
'channel_updates': None,
|
||||
'node_announcements': None,
|
||||
}
|
||||
|
||||
with db_session(source) as source, db_session(destination) as target:
|
||||
# Not strictly necessary, but I like progress indicators and ETAs.
|
||||
for table in meta.keys():
|
||||
rows = source.execute(f"SELECT count(*) FROM {table}")
|
||||
count, = rows.fetchone()
|
||||
meta[table] = count
|
||||
|
||||
for r, in tqdm(
|
||||
source.execute("SELECT raw FROM channel_announcements"),
|
||||
total=meta['channel_announcements'],
|
||||
):
|
||||
msg = parse(r)
|
||||
if isinstance(r, memoryview):
|
||||
r = bytes(r)
|
||||
target.merge(ChannelAnnouncement.from_gossip(msg, r))
|
||||
|
||||
for r, in tqdm(
|
||||
source.execute("SELECT raw FROM channel_updates ORDER BY timestamp ASC"),
|
||||
total=meta['channel_updates'],
|
||||
):
|
||||
msg = parse(r)
|
||||
if isinstance(r, memoryview):
|
||||
r = bytes(r)
|
||||
target.merge(ChannelUpdate.from_gossip(msg, r))
|
||||
|
||||
for r, in tqdm(
|
||||
source.execute("SELECT raw FROM node_announcements ORDER BY timestamp ASC"),
|
||||
total=meta['node_announcements'],
|
||||
):
|
||||
msg = parse(r)
|
||||
if isinstance(r, memoryview):
|
||||
r = bytes(r)
|
||||
target.merge(NodeAnnouncement.from_gossip(msg, r))
|
||||
|
||||
target.commit()
|
||||
210
Unmaintained/historian/common.py
Normal file
210
Unmaintained/historian/common.py
Normal file
@@ -0,0 +1,210 @@
|
||||
from binascii import hexlify
|
||||
from datetime import datetime
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import Column, BigInteger, SmallInteger, DateTime, LargeBinary
|
||||
import gossipd
|
||||
from contextlib import contextmanager
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
Base = declarative_base()
|
||||
default_db = os.environ.get(
|
||||
"HIST_DEFAULT_DSN",
|
||||
"sqlite:///$HOME/.lightning/bitcoin/historian.sqlite3"
|
||||
)
|
||||
|
||||
|
||||
class ChannelUpdate(Base):
|
||||
__tablename__ = 'channel_updates'
|
||||
scid = Column(BigInteger, primary_key=True)
|
||||
direction = Column(SmallInteger, primary_key=True)
|
||||
timestamp = Column(DateTime, primary_key=True)
|
||||
raw = Column(LargeBinary)
|
||||
|
||||
@classmethod
|
||||
def from_gossip(cls, gcu: gossipd.ChannelUpdate,
|
||||
raw: bytes) -> 'ChannelUpdate':
|
||||
assert(raw[:2] == b'\x01\x02')
|
||||
self = ChannelUpdate()
|
||||
self.scid = gcu.num_short_channel_id
|
||||
self.timestamp = datetime.fromtimestamp(gcu.timestamp)
|
||||
self.direction = gcu.direction
|
||||
self.raw = raw
|
||||
return self
|
||||
|
||||
def to_json(self):
|
||||
return {
|
||||
'scid': "{}x{}x{}".format(self.scid >> 40, self.scid >> 16 & 0xFFFFFF, self.scid & 0xFFFF),
|
||||
'nscid': self.scid,
|
||||
'direction': self.direction,
|
||||
'timestamp': self.timestamp.strftime("%Y/%m/%d, %H:%M:%S"),
|
||||
'raw': hexlify(self.raw).decode('ASCII'),
|
||||
}
|
||||
|
||||
|
||||
class ChannelAnnouncement(Base):
|
||||
__tablename__ = "channel_announcements"
|
||||
scid = Column(BigInteger, primary_key=True)
|
||||
raw = Column(LargeBinary)
|
||||
|
||||
@classmethod
|
||||
def from_gossip(cls, gca: gossipd.ChannelAnnouncement,
|
||||
raw: bytes) -> 'ChannelAnnouncement':
|
||||
assert(raw[:2] == b'\x01\x00')
|
||||
self = ChannelAnnouncement()
|
||||
self.scid = gca.num_short_channel_id
|
||||
self.raw = raw
|
||||
return self
|
||||
|
||||
def to_json(self):
|
||||
return {
|
||||
'scid': "{}x{}x{}".format(self.scid >> 40, self.scid >> 16 & 0xFFFFFF, self.scid & 0xFFFF),
|
||||
'nscid': self.scid,
|
||||
'raw': hexlify(self.raw).decode('ASCII'),
|
||||
}
|
||||
|
||||
|
||||
class NodeAnnouncement(Base):
|
||||
__tablename__ = "node_announcements"
|
||||
node_id = Column(LargeBinary, primary_key=True)
|
||||
timestamp = Column(DateTime, primary_key=True)
|
||||
raw = Column(LargeBinary)
|
||||
|
||||
@classmethod
|
||||
def from_gossip(cls, gna: gossipd.NodeAnnouncement,
|
||||
raw: bytes) -> 'NodeAnnouncement':
|
||||
assert(raw[:2] == b'\x01\x01')
|
||||
self = NodeAnnouncement()
|
||||
self.node_id = gna.node_id
|
||||
self.timestamp = datetime.fromtimestamp(gna.timestamp)
|
||||
self.raw = raw
|
||||
return self
|
||||
|
||||
def to_json(self):
|
||||
return {
|
||||
'node_id': hexlify(self.node_id).decode('ASCII'),
|
||||
'timestamp': self.timestamp.strftime("%Y/%m/%d, %H:%M:%S"),
|
||||
'raw': hexlify(self.raw).decode('ASCII'),
|
||||
}
|
||||
|
||||
|
||||
@contextmanager
|
||||
def db_session(dsn):
|
||||
"""Tiny contextmanager to facilitate sqlalchemy session management"""
|
||||
if dsn is None:
|
||||
dsn = default_db
|
||||
dsn = os.path.expandvars(dsn)
|
||||
engine = create_engine(dsn, echo=False)
|
||||
Base.metadata.create_all(engine)
|
||||
session_maker = sessionmaker(bind=engine)
|
||||
session = session_maker()
|
||||
try:
|
||||
yield session
|
||||
session.commit()
|
||||
except:
|
||||
session.rollback()
|
||||
raise
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
|
||||
def stream_snapshot_since(since, db=None):
|
||||
with db_session(db) as session:
|
||||
# Several nested queries here because join was a bit too
|
||||
# restrictive. The inner SELECT in the WHERE-clause selects all scids
|
||||
# that had any updates in the desired timerange. The outer SELECT then
|
||||
# gets all the announcements and kicks off inner SELECTs that look for
|
||||
# the latest update for each direction.
|
||||
rows = session.execute(
|
||||
"""
|
||||
SELECT
|
||||
a.scid,
|
||||
a.raw,
|
||||
(
|
||||
SELECT
|
||||
u.raw
|
||||
FROM
|
||||
channel_updates u
|
||||
WHERE
|
||||
u.scid = a.scid AND
|
||||
direction = 0
|
||||
ORDER BY
|
||||
timestamp
|
||||
DESC LIMIT 1
|
||||
) as u0,
|
||||
(
|
||||
SELECT
|
||||
u.raw
|
||||
FROM
|
||||
channel_updates u
|
||||
WHERE
|
||||
u.scid = a.scid AND
|
||||
direction = 1
|
||||
ORDER BY
|
||||
timestamp
|
||||
DESC LIMIT 1
|
||||
) as u1
|
||||
FROM
|
||||
channel_announcements a
|
||||
WHERE
|
||||
a.scid IN (
|
||||
SELECT
|
||||
u.scid
|
||||
FROM
|
||||
channel_updates u
|
||||
WHERE
|
||||
u.timestamp >= '{}'
|
||||
GROUP BY
|
||||
u.scid
|
||||
)
|
||||
ORDER BY
|
||||
a.scid
|
||||
""".format(
|
||||
since.strftime("%Y-%m-%d %H:%M:%S")
|
||||
)
|
||||
)
|
||||
last_scid = None
|
||||
for scid, cann, u1, u2 in rows:
|
||||
if scid == last_scid:
|
||||
continue
|
||||
last_scid = scid
|
||||
yield cann
|
||||
if u1 is not None:
|
||||
yield u1
|
||||
if u2 is not None:
|
||||
yield u2
|
||||
|
||||
# Now get and return the node_announcements in the timerange. These
|
||||
# come after the channels, since a node without any
# channel_announcement and channel_update is not allowed.
|
||||
rows = session.execute(
|
||||
"""
|
||||
SELECT
|
||||
n.node_id,
|
||||
n.timestamp,
|
||||
n.raw
|
||||
FROM
|
||||
node_announcements n
|
||||
WHERE
|
||||
n.timestamp >= '{}'
|
||||
GROUP BY
|
||||
n.node_id,
|
||||
n.timestamp
|
||||
HAVING
|
||||
n.timestamp = MAX(n.timestamp)
|
||||
ORDER BY timestamp DESC
|
||||
""".format(
|
||||
since.strftime("%Y-%m-%d %H:%M:%S")
|
||||
)
|
||||
)
|
||||
last_nid = None
|
||||
for nid, ts, nann in rows:
|
||||
if nid == last_nid:
|
||||
continue
|
||||
last_nid = nid
|
||||
yield nann
|
||||
266
Unmaintained/historian/gossipd.py
Normal file
266
Unmaintained/historian/gossipd.py
Normal file
@@ -0,0 +1,266 @@
|
||||
from binascii import hexlify
|
||||
|
||||
|
||||
import io
|
||||
import struct
|
||||
|
||||
|
||||
class ChannelAnnouncement(object):
|
||||
def __init__(self):
|
||||
self.num_short_channel_id = None
|
||||
self.node_signatures = [None, None]
|
||||
self.bitcoin_signatures = [None, None]
|
||||
self.features = None
|
||||
self.chain_hash = None
|
||||
self.node_ids = [None, None]
|
||||
self.bitcoin_keys = [None, None]
|
||||
|
||||
@property
|
||||
def short_channel_id(self):
|
||||
return "{}x{}x{}".format(
|
||||
(self.num_short_channel_id >> 40) & 0xFFFFFF,
|
||||
(self.num_short_channel_id >> 16) & 0xFFFFFF,
|
||||
(self.num_short_channel_id >> 00) & 0xFFFF
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.num_short_channel_id == other.num_short_channel_id and
|
||||
self.bitcoin_keys == other.bitcoin_keys and
|
||||
self.chain_hash == other.chain_hash and
|
||||
self.node_ids == other.node_ids and
|
||||
self.features == other.features
|
||||
)
|
||||
|
||||
def serialize(self):
|
||||
raise ValueError()
|
||||
|
||||
def __str__(self):
|
||||
na = hexlify(self.node_ids[0]).decode('ASCII')
|
||||
nb = hexlify(self.node_ids[1]).decode('ASCII')
|
||||
return "ChannelAnnouncement(scid={short_channel_id}, nodes=[{na},{nb}])".format(
|
||||
na=na, nb=nb, short_channel_id=self.short_channel_id)
|
||||
|
||||
|
||||
class ChannelUpdate(object):
|
||||
def __init__(self):
|
||||
self.signature = None
|
||||
self.chain_hash = None
|
||||
self.num_short_channel_id = None
|
||||
self.timestamp = None
|
||||
self.message_flags = None
|
||||
self.channel_flags = None
|
||||
self.cltv_expiry_delta = None
|
||||
self.htlc_minimum_msat = None
|
||||
self.fee_base_msat = None
|
||||
self.fee_proportional_millionths = None
|
||||
self.htlc_maximum_msat = None
|
||||
|
||||
@property
|
||||
def short_channel_id(self):
|
||||
return "{}x{}x{}".format(
|
||||
(self.num_short_channel_id >> 40) & 0xFFFFFF,
|
||||
(self.num_short_channel_id >> 16) & 0xFFFFFF,
|
||||
(self.num_short_channel_id >> 00) & 0xFFFF
|
||||
)
|
||||
|
||||
@property
|
||||
def direction(self):
|
||||
b, = struct.unpack("!B", self.channel_flags)
|
||||
return b & 0x01
|
||||
|
||||
def serialize(self):
|
||||
raise ValueError()
|
||||
|
||||
def __str__(self):
|
||||
return 'ChannelUpdate(scid={short_channel_id}, timestamp={timestamp})'.format(
|
||||
timestamp=self.timestamp, short_channel_id=self.short_channel_id)
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.chain_hash == other.chain_hash and
|
||||
self.num_short_channel_id == other.num_short_channel_id and
|
||||
self.timestamp == other.timestamp and
|
||||
self.message_flags == other.message_flags and
|
||||
self.channel_flags == other.channel_flags and
|
||||
self.cltv_expiry_delta == other.cltv_expiry_delta and
|
||||
self.htlc_minimum_msat == other.htlc_minimum_msat and
|
||||
self.fee_base_msat == other.fee_base_msat and
|
||||
self.fee_proportional_millionths == other.fee_proportional_millionths and
|
||||
self.htlc_maximum_msat == other.htlc_maximum_msat
|
||||
)
|
||||
|
||||
|
||||
class Address(object):
|
||||
def __init__(self, typ=None, addr=None, port=None):
|
||||
self.typ = typ
|
||||
self.addr = addr
|
||||
self.port = port
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.typ == other.typ and
|
||||
self.addr == other.addr and
|
||||
self.port == other.port
|
||||
)
|
||||
|
||||
def __len__(self):
|
||||
l = {
|
||||
1: 6,
|
||||
2: 18,
|
||||
3: 12,
|
||||
4: 37,
|
||||
}
|
||||
return l[self.typ] + 1
|
||||
|
||||
def __str__(self):
|
||||
addr = self.addr
|
||||
if self.typ == 1:
|
||||
addr = ".".join([str(c) for c in addr])
|
||||
|
||||
protos = {
|
||||
1: "ipv4",
|
||||
2: "ipv6",
|
||||
3: "torv2",
|
||||
4: "torv3",
|
||||
}
|
||||
|
||||
return f"{protos[self.typ]}://{addr}:{self.port}"
|
||||
|
||||
|
||||
class NodeAnnouncement(object):
|
||||
def __init__(self):
|
||||
self.signature = None
|
||||
self.features = ""
|
||||
self.timestamp = None
|
||||
self.node_id = None
|
||||
self.rgb_color = None
|
||||
self.alias = None
|
||||
self.addresses = None
|
||||
|
||||
def __str__(self):
|
||||
return "NodeAnnouncement(id={hexlify(node_id)}, alias={alias}, color={rgb_color})".format(
|
||||
node_id=self.node_id, alias=self.alias, rgb_color=self.rgb_color)
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.features == other.features and
|
||||
self.timestamp == other.timestamp and
|
||||
self.node_id == other.node_id and
|
||||
self.rgb_color == other.rgb_color and
|
||||
self.alias == other.alias
|
||||
)
|
||||
|
||||
|
||||
def parse(b):
|
||||
if not isinstance(b, io.BytesIO):
|
||||
b = io.BytesIO(b)
|
||||
typ, = struct.unpack("!H", b.read(2))
|
||||
|
||||
parsers = {
|
||||
256: parse_channel_announcement,
|
||||
257: parse_node_announcement,
|
||||
258: parse_channel_update,
|
||||
3503: parse_ignore,
|
||||
4103: parse_ignore,
|
||||
}
|
||||
|
||||
if typ not in parsers:
|
||||
raise ValueError("No parser registered for type {typ}".format(typ=typ))
|
||||
|
||||
return parsers[typ](b)
|
||||
|
||||
|
||||
def parse_ignore(b):
|
||||
return None
|
||||
|
||||
|
||||
def parse_channel_announcement(b):
|
||||
if not isinstance(b, io.BytesIO):
|
||||
b = io.BytesIO(b)
|
||||
|
||||
ca = ChannelAnnouncement()
|
||||
ca.node_signatures = (b.read(64), b.read(64))
|
||||
ca.bitcoin_signatures = (b.read(64), b.read(64))
|
||||
flen, = struct.unpack("!H", b.read(2))
|
||||
ca.features = b.read(flen)
|
||||
ca.chain_hash = b.read(32)[::-1]
|
||||
ca.num_short_channel_id, = struct.unpack("!Q", b.read(8))
|
||||
ca.node_ids = (b.read(33), b.read(33))
|
||||
ca.bitcoin_keys = (b.read(33), b.read(33))
|
||||
return ca
|
||||
|
||||
|
||||
def parse_channel_update(b):
|
||||
if not isinstance(b, io.BytesIO):
|
||||
b = io.BytesIO(b)
|
||||
|
||||
cu = ChannelUpdate()
|
||||
cu.signature = b.read(64)
|
||||
cu.chain_hash = b.read(32)[::-1]
|
||||
cu.num_short_channel_id, = struct.unpack("!Q", b.read(8))
|
||||
cu.timestamp, = struct.unpack("!I", b.read(4))
|
||||
cu.message_flags = b.read(1)
|
||||
cu.channel_flags = b.read(1)
|
||||
cu.cltv_expiry_delta, = struct.unpack("!H", b.read(2))
|
||||
cu.htlc_minimum_msat, = struct.unpack("!Q", b.read(8))
|
||||
cu.fee_base_msat, = struct.unpack("!I", b.read(4))
|
||||
cu.fee_proportional_millionths, = struct.unpack("!I", b.read(4))
|
||||
t = b.read(8)
|
||||
if len(t) == 8:
|
||||
cu.htlc_maximum_msat, = struct.unpack("!Q", t)
|
||||
else:
|
||||
cu.htlc_maximum_msat = None
|
||||
|
||||
return cu
|
||||
|
||||
|
||||
def parse_address(b):
|
||||
if not isinstance(b, io.BytesIO):
|
||||
b = io.BytesIO(b)
|
||||
|
||||
t = b.read(1)
|
||||
if len(t) != 1:
|
||||
return None
|
||||
|
||||
a = Address()
|
||||
a.typ, = struct.unpack("!B", t)
|
||||
|
||||
if a.typ == 1:
|
||||
a.addr = b.read(4)
|
||||
elif a.typ == 2:
|
||||
a.addr = b.read(16)
|
||||
elif a.typ == 3:
|
||||
a.addr = b.read(10)
|
||||
elif a.typ == 4:
|
||||
a.addr = b.read(35)
|
||||
else:
|
||||
print(f"Unknown address type {a.typ}")
|
||||
return None
|
||||
a.port, = struct.unpack("!H", b.read(2))
|
||||
return a
|
||||
|
||||
|
||||
def parse_node_announcement(b):
|
||||
if not isinstance(b, io.BytesIO):
|
||||
b = io.BytesIO(b)
|
||||
|
||||
na = NodeAnnouncement()
|
||||
na.signature = b.read(64)
|
||||
flen, = struct.unpack("!H", b.read(2))
|
||||
na.features = b.read(flen)
|
||||
na.timestamp, = struct.unpack("!I", b.read(4))
|
||||
na.node_id = b.read(33)
|
||||
na.rgb_color = b.read(3)
|
||||
na.alias = b.read(32)
|
||||
alen, = struct.unpack("!H", b.read(2))
|
||||
abytes = io.BytesIO(b.read(alen))
|
||||
na.addresses = []
|
||||
while True:
|
||||
addr = parse_address(abytes)
|
||||
if addr is None:
|
||||
break
|
||||
else:
|
||||
na.addresses.append(addr)
|
||||
|
||||
return na
|
||||
288
Unmaintained/historian/historian-cli
Executable file
@@ -0,0 +1,288 @@
|
||||
#!/usr/bin/env python3
|
||||
import struct
|
||||
from tqdm import tqdm
|
||||
import shlex
|
||||
import subprocess
|
||||
from contextlib import contextmanager
|
||||
from sqlalchemy import create_engine
|
||||
from cli import common
|
||||
from common import Base, ChannelAnnouncement, ChannelUpdate, NodeAnnouncement
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from sqlalchemy import func
|
||||
from datetime import datetime, timedelta
|
||||
from collections import namedtuple
|
||||
import click
|
||||
from pyln.proto.primitives import varint_encode, varint_decode
|
||||
import os
|
||||
from sqlalchemy.orm import load_only
|
||||
import re
|
||||
import io
|
||||
import logging
|
||||
import socket
|
||||
from pyln.proto import wire
|
||||
from cli.backup import backup
|
||||
from cli.db import db
|
||||
from common import db_session, default_db, stream_snapshot_since
|
||||
import json
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli():
|
||||
pass
|
||||
|
||||
|
||||
cli.add_command(backup)
|
||||
cli.add_command(db)
|
||||
|
||||
|
||||
@cli.group()
|
||||
def snapshot():
|
||||
pass
|
||||
|
||||
|
||||
dt_fmt = "%Y-%m-%d %H:%M:%S"
|
||||
default_since = datetime.utcnow() - timedelta(hours=1)
|
||||
|
||||
|
||||
@snapshot.command()
|
||||
@click.argument("destination", type=click.File("wb"))
|
||||
@click.argument(
|
||||
"since",
|
||||
type=click.DateTime(formats=[dt_fmt]),
|
||||
default=default_since.strftime(dt_fmt),
|
||||
)
|
||||
@click.option("--db", type=str, default=default_db)
|
||||
def incremental(since, destination, db):
|
||||
# Write the header
|
||||
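    # Snapshot header: 3-byte magic "GSP" followed by a one-byte format version (currently 1).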
destination.write(b"GSP\x01")
|
||||
|
||||
node_count, chan_count = 0, 0
|
||||
for msg in stream_snapshot_since(since, db):
|
||||
varint_encode(len(msg), destination)
|
||||
destination.write(msg)
|
||||
|
||||
typ = msg[:2]
|
||||
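        # BOLT #7 message types: 0x0101 (257) = node_announcement, 0x0100 (256) = channel_announcement.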
if typ == b"\x01\x01":
|
||||
node_count += 1
|
||||
elif typ == b"\x01\x00":
|
||||
chan_count += 1
|
||||
|
||||
click.echo(
|
||||
f"Wrote {chan_count} channels and {node_count} nodes to {destination.name}",
|
||||
err=True,
|
||||
)
|
||||
|
||||
|
||||
@snapshot.command()
|
||||
@click.argument("destination", type=click.File("wb"))
|
||||
@click.pass_context
|
||||
@click.option("--db", type=str, default=default_db)
|
||||
def full(ctx, destination, db):
|
||||
since = datetime.utcnow() - timedelta(weeks=2)
|
||||
ctx.invoke(incremental, since=since, destination=destination, db=db)
|
||||
|
||||
|
||||
@snapshot.command()
|
||||
@click.argument("snapshot", type=click.File("rb"))
|
||||
def read(snapshot):
|
||||
header = snapshot.read(4)
|
||||
if len(header) < 4:
|
||||
raise ValueError("Could not read header")
|
||||
|
||||
tag, version = header[0:3], header[3]
|
||||
if tag != b"GSP":
|
||||
raise ValueError(f"Header mismatch, expected GSP, got {repr(tag)}")
|
||||
|
||||
if version != 1:
|
||||
raise ValueError(f"Unsupported version {version}, only support up to version 1")
|
||||
|
||||
while True:
|
||||
l = varint_decode(snapshot)
|
||||
if l is None:
|
||||
break
|
||||
|
||||
msg = snapshot.read(l)
|
||||
if len(msg) != l:
|
||||
raise ValueError("Incomplete read at end of file")
|
||||
|
||||
print(msg.hex())
|
||||
|
||||
|
||||
@snapshot.command()
|
||||
@click.argument("snapshot", type=common.GossipFile(decode=False))
|
||||
@click.argument("max_bytes", type=int)
|
||||
@click.option("-x", "--exec", type=str)
|
||||
def split(snapshot, max_bytes, exec):
|
||||
def bundle(f: common.GossipFile):
|
||||
bundle = None
|
||||
for m in f:
|
||||
(typ,) = struct.unpack_from("!H", m)
|
||||
|
||||
if typ == 257:
|
||||
# NodeAnnouncements are always self-contained, so yield them
|
||||
# individually
|
||||
yield m,
|
||||
elif typ == 256:
|
||||
# ChannelAnnouncements indicate the start of a new bundle
|
||||
if bundle is not None:
|
||||
yield tuple(bundle)
|
||||
bundle = []
|
||||
bundle.append(m)
|
||||
else:
|
||||
# ChannelUpdates belong to the bundle
|
||||
bundle.append(m)
|
||||
# If we have an unyielded bundle we need to flush it at the end.
|
||||
yield tuple(bundle)
|
||||
|
||||
def serialize_bundle(b):
|
||||
buff = io.BytesIO()
|
||||
for m in b:
|
||||
varint_encode(len(m), buff)
|
||||
buff.write(m)
|
||||
return buff.getvalue()
|
||||
|
||||
filenum = 0
|
||||
prefix, extension = os.path.splitext(snapshot.filename)
|
||||
filename = "{prefix}_{{filenum:04d}}{extension}".format(
|
||||
prefix=prefix, extension=extension
|
||||
)
|
||||
|
||||
def on_complete(filenum):
|
||||
fname = filename.format(filenum=filenum)
|
||||
if exec is not None:
|
||||
cmd = shlex.split(exec.replace("{}", shlex.quote(fname)))
|
||||
logging.debug("Exec:\n> {}".format(" ".join(cmd)))
|
||||
subprocess.run(cmd)
|
||||
|
||||
f = open(filename.format(filenum=filenum), "wb")
|
||||
f.write(b"GSP\x01")
|
||||
for b in bundle(snapshot):
|
||||
assert len(b) <= 3
|
||||
m = serialize_bundle(b)
|
||||
|
||||
if f.tell() + len(m) > max_bytes:
|
||||
f.close()
|
||||
on_complete(filenum)
|
||||
filenum += 1
|
||||
f = open(filename.format(filenum=filenum), "wb")
|
||||
f.write(b"GSP\x01")
|
||||
f.write(m)
|
||||
f.close()
|
||||
on_complete(filenum)
|
||||
|
||||
|
||||
LightningAddress = namedtuple("LightningAddress", ["node_id", "host", "port"])
|
||||
|
||||
|
||||
class LightningAddressParam(click.ParamType):
|
||||
def convert(self, value, param, ctx):
|
||||
m = re.match(r"(0[23][a-fA-F0-9]+)@([a-zA-Z0-9\.:]+):([0-9]+)?", value)
|
||||
if m is None:
|
||||
self.fail(
|
||||
f"{value} isn't a valid lightning connection string, "
|
||||
'expected "[node_id]@[host]:[port]"'
|
||||
)
|
||||
return
|
||||
|
||||
if len(m.groups()) < 3:
|
||||
return LightningAddress(m[1], m[2], 9735)
|
||||
else:
|
||||
return LightningAddress(m[1], m[2], int(m[3]))
|
||||
|
||||
|
||||
class LightningPeer:
|
||||
def __init__(self, node_id: str, address: str, port: int = 9735):
|
||||
self.node_id = node_id
|
||||
self.address = address
|
||||
self.port = port
|
||||
self.connection = None
|
||||
self.local_privkey = wire.PrivateKey(os.urandom(32))
|
||||
|
||||
def connect(self):
|
||||
sock = socket.create_connection((self.address, self.port), timeout=30)
|
||||
self.connection = wire.LightningConnection(
|
||||
sock,
|
||||
remote_pubkey=wire.PublicKey(bytes.fromhex(self.node_id)),
|
||||
local_privkey=self.local_privkey,
|
||||
is_initiator=True,
|
||||
)
|
||||
self.connection.shake()
|
||||
|
||||
# Send an init message, with no global features, and 0b10101010 as
|
||||
# local features.
|
||||
self.connection.send_message(b"\x00\x10\x00\x00\x00\x01\xaa")
|
||||
|
||||
def send(self, packet: bytes) -> None:
|
||||
if self.connection is None:
|
||||
raise ValueError("Not connected to peer")
|
||||
|
||||
logging.debug("Sending {}".format(packet.hex()))
|
||||
self.connection.send_message(packet)
|
||||
|
||||
def send_all(self, packets) -> None:
|
||||
assert self.connection is not None
|
||||
for p in packets:
|
||||
self.send(p)
|
||||
|
||||
def disconnect(self):
|
||||
self.connection.connection.close()
|
||||
|
||||
|
||||
def split_gossip(reader: io.BytesIO):
|
||||
while True:
|
||||
length = varint_decode(reader)
|
||||
if length is None:
|
||||
break
|
||||
|
||||
msg = reader.read(length)
|
||||
if len(msg) != length:
|
||||
raise ValueError("Incomplete read at end of file")
|
||||
|
||||
yield msg
|
||||
|
||||
|
||||
@snapshot.command()
|
||||
@click.argument("snapshot", type=click.File("rb"))
|
||||
@click.argument("destination", type=LightningAddressParam(), required=False)
|
||||
def load(snapshot, destination=None):
|
||||
|
||||
if destination is None:
|
||||
logging.debug("No destination specified, attempting auto-discovery")
|
||||
info = json.loads(subprocess.check_output(["lightning-cli", "getinfo"]))
|
||||
id = info["id"]
|
||||
bindings = [
|
||||
(id, b["address"], b["port"])
|
||||
for b in info["binding"]
|
||||
if b["type"] == "ipv4"
|
||||
]
|
||||
if len(bindings) < 1:
|
||||
raise ValueError(
|
||||
"Could not automatically determine the c-lightning"
|
||||
" address to connect to. Please provide a --destination"
|
||||
)
|
||||
binding = bindings[0]
|
||||
logging.debug("Discovered local node {}@{}:{}".format(*binding))
|
||||
destination = LightningAddress(*binding)
|
||||
|
||||
header = snapshot.read(4)
|
||||
if len(header) < 4:
|
||||
raise ValueError("Could not read header")
|
||||
|
||||
tag, version = header[0:3], header[3]
|
||||
if tag != b"GSP":
|
||||
raise ValueError(f"Header mismatch, expected GSP, got {repr(tag)}")
|
||||
|
||||
if version != 1:
|
||||
raise ValueError(f"Unsupported version {version}, only support up to version 1")
|
||||
|
||||
logging.debug(f"Connecting to {destination}")
|
||||
peer = LightningPeer(destination.node_id, destination.host, destination.port)
|
||||
peer.connect()
|
||||
logging.debug("Connected, streaming messages from snapshot")
|
||||
peer.send_all(tqdm(split_gossip(snapshot)))
|
||||
peer.disconnect()
|
||||
logging.debug("Done streaming messages, disconnecting")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
212
Unmaintained/historian/historian.py
Executable file
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
from inotify import constants
|
||||
from inotify.adapters import Inotify
|
||||
from pyln.client import Plugin
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy import desc
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from threading import Thread
|
||||
from common import Base, ChannelAnnouncement, ChannelUpdate, NodeAnnouncement
|
||||
import logging
|
||||
import gossipd
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Any message that is larger than this threshold will not be processed
|
||||
# as it bloats the database.
|
||||
MAX_MSG_SIZE = 1024
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
|
||||
class FsMonitor(Thread):
|
||||
    def __init__(self):
        Thread.__init__(self)
|
||||
|
||||
def run(self):
|
||||
watch_mask = constants.IN_ALL_EVENTS
|
||||
|
||||
print("Starting FsMonitor")
|
||||
i = Inotify()
|
||||
i.add_watch('gossip_store', mask=watch_mask)
|
||||
for event in i.event_gen(yield_nones=False):
|
||||
(e, type_names, path, filename) = event
|
||||
if e.mask & constants.IN_DELETE_SELF:
|
||||
i.remove_watch('gossip_store')
|
||||
i.add_watch('gossip_store', mask=watch_mask)
|
||||
|
||||
|
||||
class FileTailer():
|
||||
def __init__(self, filename):
|
||||
self.filename = filename
|
||||
self.pos = 1
|
||||
self.version = None
|
||||
|
||||
def resume(self):
|
||||
ev_count = 0
|
||||
with open(self.filename, 'rb') as f:
|
||||
self.version, = struct.unpack("!B", f.read(1))
|
||||
f.seek(self.pos)
|
||||
while True:
|
||||
skip = False
|
||||
diff = 8
|
||||
hdr = f.read(8)
|
||||
if len(hdr) < 8:
|
||||
break
|
||||
|
||||
length, crc = struct.unpack("!II", hdr)
|
||||
if self.version > 3:
|
||||
f.read(4) # Throw away the CRC
|
||||
diff += 4
|
||||
|
||||
# deleted = (length & 0x80000000 != 0)
|
||||
# important = (length & 0x40000000 != 0)
|
||||
length = length & (~0x80000000) & (~0x40000000)
|
||||
|
||||
msg = f.read(length)
|
||||
|
||||
# Incomplete write, will try again
|
||||
if len(msg) < length:
|
||||
logging.debug(
|
||||
f"Partial read: {len(msg)}<{length}, waiting 1 second"
|
||||
)
|
||||
time.sleep(1)
|
||||
f.seek(self.pos)
|
||||
continue
|
||||
|
||||
diff += length
|
||||
|
||||
# Strip eventual wrappers:
|
||||
typ, = struct.unpack("!H", msg[:2])
|
||||
if self.version <= 3 and typ in [4096, 4097, 4098]:
|
||||
msg = msg[4:]
|
||||
|
||||
self.pos += diff
|
||||
if typ in [4101, 3503]:
|
||||
f.seek(self.pos)
|
||||
continue
|
||||
|
||||
if length > MAX_MSG_SIZE:
|
||||
logging.warn(
|
||||
f"Unreasonably large message type {typ} at position {self.pos} ({length} bytes), skipping"
|
||||
)
|
||||
continue
|
||||
|
||||
ev_count += 1
|
||||
|
||||
yield msg
|
||||
logging.debug(
|
||||
f"Reached end of {self.filename} at {self.pos} after {ev_count} "
|
||||
"new messages, waiting for new fs event"
|
||||
)
|
||||
|
||||
def wait_actionable(self, i):
|
||||
for event in i.event_gen(yield_nones=False):
|
||||
if event[0].mask & constants.IN_DELETE_SELF:
|
||||
return 'swap'
|
||||
if event[0].mask & constants.IN_MODIFY:
|
||||
return 'append'
|
||||
|
||||
def tail(self):
|
||||
watch_mask = (constants.IN_ALL_EVENTS ^ constants.IN_ACCESS ^
|
||||
constants.IN_OPEN ^ constants.IN_CLOSE_NOWRITE)
|
||||
i = Inotify()
|
||||
i.add_watch(self.filename, mask=watch_mask)
|
||||
while True:
|
||||
# Consume as much as possible.
|
||||
yield from self.resume()
|
||||
|
||||
# Now wait for a change that we can react to
|
||||
ev = self.wait_actionable(i)
|
||||
|
||||
if ev == 'append':
|
||||
continue
|
||||
|
||||
if ev == 'swap':
|
||||
# Need to reach around since file-deletion removes C watches,
|
||||
# but not the python one...
|
||||
try:
|
||||
i.remove_watch(self.filename)
|
||||
except Exception:
|
||||
pass
|
||||
i.add_watch(self.filename, mask=watch_mask)
|
||||
self.pos = 1
|
||||
continue
|
||||
|
||||
|
||||
class Flusher(Thread):
|
||||
def __init__(self, engine):
|
||||
Thread.__init__(self)
|
||||
self.engine = engine
|
||||
self.session_maker = sessionmaker(bind=engine)
|
||||
self.session = None
|
||||
|
||||
def run(self):
|
||||
logging.info("Starting flusher")
|
||||
ft = FileTailer('gossip_store')
|
||||
last_flush = time.time()
|
||||
|
||||
self.session = self.session_maker()
|
||||
for i, e in enumerate(ft.tail()):
|
||||
self.store(e)
|
||||
|
||||
if last_flush < time.time() - 10:
|
||||
self.session.commit()
|
||||
self.session = self.session_maker()
|
||||
last_flush = time.time()
|
||||
|
||||
logging.warn("Filetailer exited...")
|
||||
|
||||
def store(self, raw: bytes) -> None:
|
||||
try:
|
||||
msg = gossipd.parse(raw)
|
||||
cls = None
|
||||
if isinstance(msg, gossipd.ChannelUpdate):
|
||||
cls = ChannelUpdate
|
||||
|
||||
elif isinstance(msg, gossipd.ChannelAnnouncement):
|
||||
cls = ChannelAnnouncement
|
||||
|
||||
elif isinstance(msg, gossipd.NodeAnnouncement):
|
||||
cls = NodeAnnouncement
|
||||
|
||||
else:
|
||||
                return
|
||||
|
||||
self.session.merge(cls.from_gossip(msg, raw))
|
||||
except Exception as e:
|
||||
logging.warn(f"Exception parsing gossip message: {e}")
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(plugin, configuration, options):
|
||||
print(options)
|
||||
engine = create_engine(options['historian-dsn'], echo=False)
|
||||
Base.metadata.create_all(engine)
|
||||
plugin.engine = engine
|
||||
Flusher(engine).start()
|
||||
|
||||
|
||||
@plugin.method('historian-stats')
|
||||
def stats(plugin):
|
||||
engine = plugin.engine
|
||||
session_maker = sessionmaker(bind=engine)
|
||||
session = session_maker()
|
||||
|
||||
return {
|
||||
'channel_announcements': session.query(ChannelAnnouncement).count(),
|
||||
'channel_updates': session.query(ChannelUpdate).count(),
|
||||
'node_announcements': session.query(NodeAnnouncement).count(),
|
||||
'latest_node_announcement': session.query(NodeAnnouncement).order_by(desc(NodeAnnouncement.timestamp)).limit(1).first(),
|
||||
'latest_channel_update': session.query(ChannelUpdate).order_by(desc(ChannelUpdate.timestamp)).limit(1).first(),
|
||||
}
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'historian-dsn',
|
||||
'sqlite:///historian.sqlite3',
|
||||
"SQL DSN defining where the gossip data should be stored."
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin.run()
|
||||
1002
Unmaintained/historian/poetry.lock
generated
Normal file
File diff suppressed because it is too large
24
Unmaintained/historian/pyproject.toml
Normal file
@@ -0,0 +1,24 @@
[tool.poetry]
name = "cln-historian"
version = "0.1.0"
description = "A plugin to store historical Lightning Network gossip in a database."
authors = ["Christian Decker <decker@blockstream.io>"]
license = "MIT"

[tool.poetry.dependencies]
python = "^3.7"
pyln-client = "0.11.1"
inotify = "^0.2.10"

[tool.poetry.dev-dependencies]
pyln-testing = "0.11.1"


[tool.poetry.group.dev.dependencies]
pytest-timeout = "^2.1.0"
pytest-rerunfailures = "^10.3"
pytest-xdist = "^3.1.0"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
23
Unmaintained/historian/test_historian.py
Normal file
@@ -0,0 +1,23 @@
from pyln.testing.fixtures import *
import os
import subprocess


plugin = os.path.join(os.path.dirname(__file__), 'historian.py')


def test_start(node_factory):
    opts = {'plugin': plugin}
    l1 = node_factory.get_node(options=opts)
    l1.stop()

    help_out = subprocess.check_output([
        'lightningd',
        '--plugin={}'.format(plugin),
        '--lightning-dir={}'.format(l1.daemon.lightning_dir),
        '--help'
    ]).decode('utf-8').split('\n')
    help_out = [h.split(' ', 1) for h in help_out]
    help_out = [(v[0].strip(), v[1].strip()) for v in help_out if len(v) == 2]
    from pprint import pprint
    pprint(help_out)
60
Unmaintained/paytest/README.org
Normal file
@@ -0,0 +1,60 @@
#+TITLE: Paytest Plugin

A plugin to benchmark the performance of the ~pay~ plugin. It can
generate mock invoices for remote nodes, and it can hold on to
incoming test multi-part payments as if they were real payments.

🚧 This plugin is intended for LN developers. Use it at your own risk 👷

The plugin consists of three parts:

- ~testinvoice~: An RPC method to generate a fake invoice which
  forces any payer to go through a specific node as the penultimate
  hop in the route. That node can then decode the onion and pretend
  it is the real destination. The invoice is valid, however the
  ~payment_hash~ has no known preimage and can therefore not be
  settled.

- An ~htlc_accepted~ hook that intercepts any HTLC resulting from a
  remote ~testpay~ call, decodes the payload and pretends to be the
  recipient, exercising the MPP aggregation logic.

- ~testpay~: A wrapper RPC method that generates a ~testinvoice~ and
  calls ~pay~ on it, triggering the benchmarking. Upon receiving the
  result it re-interprets it according to what we expect to happen; a
  usage sketch follows this list.
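The wrapper is registered as ~paytest~ in ~paytest.py~; the following
is a minimal, hypothetical sketch of driving it from Python with
~pyln-client~ (the RPC socket path and node id are placeholders, not
values taken from this repository):

#+begin_src python
# Sketch: run a 100000msat test payment towards a peer and print the
# resulting pay status. Assumes a locally running Core Lightning node.
from pyln.client import LightningRpc

rpc = LightningRpc("/path/to/lightning-rpc")  # hypothetical socket path
result = rpc.call("paytest", {
    "destination": "02...peer node id...",  # placeholder node id
    "amount": "100000msat",
})
print(result["status"])
#+end_src
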
* Protocol
The protocol consists of a couple of conventions in order to correctly
test the pay process.

The sender always creates a test invoice, with a ~payment_hash~ that
it knows has no known preimage, and destined for a non-existent
~node_id~, but which includes a route-hint from the real destination
to the fake destination. The route-hint MUST use the short channel ID
~1x1x1~ for the channel, as this is how the recipient identifies that
this is a test-payment. In addition the other parameters in the route
hint SHOULD use minimal values in order not to interfere with the
testing (CLTV delta ~9~, base fee ~1~, proportional fee ~1~).
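
For illustration, the recipient-side detection is roughly the
following sketch; it mirrors the check at the top of the
~htlc_accepted~ hook in ~paytest.py~, where ~short_channel_id~ is the
onion payload field Core Lightning hands to that hook:

#+begin_src python
# Sketch: decide whether an incoming HTLC belongs to a test payment.
def is_test_payment(onion: dict) -> bool:
    # The sender's route hint forces the forward through the fake
    # short channel id 1x1x1, which no real channel can use.
    return onion.get("short_channel_id") == "1x1x1"
#+end_src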

Since we are creating fake invoices for nodes that do not exist, and
with a ~payment_hash~ that the destination cannot settle, we
re-interpret the following failure codes (a sketch of this mapping
follows the list):

- ~16399~ / ~0x400f~ (~incorrect_or_unknown_payment_details~) is
  considered to be a successful payment attempt, i.e., the
  destination understood that this is a test payment, has held onto
  incoming parts for up to 60 seconds, allowing them to accumulate,
  and ultimately all parts reached the destination.

- ~23~ / ~0x0017~ (~mpp_timeout~) is considered a failed payment
  attempt, i.e., the destination understood this is a test payment,
  has held on to incoming parts for 60 seconds, but ultimately
  failed to accumulate all parts of the payment.

- ~16394~ / ~0x400a~ (~unknown_next_peer~) signifies that the
  destination did not understand the protocol, and the test
  failed. This is the default behavior that nodes implement if this
  plugin is not active.
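
The re-interpretation is not implemented yet (the ~paytest~ wrapper in
~paytest.py~ still carries a TODO for it); the following is only a
sketch of what the mapping could look like, assuming the caller has
already extracted the BOLT #4 failure code of the final error:

#+begin_src python
# Hypothetical helper: map the final failure code of a test payment to
# a test outcome, following the conventions listed above.
def interpret_test_failure(failcode: int) -> str:
    if failcode == 0x400F:    # incorrect_or_unknown_payment_details
        return "success"      # all parts reached the virtual destination
    if failcode == 0x0017:    # mpp_timeout
        return "failure"      # parts timed out before the total arrived
    if failcode == 0x400A:    # unknown_next_peer
        return "unsupported"  # destination does not run the paytest plugin
    return "unknown"
#+end_src
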
321
Unmaintained/paytest/paytest.py
Executable file
@@ -0,0 +1,321 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import struct
|
||||
from binascii import hexlify, unhexlify
|
||||
from collections import namedtuple
|
||||
from decimal import Decimal
|
||||
from threading import Timer
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import hashes, hmac
|
||||
from pyln.client import Millisatoshi, Plugin, RpcError
|
||||
from pyln.proto.invoice import (
|
||||
Invoice, RouteHint, RouteHintSet, bech32_encode, bitarray_to_u5, bitstring,
|
||||
coincurve, encode_fallback, hashlib, shorten_amount, tagged, tagged_bytes)
|
||||
from pyln.proto.onion import RoutingOnion, chacha20_stream, ecdh, TlvPayload
|
||||
from pyln.proto.primitives import PrivateKey, Secret
|
||||
|
||||
# Something we don't have a preimage for, and allows downstream nodes
|
||||
# to recognize this as a test payment.
|
||||
PAYMENT_HASH = b"AA" * 32
|
||||
|
||||
# The private key used for the final hop. Well-known so the
|
||||
# penultimate hop can decode the onion.
|
||||
PRIVKEY = PrivateKey(b"\xAA" * 32)
|
||||
PUBKEY = PRIVKEY.public_key()
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
KeySet = namedtuple("KeySet", ["rho", "mu", "um", "pad", "gamma", "pi", "ammag"])
|
||||
|
||||
|
||||
def generate_key(secret: bytes, prefix: bytes):
|
||||
h = hmac.HMAC(prefix, hashes.SHA256(), backend=default_backend())
|
||||
h.update(secret)
|
||||
return h.finalize()
|
||||
|
||||
|
||||
def generate_keyset(secret: Secret) -> KeySet:
|
||||
types = [bytes(f, "ascii") for f in KeySet._fields]
|
||||
keys = [generate_key(secret.data, t) for t in types]
|
||||
return KeySet(*keys)
|
||||
|
||||
|
||||
class MyInvoice(Invoice):
|
||||
def __init__(self, *args, **kwargs):
|
||||
Invoice.__init__(self, *args, **kwargs)
|
||||
self.features = 0
|
||||
|
||||
def encode(self, privkey):
|
||||
if self.amount:
|
||||
amount = Decimal(str(self.amount))
|
||||
# We can only send down to millisatoshi.
|
||||
if amount * 10 ** 12 % 10:
|
||||
raise ValueError(
|
||||
"Cannot encode {}: too many decimal places".format(self.amount)
|
||||
)
|
||||
|
||||
amount = self.currency + shorten_amount(amount)
|
||||
else:
|
||||
amount = self.currency if self.currency else ""
|
||||
|
||||
hrp = "ln" + amount
|
||||
|
||||
# Start with the timestamp
|
||||
data = bitstring.pack("uint:35", self.date)
|
||||
|
||||
# Payment hash
|
||||
data += tagged_bytes("p", self.paymenthash)
|
||||
tags_set = set()
|
||||
|
||||
if self.route_hints is not None:
|
||||
for rh in self.route_hints.route_hints:
|
||||
data += tagged_bytes("r", rh.to_bytes())
|
||||
|
||||
if self.features != 0:
|
||||
b = "{:x}".format(self.features)
|
||||
if len(b) % 2 == 1:
|
||||
b = "0" + b
|
||||
data += tagged_bytes("9", unhexlify(b))
|
||||
|
||||
for k, v in self.tags:
|
||||
|
||||
# BOLT #11:
|
||||
#
|
||||
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
|
||||
if k in ("d", "h", "n", "x"):
|
||||
if k in tags_set:
|
||||
raise ValueError("Duplicate '{}' tag".format(k))
|
||||
|
||||
if k == "r":
|
||||
pubkey, channel, fee, cltv = v
|
||||
route = (
|
||||
bitstring.BitArray(pubkey)
|
||||
+ bitstring.BitArray(channel)
|
||||
+ bitstring.pack("intbe:64", fee)
|
||||
+ bitstring.pack("intbe:16", cltv)
|
||||
)
|
||||
data += tagged("r", route)
|
||||
elif k == "f":
|
||||
data += encode_fallback(v, self.currency)
|
||||
elif k == "d":
|
||||
data += tagged_bytes("d", v.encode())
|
||||
elif k == "s":
|
||||
data += tagged_bytes("s", v)
|
||||
elif k == "x":
|
||||
# Get minimal length by trimming leading 5 bits at a time.
|
||||
expirybits = bitstring.pack("intbe:64", v)[4:64]
|
||||
while expirybits.startswith("0b00000"):
|
||||
expirybits = expirybits[5:]
|
||||
data += tagged("x", expirybits)
|
||||
elif k == "h":
|
||||
data += tagged_bytes("h", hashlib.sha256(v.encode("utf-8")).digest())
|
||||
elif k == "n":
|
||||
data += tagged_bytes("n", v)
|
||||
else:
|
||||
# FIXME: Support unknown tags?
|
||||
raise ValueError("Unknown tag {}".format(k))
|
||||
|
||||
tags_set.add(k)
|
||||
|
||||
# BOLT #11:
|
||||
#
|
||||
# A writer MUST include either a `d` or `h` field, and MUST NOT include
|
||||
# both.
|
||||
if "d" in tags_set and "h" in tags_set:
|
||||
raise ValueError("Cannot include both 'd' and 'h'")
|
||||
if "d" not in tags_set and "h" not in tags_set:
|
||||
raise ValueError("Must include either 'd' or 'h'")
|
||||
|
||||
# We actually sign the hrp, then data (padded to 8 bits with zeroes).
|
||||
privkey = coincurve.PrivateKey(secret=bytes(unhexlify(privkey)))
|
||||
data += privkey.sign_recoverable(
|
||||
bytearray([ord(c) for c in hrp]) + data.tobytes()
|
||||
)
|
||||
|
||||
return bech32_encode(hrp, bitarray_to_u5(data))
|
||||
|
||||
|
||||
@plugin.method("testinvoice")
|
||||
def testinvoice(destination, amount=None, **kwargs):
|
||||
if amount is not None:
|
||||
amount = Millisatoshi(amount).to_btc()
|
||||
|
||||
network = plugin.rpc.listconfigs()['network']
|
||||
|
||||
currency = {
|
||||
'bitcoin': 'bc',
|
||||
'regtest': 'bcrt',
|
||||
'signet': 'tb',
|
||||
'testnet': 'tb',
|
||||
'liquid-regtest': 'ert',
|
||||
'liquid': 'ex',
|
||||
}[network]
|
||||
|
||||
inv = MyInvoice(
|
||||
paymenthash=unhexlify(PAYMENT_HASH),
|
||||
amount=amount,
|
||||
currency=currency,
|
||||
)
|
||||
inv.pubkey = PUBKEY
|
||||
inv.tags.append(
|
||||
("d", "Test invoice for {destination}".format(destination=destination))
|
||||
)
|
||||
|
||||
# Payment_secret
|
||||
inv.tags.append(("s", os.urandom(32)))
|
||||
|
||||
# The real magic is here: we add a routehint that tells the sender
|
||||
# how to get to this non-existent node. The trick is that it has
|
||||
# to go through the real destination.
|
||||
|
||||
rh = RouteHint()
|
||||
rh.pubkey = unhexlify(destination)
|
||||
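    # Encodes the short channel id 1x1x1 (block 1, tx index 1, output 1),
    # the marker the recipient-side hook looks for.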
rh.short_channel_id = 1 << 40 | 1 << 16 | 1
|
||||
rh.fee_base_msat = 1
|
||||
rh.fee_proportional_millionths = 1
|
||||
rh.cltv_expiry_delta = 9
|
||||
rhs = RouteHintSet()
|
||||
rhs.add(rh)
|
||||
inv.route_hints = rhs
|
||||
|
||||
inv.features |= 1 << 14 # payment secret
|
||||
inv.features |= 1 << 16 # basic_mpp
|
||||
inv.features |= 1 << 8 # TLV payloads
|
||||
|
||||
return {
|
||||
"invoice": inv.encode(PRIVKEY.serializeCompressed().hex()),
|
||||
"attention": "The invoice is destined for {}, but forced through {} which will process it instead. So don't worry if decoding the invoice returns a different destination than you'd expect.".format(
|
||||
PUBKEY.serializeCompressed().hex(), destination
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def wrap_error(keys, err):
|
||||
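    # Build a BOLT #4 failure packet: 2-byte message length, the failure
    # message, 2-byte pad length and zero padding, then prepend an HMAC
    # keyed with "um" and obfuscate with the "ammag" ChaCha20 stream.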
b = unhexlify(err)
|
||||
c = len(b)
|
||||
padlen = 256 - c
|
||||
pad = b"\x00" * padlen
|
||||
b = struct.pack("!H", c) + b + struct.pack("!H", padlen) + pad
|
||||
assert len(b) == 256 + 2 + 2
|
||||
h = hmac.HMAC(keys.um, hashes.SHA256(), backend=default_backend())
|
||||
h.update(b)
|
||||
# h.update(unhexlify(PAYMENT_HASH))
|
||||
hh = h.finalize()
|
||||
b = bytearray(hh + b)
|
||||
chacha20_stream(keys.ammag, b)
|
||||
return hexlify(bytes(b)).decode("ASCII")
|
||||
|
||||
|
||||
@plugin.method("paytest")
|
||||
def paytest(destination, amount, request, plugin):
|
||||
inv = testinvoice(destination, amount)
|
||||
|
||||
try:
|
||||
plugin.rpc.pay(inv["invoice"])
|
||||
raise ValueError("pay succeeded, this is impossible...")
|
||||
except RpcError as e:
|
||||
print(e)
|
||||
# TODO Reinterpret result as success or failure.
|
||||
|
||||
return {
|
||||
"invoice": inv,
|
||||
"status": plugin.rpc.paystatus(inv["invoice"])["pay"][0],
|
||||
}
|
||||
|
||||
|
||||
def timeout(plugin, secret):
|
||||
if secret not in plugin.pending:
|
||||
return
|
||||
|
||||
parts = plugin.pending.get(secret, None)
|
||||
|
||||
if parts is None:
|
||||
return
|
||||
|
||||
print("Timing out payment with secret={secret}".format(secret=secret))
|
||||
for p in parts:
|
||||
p[0].set_result({"result": "fail", "failure_onion": wrap_error(p[4], b"0017")})
|
||||
|
||||
|
||||
@plugin.async_hook("htlc_accepted")
|
||||
def on_htlc_accepted(onion, htlc, request, plugin, *args, **kwargs):
|
||||
print(
|
||||
"Got an incoming HTLC for {payment_hash}".format(
|
||||
payment_hash=htlc["payment_hash"]
|
||||
)
|
||||
)
|
||||
# If this is not a test payment, pass it on
|
||||
if 'short_channel_id' not in onion or onion["short_channel_id"] != "1x1x1":
|
||||
return request.set_result({"result": "continue"})
|
||||
|
||||
# Decode the onion so we get the details the virtual recipient
|
||||
# would get.
|
||||
ro = RoutingOnion.from_hex(onion["next_onion"])
|
||||
try:
|
||||
payload, next_onion = ro.unwrap(PRIVKEY, unhexlify(PAYMENT_HASH))
|
||||
except Exception:
|
||||
return request.set_result({"result": "continue"})
|
||||
|
||||
if next_onion is not None:
|
||||
# Whoops, apparently the virtual node isn't the last hop, fail
|
||||
# by default.
|
||||
return request.set_result({"result": "continue"})
|
||||
|
||||
# Shared key required for the response
|
||||
shared_secret = ecdh(PRIVKEY, ro.ephemeralkey)
|
||||
|
||||
# MPP payments really only work with TlvPayloads, otherwise we
|
||||
# don't know the total. In addition the `.get(8)` would fail on a
|
||||
# LegacyOnionPayload, so we just tell them to go away here.
|
||||
if not isinstance(payload, TlvPayload):
|
||||
        return request.set_result({"result": "continue"})
|
||||
|
||||
# We key the payment by payment_secret rather than payment_hash so
|
||||
# we collide less often.
|
||||
ps = payload.get(8).value.hex()
|
||||
if ps not in plugin.pending:
|
||||
plugin.pending[ps] = []
|
||||
# Start the timer
|
||||
Timer(60.0, timeout, args=(plugin, ps)).start()
|
||||
|
||||
payment_data = payload.get(8).value
|
||||
# secret = payment_data[:32]
|
||||
total = payment_data[32:].hex()
|
||||
|
||||
total = int(total, 16)
|
||||
plugin.pending[ps].append(
|
||||
(
|
||||
request,
|
||||
total,
|
||||
int(Millisatoshi(onion["forward_msat"])),
|
||||
shared_secret,
|
||||
generate_keyset(shared_secret),
|
||||
)
|
||||
)
|
||||
|
||||
parts = plugin.pending[ps]
|
||||
received = sum([p[2] for p in parts])
|
||||
print("Received {}/{} with {} parts".format(received, total, len(parts)))
|
||||
|
||||
if received != total:
|
||||
return
|
||||
|
||||
for p in parts:
|
||||
p[0].set_result(
|
||||
{
|
||||
"result": "fail",
|
||||
"failure_onion": wrap_error(p[4], b"400F"),
|
||||
}
|
||||
)
|
||||
|
||||
del plugin.pending[ps]
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(plugin, *args, **kwargs):
|
||||
# Multi-part payments that are currently pending
|
||||
plugin.pending = {}
|
||||
|
||||
|
||||
plugin.run()
|
||||
541
Unmaintained/paytest/poetry.lock
generated
Normal file
@@ -0,0 +1,541 @@
|
||||
[[package]]
|
||||
name = "asn1crypto"
|
||||
version = "1.5.1"
|
||||
description = "Fast ASN.1 parser and serializer with definitions for private keys, public keys, certificates, CRL, OCSP, CMS, PKCS#3, PKCS#7, PKCS#8, PKCS#12, PKCS#5, X.509 and TSP"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "attrs"
|
||||
version = "22.1.0"
|
||||
description = "Classes Without Boilerplate"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
|
||||
[package.extras]
|
||||
dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
|
||||
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
|
||||
tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
|
||||
tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
|
||||
|
||||
[[package]]
|
||||
name = "base58"
|
||||
version = "2.1.1"
|
||||
description = "Base58 and Base58Check implementation."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
|
||||
[package.extras]
|
||||
tests = ["PyHamcrest (>=2.0.2)", "mypy", "pytest (>=4.6)", "pytest-benchmark", "pytest-cov", "pytest-flake8"]
|
||||
|
||||
[[package]]
|
||||
name = "bitstring"
|
||||
version = "3.1.9"
|
||||
description = "Simple construction, analysis and modification of binary data."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "cffi"
|
||||
version = "1.15.1"
|
||||
description = "Foreign Function Interface for Python calling C code."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.dependencies]
|
||||
pycparser = "*"
|
||||
|
||||
[[package]]
|
||||
name = "coincurve"
|
||||
version = "17.0.0"
|
||||
description = "Cross-platform Python CFFI bindings for libsecp256k1"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
asn1crypto = "*"
|
||||
cffi = ">=1.3.0"
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.5"
|
||||
description = "Cross-platform colored terminal text."
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "36.0.2"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
cffi = ">=1.12"
|
||||
|
||||
[package.extras]
|
||||
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
|
||||
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
|
||||
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
|
||||
sdist = ["setuptools_rust (>=0.11.4)"]
|
||||
ssh = ["bcrypt (>=3.1.5)"]
|
||||
test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
|
||||
|
||||
[[package]]
|
||||
name = "execnet"
|
||||
version = "1.9.0"
|
||||
description = "execnet: rapid multi-Python deployment"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
|
||||
[package.extras]
|
||||
testing = ["pre-commit"]
|
||||
|
||||
[[package]]
|
||||
name = "importlib-metadata"
|
||||
version = "4.12.0"
|
||||
description = "Read metadata from Python packages"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
|
||||
zipp = ">=0.5"
|
||||
|
||||
[package.extras]
|
||||
docs = ["jaraco.packaging (>=9)", "rst.linker (>=1.9)", "sphinx"]
|
||||
perf = ["ipython"]
|
||||
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
|
||||
|
||||
[[package]]
|
||||
name = "iniconfig"
|
||||
version = "1.1.1"
|
||||
description = "iniconfig: brain-dead simple config-ini parsing"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "packaging"
|
||||
version = "21.3"
|
||||
description = "Core utilities for Python packages"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.0.0"
|
||||
description = "plugin and hook calling mechanisms for python"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
|
||||
|
||||
[package.extras]
|
||||
dev = ["pre-commit", "tox"]
|
||||
testing = ["pytest", "pytest-benchmark"]
|
||||
|
||||
[[package]]
|
||||
name = "py"
|
||||
version = "1.11.0"
|
||||
description = "library with cross-python path, ini-parsing, io, code, log facilities"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
|
||||
[[package]]
|
||||
name = "pycparser"
|
||||
version = "2.21"
|
||||
description = "C parser in Python"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
|
||||
[[package]]
|
||||
name = "pyln-bolt7"
|
||||
version = "1.0.246"
|
||||
description = "BOLT7"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<4.0"
|
||||
|
||||
[[package]]
|
||||
name = "pyln-client"
|
||||
version = "0.12.0"
|
||||
description = "Client library and plugin library for Core Lightning"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<4.0"
|
||||
|
||||
[package.dependencies]
|
||||
pyln-bolt7 = ">=1.0,<2.0"
|
||||
pyln-proto = ">=0.12"
|
||||
|
||||
[[package]]
|
||||
name = "pyln-proto"
|
||||
version = "0.12.0"
|
||||
description = "This package implements some of the Lightning Network protocol in pure python. It is intended for protocol testing and some minor tooling only. It is not deemed secure enough to handle any amount of real funds (you have been warned!)."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<4.0"
|
||||
|
||||
[package.dependencies]
|
||||
base58 = ">=2.1.1,<3.0.0"
|
||||
bitstring = ">=3.1.9,<4.0.0"
|
||||
coincurve = ">=17.0.0,<18.0.0"
|
||||
cryptography = ">=36.0.1,<37.0.0"
|
||||
PySocks = ">=1.7.1,<2.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "pyparsing"
|
||||
version = "3.0.9"
|
||||
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6.8"
|
||||
|
||||
[package.extras]
|
||||
diagrams = ["jinja2", "railroad-diagrams"]
|
||||
|
||||
[[package]]
|
||||
name = "PySocks"
|
||||
version = "1.7.1"
|
||||
description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "7.1.3"
|
||||
description = "pytest: simple powerful testing with Python"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
attrs = ">=19.2.0"
|
||||
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||
importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
|
||||
iniconfig = "*"
|
||||
packaging = "*"
|
||||
pluggy = ">=0.12,<2.0"
|
||||
py = ">=1.8.2"
|
||||
tomli = ">=1.0.0"
|
||||
|
||||
[package.extras]
|
||||
testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-forked"
|
||||
version = "1.4.0"
|
||||
description = "run tests in isolated forked subprocesses"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
py = "*"
|
||||
pytest = ">=3.10"
|
||||
|
||||
[[package]]
|
||||
name = "pytest-timeout"
|
||||
version = "2.1.0"
|
||||
description = "pytest plugin to abort hanging tests"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
pytest = ">=5.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "pytest-xdist"
|
||||
version = "2.5.0"
|
||||
description = "pytest xdist plugin for distributed testing and loop-on-failing modes"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
execnet = ">=1.1"
|
||||
pytest = ">=6.2.0"
|
||||
pytest-forked = "*"
|
||||
|
||||
[package.extras]
|
||||
psutil = ["psutil (>=3.0)"]
|
||||
setproctitle = ["setproctitle"]
|
||||
testing = ["filelock"]
|
||||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.0.1"
|
||||
description = "A lil' TOML parser"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[[package]]
|
||||
name = "typing-extensions"
|
||||
version = "4.3.0"
|
||||
description = "Backported and Experimental Type Hints for Python 3.7+"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[[package]]
|
||||
name = "zipp"
|
||||
version = "3.8.1"
|
||||
description = "Backport of pathlib-compatible object wrapper for zip files"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.extras]
|
||||
docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
|
||||
testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
|
||||
|
||||
[metadata]
|
||||
lock-version = "1.1"
|
||||
python-versions = "^3.7"
|
||||
content-hash = "33da3eb59719149f70eabe8d57861b6d051256d6752f414593f5ae67abe33fcd"
|
||||
|
||||
[metadata.files]
|
||||
asn1crypto = [
|
||||
{file = "asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67"},
|
||||
{file = "asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c"},
|
||||
]
|
||||
attrs = [
|
||||
{file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
|
||||
{file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
|
||||
]
|
||||
base58 = [
|
||||
{file = "base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2"},
|
||||
{file = "base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c"},
|
||||
]
|
||||
bitstring = [
|
||||
{file = "bitstring-3.1.9-py2-none-any.whl", hash = "sha256:e3e340e58900a948787a05e8c08772f1ccbe133f6f41fe3f0fa19a18a22bbf4f"},
|
||||
{file = "bitstring-3.1.9-py3-none-any.whl", hash = "sha256:0de167daa6a00c9386255a7cac931b45e6e24e0ad7ea64f1f92a64ac23ad4578"},
|
||||
{file = "bitstring-3.1.9.tar.gz", hash = "sha256:a5848a3f63111785224dca8bb4c0a75b62ecdef56a042c8d6be74b16f7e860e7"},
|
||||
]
|
||||
cffi = [
|
||||
{file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
|
||||
{file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
|
||||
{file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
|
||||
{file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
|
||||
{file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
|
||||
{file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
|
||||
{file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
|
||||
{file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
|
||||
{file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
|
||||
{file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
|
||||
{file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
|
||||
{file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
|
||||
{file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
|
||||
{file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
|
||||
]
|
||||
coincurve = [
|
||||
{file = "coincurve-17.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac8c87d6fd080faa74e7ecf64a6ed20c11a254863238759eb02c3f13ad12b0c4"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:25dfa105beba24c8de886f8ed654bb1133866e4e22cfd7ea5ad8438cae6ed924"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:698efdd53e4fe1bbebaee9b75cbc851be617974c1c60098e9145cb7198ae97fb"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30dd44d1039f1d237aaa2da6d14a455ca88df3bcb00610b41f3253fdca1be97b"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154e2eb5711db8c5ef52fcd80935b5a0e751c057bc6ffb215a7bb409aedef03"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c71caffb97dd3d0c243beb62352669b1e5dafa3a4bccdbb27d36bd82f5e65d20"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:747215254e51dd4dfbe6dded9235491263da5d88fe372d66541ca16b51ea078f"},
|
||||
{file = "coincurve-17.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad2f6df39ba1e2b7b14bb984505ffa7d0a0ecdd697e8d7dbd19e04bc245c87ed"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0503326963916c85b61d16f611ea0545f03c9e418fa8007c233c815429e381e8"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1013c1597b65684ae1c3e42497f9ef5a04527fa6136a84a16b34602606428c74"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4beef321fd6434448aab03a0c245f31c4e77f43b54b82108c0948d29852ac7e"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f47806527d3184da3e8b146fac92a8ed567bbd225194f4517943d8cdc85f9542"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:51e56373ac79f4ec1cfc5da53d72c55f5e5ac28d848b0849ef5e687ace857888"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3d694ad194bee9e8792e2e75879dc5238d8a184010cde36c5ad518fcfe2cd8f2"},
|
||||
{file = "coincurve-17.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:74cedb3d3a1dc5abe0c9c2396e1b82cc64496babc5b42e007e72e185cb1edad8"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:db874c5c1dcb1f3a19379773b5e8cffc777625a7a7a60dd9a67206e31e62e2e9"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:896b01941254f0a218cf331a9bddfe2d43892f7f1ba10d6e372e2eb744a744c2"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6aec70238dbe7a5d66b5f9438ff45b08eb5e0990d49c32ebb65247c5d5b89d7a"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24284d17162569df917a640f19d9654ba3b43cf560ced8864f270da903f73a5"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ea057f777842396d387103c606babeb3a1b4c6126769cc0a12044312fc6c465"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b88642edf7f281649b0c0b6ffade051945ccceae4b885e40445634877d0b3049"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a80a207131813b038351c5bdae8f20f5f774bbf53622081f208d040dd2b7528f"},
|
||||
{file = "coincurve-17.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f1ef72574aa423bc33665ef4be859164a478bad24d48442da874ef3dc39a474d"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dfd4fab857bcd975edc39111cb5f5c104f138dac2e9ace35ea8434d37bcea3be"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:73f39579dd651a9fc29da5a8fc0d8153d872bcbc166f876457baced1a1c01501"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8852dc01af4f0fe941ffd04069f7e4fecdce9b867a016f823a02286a1a1f07b5"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1bef812da1da202cdd601a256825abcf26d86e8634fac3ec3e615e3bb3ff08c"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abbefc9ccb170cb255a31df32457c2e43084b9f37589d0694dacc2dea6ddaf7c"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:abbd9d017a7638dc38a3b9bb4851f8801b7818d4e5ac22e0c75e373b3c1dbff0"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e2c2e8a1f0b1f8e48049c891af4ae3cad65d115d358bde72f6b8abdbb8a23170"},
|
||||
{file = "coincurve-17.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c571445b166c714af4f8155e38a894376c16c0431e88963f2fff474a9985d87"},
|
||||
{file = "coincurve-17.0.0-py3-none-win32.whl", hash = "sha256:b956b0b2c85e25a7d00099970ff5d8338254b45e46f0a940f4a2379438ce0dde"},
|
||||
{file = "coincurve-17.0.0-py3-none-win_amd64.whl", hash = "sha256:630388080da3026e0b0176cc6762eaabecba857ee3fc85767577dea063ea7c6e"},
|
||||
{file = "coincurve-17.0.0.tar.gz", hash = "sha256:68da55aff898702952fda3ee04fd6ed60bb6b91f919c69270786ed766b548b93"},
|
||||
]
|
||||
colorama = [
|
||||
{file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
|
||||
{file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
|
||||
]
|
||||
cryptography = [
|
||||
{file = "cryptography-36.0.2-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:4e2dddd38a5ba733be6a025a1475a9f45e4e41139d1321f412c6b360b19070b6"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:4881d09298cd0b669bb15b9cfe6166f16fc1277b4ed0d04a22f3d6430cb30f1d"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea634401ca02367c1567f012317502ef3437522e2fc44a3ea1844de028fa4b84"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7be666cc4599b415f320839e36367b273db8501127b38316f3b9f22f17a0b815"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8241cac0aae90b82d6b5c443b853723bcc66963970c67e56e71a2609dc4b5eaf"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2d54e787a884ffc6e187262823b6feb06c338084bbe80d45166a1cb1c6c5bf"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:c2c5250ff0d36fd58550252f54915776940e4e866f38f3a7866d92b32a654b86"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ec6597aa85ce03f3e507566b8bcdf9da2227ec86c4266bd5e6ab4d9e0cc8dab2"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ca9f686517ec2c4a4ce930207f75c00bf03d94e5063cbc00a1dc42531511b7eb"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-win32.whl", hash = "sha256:f64b232348ee82f13aac22856515ce0195837f6968aeaa94a3d0353ea2ec06a6"},
|
||||
{file = "cryptography-36.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:53e0285b49fd0ab6e604f4c5d9c5ddd98de77018542e88366923f152dbeb3c29"},
|
||||
{file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:32db5cc49c73f39aac27574522cecd0a4bb7384e71198bc65a0d23f901e89bb7"},
|
||||
{file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b3d199647468d410994dbeb8cec5816fb74feb9368aedf300af709ef507e3e"},
|
||||
{file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:da73d095f8590ad437cd5e9faf6628a218aa7c387e1fdf67b888b47ba56a17f0"},
|
||||
{file = "cryptography-36.0.2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:0a3bf09bb0b7a2c93ce7b98cb107e9170a90c51a0162a20af1c61c765b90e60b"},
|
||||
{file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8897b7b7ec077c819187a123174b645eb680c13df68354ed99f9b40a50898f77"},
|
||||
{file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82740818f2f240a5da8dfb8943b360e4f24022b093207160c77cadade47d7c85"},
|
||||
{file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1f64a62b3b75e4005df19d3b5235abd43fa6358d5516cfc43d87aeba8d08dd51"},
|
||||
{file = "cryptography-36.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e167b6b710c7f7bc54e67ef593f8731e1f45aa35f8a8a7b72d6e42ec76afd4b3"},
|
||||
{file = "cryptography-36.0.2.tar.gz", hash = "sha256:70f8f4f7bb2ac9f340655cbac89d68c527af5bb4387522a8413e841e3e6628c9"},
|
||||
]
|
||||
execnet = [
|
||||
{file = "execnet-1.9.0-py2.py3-none-any.whl", hash = "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"},
|
||||
{file = "execnet-1.9.0.tar.gz", hash = "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5"},
|
||||
]
|
||||
importlib-metadata = [
|
||||
{file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"},
|
||||
{file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"},
|
||||
]
|
||||
iniconfig = [
|
||||
{file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
|
||||
{file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"},
|
||||
]
|
||||
packaging = [
|
||||
{file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
|
||||
{file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
|
||||
]
|
||||
pluggy = [
|
||||
{file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
|
||||
{file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
|
||||
]
|
||||
py = [
|
||||
{file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
|
||||
{file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
|
||||
]
|
||||
pycparser = [
|
||||
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
|
||||
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
|
||||
]
|
||||
pyln-bolt7 = [
|
||||
{file = "pyln-bolt7-1.0.246.tar.gz", hash = "sha256:2b53744fa21c1b12d2c9c9df153651b122e38fa65d4a5c3f2957317ee148e089"},
|
||||
{file = "pyln_bolt7-1.0.246-py3-none-any.whl", hash = "sha256:54d48ec27fdc8751762cb068b0a9f2757a58fb57933c6d8f8255d02c27eb63c5"},
|
||||
]
|
||||
pyln-client = [
|
||||
{file = "pyln-client-0.12.0.tar.gz", hash = "sha256:76786e4eb52e6934e09b3086e5b5fd8f62a8ef99de2dbce8049259e869a95a7a"},
|
||||
{file = "pyln_client-0.12.0-py3-none-any.whl", hash = "sha256:652b09a879fb30df88e54918e8d8f42cf14636ee3a1cfd0d06aa35999dc95a43"},
|
||||
]
|
||||
pyln-proto = [
|
||||
{file = "pyln-proto-0.12.0.tar.gz", hash = "sha256:3214d99d8385f2135a94937f0dc1da626a33b257e9ebc320841656edaefabbe5"},
|
||||
{file = "pyln_proto-0.12.0-py3-none-any.whl", hash = "sha256:dedef5d8e476a9ade5a0b2eb919ccc37e4a57f2a78fdc399f1c5e0de17e41604"},
|
||||
]
|
||||
pyparsing = [
|
||||
{file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
|
||||
{file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
|
||||
]
|
||||
PySocks = [
|
||||
{file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"},
|
||||
{file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"},
|
||||
{file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"},
|
||||
]
|
||||
pytest = [
|
||||
{file = "pytest-7.1.3-py3-none-any.whl", hash = "sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7"},
|
||||
{file = "pytest-7.1.3.tar.gz", hash = "sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39"},
|
||||
]
|
||||
pytest-forked = [
|
||||
{file = "pytest-forked-1.4.0.tar.gz", hash = "sha256:8b67587c8f98cbbadfdd804539ed5455b6ed03802203485dd2f53c1422d7440e"},
|
||||
{file = "pytest_forked-1.4.0-py3-none-any.whl", hash = "sha256:bbbb6717efc886b9d64537b41fb1497cfaf3c9601276be8da2cccfea5a3c8ad8"},
|
||||
]
|
||||
pytest-timeout = [
|
||||
{file = "pytest-timeout-2.1.0.tar.gz", hash = "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9"},
|
||||
{file = "pytest_timeout-2.1.0-py3-none-any.whl", hash = "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"},
|
||||
]
|
||||
pytest-xdist = [
|
||||
{file = "pytest-xdist-2.5.0.tar.gz", hash = "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf"},
|
||||
{file = "pytest_xdist-2.5.0-py3-none-any.whl", hash = "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"},
|
||||
]
|
||||
tomli = [
|
||||
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
|
||||
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
|
||||
]
|
||||
typing-extensions = [
|
||||
{file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"},
|
||||
{file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"},
|
||||
]
|
||||
zipp = [
|
||||
{file = "zipp-3.8.1-py3-none-any.whl", hash = "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"},
|
||||
{file = "zipp-3.8.1.tar.gz", hash = "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2"},
|
||||
]
|
||||
2  Unmaintained/paytest/requirements.txt  Normal file
@@ -0,0 +1,2 @@
pyln-client ~= 0.9.2
pyln-proto ~= 0.9.2

69  Unmaintained/paytest/test_paytest.py  Normal file
@@ -0,0 +1,69 @@
from pyln.testing.fixtures import *  # noqa: F401,F403
from pyln.client import RpcError
import os
import pytest
from pprint import pprint


pluginopt = {'plugin': os.path.join(os.path.dirname(__file__), "paytest.py")}
EXPERIMENTAL_FEATURES = int(os.environ.get("EXPERIMENTAL_FEATURES", "0"))


def test_start(node_factory):
    node_factory.get_node(options=pluginopt)


def test_invoice(node_factory):
    l1 = node_factory.get_node(options=pluginopt)
    inv = l1.rpc.testinvoice('03' * 33)
    details = l1.rpc.decodepay(inv['invoice'])
    pprint(details)


def test_simple_pay(node_factory):
    """ l1 generates and pays an invoice on behalf of l2.
    """
    l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)

    inv = l1.rpc.testinvoice(destination=l2.info['id'], amount=1)['invoice']
    details = l1.rpc.decodepay(inv)
    pprint(details)

    # Paying the invoice without the reinterpretation from paytest
    # fails with an incorrect-or-unknown-payment-details error.
    with pytest.raises(RpcError, match=r'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        l1.rpc.pay(inv)


def test_mpp_pay(node_factory):
    """ l1 sends a payment that is going to be split.
    """
    l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
    res = l1.rpc.paytest(l2.info['id'], 2 * 10**8)

    l2.daemon.wait_for_log(r'Received 200000000/200000000 with [0-9]+ parts')

    parts = res['status']['attempts']
    assert len(parts) > 2  # Initial split + >1 part

    failures = [p['failure']['data'] for p in parts if 'failure' in p and 'data' in p['failure']]
    pprint(failures)

    outcomes = [f['failcode'] for f in failures]
    is16399 = [p == 16399 for p in outcomes]
    assert all(is16399)
    assert len(is16399) >= 1


def test_incoming_payment(node_factory):
    """Ensure that we don't fail if the payment is not a paytest.
    """
    l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
    inv = l2.rpc.invoice(42, 'lbl', 'desc')['bolt11']
    l1.rpc.pay(inv)

    plugins = [p['name'] for p in l2.rpc.listconfigs()['plugins']]
    assert 'paytest.py' in plugins

    plugins = [p['name'] for p in l1.rpc.listconfigs()['plugins']]
    assert 'paytest.py' in plugins

32  Unmaintained/probe/README.md  Normal file
@@ -0,0 +1,32 @@
# Network Probe plugin

This plugin regularly performs a random probe of the network by sending a
payment to a random node in the network, with a random `payment_hash`, and
observing how the network reacts. The random `payment_hash` results in the
payments being rejected at the destination, so no funds are actually
transferred. The error messages however allow us to gather some information
about the success probability of a payment, and the stability of the channels.

The random selection of destination nodes is a worst case scenario, since it's
likely that most of the nodes in the network are leaf nodes that are not
well-connected and often offline at any point in time. Expect to see a lot of
errors about being unable to route these payments as a result.

The probe data is stored in a sqlite3 database for later inspection and to be
able to eventually draw pretty plots about how the network stability changes
over time. For now you can inspect the results using the `sqlite3` command
line utility:

```bash
sqlite3 ~/.lightning/probes.db "select destination, erring_channel, failcode from probes"
```

Failcodes -1 and 16399 are special:

 - -1 indicates that we were unable to find a route to the destination. This
   usually indicates that this is a leaf node that is currently offline.

 - 16399 is the code for unknown payment details and indicates a successful
   probe. The destination received the incoming payment but could not find a
   matching `payment_key`, which is expected since we generated the
   `payment_hash` at random :-)

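Beyond the raw query above, a short script can summarize the stored probes. This is a minimal sketch, assuming the default `~/.lightning/probes.db` path and the `probes` table created by the plugin; a failcode of 16399 is counted as a successful probe and -1 as unroutable.

```python
#!/usr/bin/env python3
"""Summarize probe results collected by probe.py (illustrative sketch)."""
import os
import sqlite3

db_path = os.path.expanduser("~/.lightning/probes.db")  # default database location
conn = sqlite3.connect(db_path)

total, ok, unroutable = conn.execute(
    "SELECT COUNT(*), "
    "       SUM(CASE WHEN failcode = 16399 THEN 1 ELSE 0 END), "
    "       SUM(CASE WHEN failcode = -1 THEN 1 ELSE 0 END) "
    "FROM probes"
).fetchone()

print(f"probes: {total}, successful: {ok or 0}, unroutable: {unroutable or 0}")
if total:
    print(f"success rate: {100 * (ok or 0) / total:.1f}%")
```
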
296
Unmaintained/probe/probe.py
Executable file
296
Unmaintained/probe/probe.py
Executable file
@@ -0,0 +1,296 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Plugin that probes the network for failed channels.
|
||||
|
||||
This plugin regularly performs a random probe of the network by sending a
|
||||
payment to a random node in the network, with a random `payment_hash`, and
|
||||
observing how the network reacts. The random `payment_hash` results in the
|
||||
payments being rejected at the destination, so no funds are actually
|
||||
transferred. The error messages however allow us to gather some information
|
||||
about the success probability of a payment, and the stability of the channels.
|
||||
|
||||
The random selection of destination nodes is a worst case scenario, since it's
|
||||
likely that most of the nodes in the network are leaf nodes that are not
|
||||
well-connected and often offline at any point in time. Expect to see a lot of
|
||||
errors about being unable to route these payments as a result of this.
|
||||
|
||||
The probe data is stored in a sqlite3 database for later inspection and to be
|
||||
able to eventually draw pretty plots about how the network stability changes
|
||||
over time. For now you can inspect the results using the `sqlite3` command
|
||||
line utility:
|
||||
|
||||
```bash
|
||||
sqlite3 ~/.lightning/probes.db "select destination, erring_channel, failcode from probes"
|
||||
```
|
||||
|
||||
Failcode -1 and 16399 are special:
|
||||
|
||||
- -1 indicates that we were unable to find a route to the destination. This
|
||||
usually indicates that this is a leaf node that is currently offline.
|
||||
|
||||
- 16399 is the code for unknown payment details and indicates a successful
|
||||
probe. The destination received the incoming payment but could not find a
|
||||
matching `payment_key`, which is expected since we generated the
|
||||
`payment_hash` at random :-)
|
||||
|
||||
"""
|
||||
from datetime import datetime
|
||||
from pyln.client import Plugin, RpcError
|
||||
from random import choice
|
||||
from sqlalchemy import Column, Integer, String, DateTime
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from time import sleep, time
|
||||
import heapq
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
import threading
|
||||
|
||||
|
||||
Base = declarative_base()
|
||||
plugin = Plugin()
|
||||
|
||||
exclusions = []
|
||||
temporary_exclusions = {}
|
||||
|
||||
|
||||
class Probe(Base):
|
||||
__tablename__ = "probes"
|
||||
id = Column(Integer, primary_key=True)
|
||||
destination = Column(String)
|
||||
route = Column(String)
|
||||
error = Column(String)
|
||||
erring_channel = Column(String)
|
||||
failcode = Column(Integer)
|
||||
payment_hash = Column(String)
|
||||
started_at = Column(DateTime)
|
||||
finished_at = Column(DateTime)
|
||||
amount = Column(Integer)
|
||||
|
||||
def jsdict(self):
|
||||
return {
|
||||
'id': self.id,
|
||||
'destination': self.destination,
|
||||
'amount': self.amount,
|
||||
'route': self.route,
|
||||
'erring_channel': self.erring_channel,
|
||||
'failcode': self.failcode,
|
||||
'started_at': str(self.started_at),
|
||||
'finished_at': str(self.finished_at),
|
||||
}
|
||||
|
||||
|
||||
def start_probe(plugin):
|
||||
t = threading.Thread(target=probe, args=[plugin, None])
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
|
||||
@plugin.async_method('probe')
|
||||
def probe(plugin, request, node_id=None, amount=10000, **kwargs):
|
||||
res = None
|
||||
if node_id is None:
|
||||
nodes = plugin.rpc.listnodes()['nodes']
|
||||
node_id = choice(nodes)['nodeid']
|
||||
|
||||
s = plugin.Session()
|
||||
p = Probe(
|
||||
destination=node_id,
|
||||
started_at=datetime.now(),
|
||||
amount=amount
|
||||
)
|
||||
s.add(p)
|
||||
|
||||
try:
|
||||
route = plugin.rpc.getroute(
|
||||
node_id,
|
||||
msatoshi=amount,
|
||||
riskfactor=1,
|
||||
exclude=exclusions + list(temporary_exclusions.keys())
|
||||
)['route']
|
||||
p.route = ','.join([r['channel'] for r in route])
|
||||
p.payment_hash = ''.join(choice(string.hexdigits) for _ in range(64))
|
||||
except RpcError:
|
||||
p.failcode = -1
|
||||
res = p.jsdict()
|
||||
s.commit()
|
||||
return request.set_result(res) if request else None
|
||||
|
||||
s.commit()
|
||||
plugin.rpc.sendpay(route, p.payment_hash)
|
||||
plugin.pending_probes.append({
|
||||
'request': request,
|
||||
'probe_id': p.id,
|
||||
'payment_hash': p.payment_hash,
|
||||
'callback': complete_probe,
|
||||
'plugin': plugin,
|
||||
})
|
||||
|
||||
|
||||
@plugin.method('traceroute')
|
||||
def traceroute(plugin, node_id, **kwargs):
|
||||
traceroute = {
|
||||
'destination': node_id,
|
||||
'started_at': str(datetime.now()),
|
||||
'probes': [],
|
||||
}
|
||||
try:
|
||||
traceroute['route'] = plugin.rpc.getroute(
|
||||
traceroute['destination'],
|
||||
msatoshi=10000,
|
||||
riskfactor=1,
|
||||
)['route']
|
||||
traceroute['payment_hash'] = ''.join(random.choice(string.hexdigits) for _ in range(64))
|
||||
except RpcError:
|
||||
traceroute['failcode'] = -1
|
||||
return traceroute
|
||||
|
||||
# For each prefix length, shorten the route and attempt the payment
|
||||
for i in range(1, len(traceroute['route']) + 1):
|
||||
probe = {
|
||||
'route': traceroute['route'][:i],
|
||||
'payment_hash': ''.join(random.choice(string.hexdigits) for _ in range(64)),
|
||||
'started_at': str(datetime.now()),
|
||||
}
|
||||
probe['destination'] = probe['route'][-1]['id']
|
||||
plugin.rpc.sendpay(probe['route'], probe['payment_hash'])
|
||||
|
||||
try:
|
||||
plugin.rpc.waitsendpay(probe['payment_hash'], timeout=30)
|
||||
raise ValueError("The recipient guessed the preimage? Cryptography is broken!!!")
|
||||
except RpcError as e:
|
||||
probe['finished_at'] = str(datetime.now())
|
||||
if e.error['code'] == 200:
|
||||
probe['error'] = "Timeout"
|
||||
break
|
||||
else:
|
||||
probe['error'] = e.error['data']
|
||||
probe['failcode'] = e.error['data']['failcode']
|
||||
|
||||
traceroute['probes'].append(probe)
|
||||
|
||||
return traceroute
|
||||
|
||||
|
||||
@plugin.method('probe-stats')
|
||||
def stats(plugin):
|
||||
return {
|
||||
'pending_probes': len(plugin.pending_probes),
|
||||
'exclusions': len(exclusions),
|
||||
'temporary_exclusions': len(temporary_exclusions),
|
||||
}
|
||||
|
||||
|
||||
def complete_probe(plugin, request, probe_id, payment_hash):
|
||||
s = plugin.Session()
|
||||
p = s.query(Probe).get(probe_id)
|
||||
try:
|
||||
plugin.rpc.waitsendpay(p.payment_hash)
|
||||
except RpcError as e:
|
||||
error = e.error['data']
|
||||
p.erring_channel = e.error['data']['erring_channel']
|
||||
p.failcode = e.error['data']['failcode']
|
||||
p.error = json.dumps(error)
|
||||
|
||||
if p.failcode in [16392, 16394]:
|
||||
exclusion = "{erring_channel}/{erring_direction}".format(**error)
|
||||
print('Adding exclusion for channel {} ({} total))'.format(
|
||||
exclusion, len(exclusions))
|
||||
)
|
||||
exclusions.append(exclusion)
|
||||
|
||||
if p.failcode in [21, 4103]:
|
||||
exclusion = "{erring_channel}/{erring_direction}".format(**error)
|
||||
print('Adding temporary exclusion for channel {} ({} total))'.format(
|
||||
exclusion, len(temporary_exclusions))
|
||||
)
|
||||
expiry = time() + plugin.probe_exclusion_duration
|
||||
temporary_exclusions[exclusion] = expiry
|
||||
|
||||
p.finished_at = datetime.now()
|
||||
res = p.jsdict()
|
||||
s.commit()
|
||||
s.close()
|
||||
request.set_result(res)
|
||||
|
||||
|
||||
def poll_payments(plugin):
|
||||
"""Iterate through all probes and complete the finalized ones.
|
||||
"""
|
||||
for probe in plugin.pending_probes:
|
||||
p = plugin.rpc.listsendpays(None, payment_hash=probe['payment_hash'])
|
||||
if p['payments'][0]['status'] == 'pending':
|
||||
continue
|
||||
|
||||
plugin.pending_probes.remove(probe)
|
||||
cb = probe['callback']
|
||||
del probe['callback']
|
||||
cb(**probe)
|
||||
|
||||
|
||||
def clear_temporary_exclusion(plugin):
|
||||
timed_out = [k for k, v in temporary_exclusions.items() if v < time()]
|
||||
for k in timed_out:
|
||||
del temporary_exclusions[k]
|
||||
|
||||
print("Removed {}/{} temporary exclusions.".format(
|
||||
len(timed_out), len(temporary_exclusions))
|
||||
)
|
||||
|
||||
|
||||
def schedule(plugin):
|
||||
# List of scheduled calls with next runtime, function and interval
|
||||
next_runs = [
|
||||
(time() + 300, clear_temporary_exclusion, 300),
|
||||
(time() + plugin.probe_interval, start_probe, plugin.probe_interval),
|
||||
(time() + 1, poll_payments, 1),
|
||||
]
|
||||
heapq.heapify(next_runs)
|
||||
|
||||
while True:
|
||||
n = heapq.heappop(next_runs)
|
||||
t = n[0] - time()
|
||||
if t > 0:
|
||||
sleep(t)
|
||||
# Call the function
|
||||
n[1](plugin)
|
||||
|
||||
# Schedule the next run
|
||||
heapq.heappush(next_runs, (time() + n[2], n[1], n[2]))
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(configuration, options, plugin):
|
||||
plugin.probe_interval = int(options['probe-interval'])
|
||||
plugin.probe_exclusion_duration = int(options['probe-exclusion-duration'])
|
||||
|
||||
db_filename = 'sqlite:///' + os.path.join(
|
||||
configuration['lightning-dir'],
|
||||
'probes.db'
|
||||
)
|
||||
|
||||
engine = create_engine(db_filename, echo=True)
|
||||
Base.metadata.create_all(engine)
|
||||
plugin.Session = sessionmaker()
|
||||
plugin.Session.configure(bind=engine)
|
||||
t = threading.Thread(target=schedule, args=[plugin])
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
# Probes that are still pending and need to be checked against.
|
||||
plugin.pending_probes = []
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'probe-interval',
|
||||
'3600',
|
||||
'How many seconds should we wait between probes?'
|
||||
)
|
||||
plugin.add_option(
|
||||
'probe-exclusion-duration',
|
||||
'1800',
|
||||
'How many seconds should temporarily failed channels be excluded?'
|
||||
)
|
||||
plugin.run()
|
||||
2  Unmaintained/probe/requirements.txt  Normal file
@@ -0,0 +1,2 @@
sqlalchemy==1.3.6
pyln-client>=0.7.3

55  Unmaintained/probe/test_probe.py  Normal file
@@ -0,0 +1,55 @@
import unittest
import os
from pyln.testing.fixtures import *  # noqa: F401,F403

plugin_path = os.path.join(os.path.dirname(__file__), "probe.py")


def test_probe_starts(node_factory):
    l1 = node_factory.get_node()
    # Test dynamically
    l1.rpc.plugin_start(plugin_path)
    l1.rpc.plugin_stop(plugin_path)
    l1.rpc.plugin_start(plugin_path)
    l1.stop()
    # Then statically
    l1.daemon.opts["plugin"] = plugin_path
    l1.start()


def test_probe(node_factory):
    l1, l2, l3, l4 = node_factory.line_graph(
        4,
        opts=[
            {'plugin': plugin_path},
            {},
            {},
            {}
        ],
        wait_for_announce=True
    )

    res = l1.rpc.probe(l4.info['id'])
    assert(res['destination'] == l4.info['id'])
    assert(res['failcode'] == 16399)


def test_route_unreachable(node_factory):
    l1, l2, l3, l4 = node_factory.line_graph(
        4,
        opts=[
            {'plugin': plugin_path},
            {},
            {},
            {}
        ],
        wait_for_announce=True
    )

    l2.rpc.close(l3.info['id'])

    res = l1.rpc.probe(l4.info['id'])
    assert(res['destination'] == l4.info['id'])
    assert(res['failcode'] == 16394)
    route = res['route'].split(',')
    assert(route.index(res['erring_channel']) == 1)

18  Unmaintained/prometheus/README.md  Normal file
@@ -0,0 +1,18 @@
# Prometheus plugin for c-lightning

This plugin exposes some key metrics from c-lightning in the prometheus format
so they can be scraped, plotted, and used to create alerts. The plugin adds
the following command line argument:

 - `prometheus-listen`: the IP address and port to bind the HTTP server to
   (default: `127.0.0.1:9750`)

Exposed variables include:

 - `node`: ID, version, ...
 - `peers`: whether they are connected, and how many channels are currently
   open
 - `channels`: fund allocations, spendable funds, and how many unresolved
   HTLCs are currently attached to the channel
 - `funds`: satoshis in on-chain outputs, satoshis allocated to channels and
   the total sum (may be inaccurate during channel resolution).

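As a quick smoke test of the exporter (and a sketch of what a scraper sees), the endpoint can be fetched with plain Python; the address below is the plugin's default `prometheus-listen` value, adjust it if you changed the option.

```python
#!/usr/bin/env python3
"""Fetch the metrics endpoint exposed by the prometheus plugin and print its lightning_* lines."""
import urllib.request

URL = "http://127.0.0.1:9750"  # default `prometheus-listen` address

with urllib.request.urlopen(URL) as resp:
    body = resp.read().decode("utf-8")

for line in body.splitlines():
    if line.startswith("lightning_"):
        print(line)
```
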
239
Unmaintained/prometheus/prometheus.py
Executable file
239
Unmaintained/prometheus/prometheus.py
Executable file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env python3
|
||||
from pyln.client import Plugin
|
||||
from prometheus_client import start_http_server, CollectorRegistry
|
||||
from prometheus_client.core import InfoMetricFamily, GaugeMetricFamily
|
||||
from sys import exit
|
||||
|
||||
plugin = Plugin()
|
||||
|
||||
|
||||
class BaseLnCollector(object):
|
||||
def __init__(self, rpc, registry):
|
||||
self.rpc = rpc
|
||||
self.registry = registry
|
||||
|
||||
|
||||
class NodeCollector(BaseLnCollector):
|
||||
def collect(self):
|
||||
info = self.rpc.getinfo()
|
||||
info_labels = {k.replace('-', '_'): v for k, v in info.items() if isinstance(v, str)}
|
||||
node_info_fam = InfoMetricFamily(
|
||||
'lightning_node',
|
||||
'Static node information',
|
||||
labels=info_labels.keys(),
|
||||
)
|
||||
node_info_fam.add_metric(info_labels, info_labels)
|
||||
yield node_info_fam
|
||||
|
||||
blockheight = info['blockheight']
|
||||
yield GaugeMetricFamily(
|
||||
'lightning_node_blockheight',
|
||||
"Current Bitcoin blockheight on this node.",
|
||||
value=blockheight,
|
||||
)
|
||||
|
||||
fees_msat = int(info.get(
|
||||
"fees_collected_msat",
|
||||
info.get("msatoshi_fees_collected", None)
|
||||
))
|
||||
yield GaugeMetricFamily(
|
||||
'lightning_fees_collected_msat',
|
||||
'How much have we been paid to route payments?',
|
||||
value=fees_msat,
|
||||
)
|
||||
|
||||
|
||||
class FundsCollector(BaseLnCollector):
|
||||
def collect(self):
|
||||
funds = self.rpc.listfunds()
|
||||
print(funds['outputs'])
|
||||
output_funds = sum(
|
||||
[o['amount_msat'].to_satoshi() for o in funds['outputs']]
|
||||
)
|
||||
channel_funds = sum(
|
||||
[c['our_amount_msat'].to_satoshi() for c in funds['channels']]
|
||||
)
|
||||
total = output_funds + channel_funds
|
||||
|
||||
yield GaugeMetricFamily(
|
||||
'lightning_funds_total',
|
||||
"Total satoshis we own on this node.",
|
||||
value=total,
|
||||
)
|
||||
yield GaugeMetricFamily(
|
||||
'lightning_funds_output',
|
||||
"On-chain satoshis at our disposal",
|
||||
value=output_funds,
|
||||
)
|
||||
yield GaugeMetricFamily(
|
||||
'lightning_funds_channel',
|
||||
"Satoshis in channels.",
|
||||
value=channel_funds,
|
||||
)
|
||||
|
||||
|
||||
class PeerCollector(BaseLnCollector):
|
||||
def collect(self):
|
||||
peers = self.rpc.listpeers()['peers']
|
||||
|
||||
connected = GaugeMetricFamily(
|
||||
'lightning_peer_connected',
|
||||
'Is the peer currently connected?',
|
||||
labels=['id'],
|
||||
)
|
||||
count = GaugeMetricFamily(
|
||||
'lightning_peer_channels',
|
||||
"The number of channels with the peer",
|
||||
labels=['id'],
|
||||
)
|
||||
|
||||
channels = self.rpc.listpeerchannels()['channels']
|
||||
# Associate each channel with a peer
|
||||
peers = {}
|
||||
conn = {}
|
||||
for c in channels:
|
||||
peer_id = c['peer_id']
|
||||
peers[peer_id] = peers.get(peer_id, 0) + 1
|
||||
conn[peer_id] = conn.get(peer_id, 0) + c['peer_connected']
|
||||
|
||||
for p in peers.keys():
|
||||
labels = [p]
|
||||
count.add_metric(labels, peers[p])
|
||||
connected.add_metric(labels, conn.get(p, 0))
|
||||
|
||||
return [count, connected]
|
||||
|
||||
|
||||
class ChannelsCollector(BaseLnCollector):
|
||||
def collect(self):
|
||||
balance_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_balance',
|
||||
'How many funds are at our disposal?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
spendable_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_spendable',
|
||||
'How much can we currently send over this channel?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
total_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_capacity',
|
||||
'How many funds are in this channel in total?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
htlc_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_htlcs',
|
||||
'How many HTLCs are currently active on this channel?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
|
||||
# Incoming routing statistics
|
||||
in_payments_offered_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_in_payments_offered',
|
||||
'How many incoming payments did we try to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
in_payments_fulfilled_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_in_payments_fulfilled',
|
||||
'How many incoming payments did we succeed to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
in_msatoshi_offered_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_in_msatoshi_offered',
|
||||
'How many incoming msats did we try to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
in_msatoshi_fulfilled_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_in_msatoshi_fulfilled',
|
||||
'How many incoming msats did we succeed to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
|
||||
# Outgoing routing statistics
|
||||
out_payments_offered_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_out_payments_offered',
|
||||
'How many outgoing payments did we try to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
out_payments_fulfilled_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_out_payments_fulfilled',
|
||||
'How many outgoing payments did we succeed to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
out_msatoshi_offered_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_out_msatoshi_offered',
|
||||
'How many outgoing msats did we try to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
out_msatoshi_fulfilled_gauge = GaugeMetricFamily(
|
||||
'lightning_channel_out_msatoshi_fulfilled',
|
||||
'How many outgoing msats did we succeed to forward?',
|
||||
labels=['id', 'scid', 'alias'],
|
||||
)
|
||||
|
||||
channels = self.rpc.listpeerchannels()['channels']
|
||||
for c in channels:
|
||||
# append alias for human readable labels, if no label is found fill with shortid.
|
||||
node = self.rpc.listnodes(c['peer_id'])['nodes']
|
||||
if len(node) != 0 and 'alias' in node[0]:
|
||||
alias = node[0]['alias']
|
||||
else:
|
||||
alias = 'unknown'
|
||||
|
||||
labels = [c['peer_id'], c.get('short_channel_id', c.get('channel_id')), alias]
|
||||
balance_gauge.add_metric(labels, c['to_us_msat'].to_satoshi())
|
||||
spendable_gauge.add_metric(labels,
|
||||
c['spendable_msat'].to_satoshi())
|
||||
total_gauge.add_metric(labels, c['total_msat'].to_satoshi())
|
||||
htlc_gauge.add_metric(labels, len(c['htlcs']))
|
||||
|
||||
in_payments_offered_gauge.add_metric(labels, c['in_payments_offered'])
|
||||
in_payments_fulfilled_gauge.add_metric(labels, c['in_payments_fulfilled'])
|
||||
in_msatoshi_offered_gauge.add_metric(labels, int(c['in_offered_msat']))
|
||||
in_msatoshi_fulfilled_gauge.add_metric(labels, int(c['in_fulfilled_msat']))
|
||||
|
||||
out_payments_offered_gauge.add_metric(labels, c['out_payments_offered'])
|
||||
out_payments_fulfilled_gauge.add_metric(labels, c['out_payments_fulfilled'])
|
||||
out_msatoshi_offered_gauge.add_metric(labels, int(c['out_offered_msat']))
|
||||
out_msatoshi_fulfilled_gauge.add_metric(labels, int(c['out_fulfilled_msat']))
|
||||
|
||||
return [
|
||||
htlc_gauge,
|
||||
total_gauge,
|
||||
spendable_gauge,
|
||||
balance_gauge,
|
||||
in_payments_offered_gauge,
|
||||
in_payments_fulfilled_gauge,
|
||||
in_msatoshi_offered_gauge,
|
||||
in_msatoshi_fulfilled_gauge,
|
||||
out_payments_offered_gauge,
|
||||
out_payments_fulfilled_gauge,
|
||||
out_msatoshi_offered_gauge,
|
||||
out_msatoshi_fulfilled_gauge,
|
||||
]
|
||||
|
||||
|
||||
@plugin.init()
|
||||
def init(options, configuration, plugin):
|
||||
s = options['prometheus-listen'].rpartition(':')
|
||||
if len(s) != 3 or s[1] != ':':
|
||||
print("Could not parse prometheus-listen address")
|
||||
exit(1)
|
||||
ip, port = s[0], int(s[2])
|
||||
|
||||
registry = CollectorRegistry()
|
||||
start_http_server(addr=ip, port=port, registry=registry)
|
||||
registry.register(NodeCollector(plugin.rpc, registry))
|
||||
registry.register(FundsCollector(plugin.rpc, registry))
|
||||
registry.register(PeerCollector(plugin.rpc, registry))
|
||||
registry.register(ChannelsCollector(plugin.rpc, registry))
|
||||
|
||||
|
||||
plugin.add_option(
|
||||
'prometheus-listen',
|
||||
'127.0.0.1:9750',
|
||||
'Address and port to bind to'
|
||||
)
|
||||
|
||||
|
||||
plugin.run()
|
||||
2  Unmaintained/prometheus/requirements.txt  Normal file
@@ -0,0 +1,2 @@
prometheus-client==0.6.0
pyln-client~=0.9.3

40  Unmaintained/prometheus/test_prometheus.py  Normal file
@@ -0,0 +1,40 @@
import os
from pyln.testing.fixtures import *  # noqa: F401,F403
import urllib.request
from ephemeral_port_reserve import reserve

plugin_path = os.path.join(os.path.dirname(__file__), "prometheus.py")


def test_prometheus_starts(node_factory):
    l1 = node_factory.get_node()
    # Test dynamically
    l1.rpc.plugin_start(plugin_path)
    l1.rpc.plugin_stop(plugin_path)
    l1.rpc.plugin_start(plugin_path)
    l1.stop()
    # Then statically
    l1.daemon.opts["plugin"] = plugin_path
    l1.start()


def test_prometheus_scrape(node_factory):
    """Test that we can scrape correctly.
    """
    l1 = node_factory.get_node(options={'plugin': plugin_path})
    scrape = urllib.request.urlopen("http://localhost:9750")


def test_prometheus_channels(node_factory):
    port = reserve()
    l1, l2, l3 = node_factory.line_graph(
        3,
        opts=[
            {},
            {'plugin': plugin_path, 'prometheus-listen': f'127.0.0.1:{port}'},
            {}
        ]
    )
    scrape = urllib.request.urlopen(f'http://localhost:{port}')
    print(scrape)

101  Unmaintained/rebalance/README.md  Normal file
@@ -0,0 +1,101 @@
# Rebalance plugin

This plugin moves liquidity between your channels using circular payments.


## Installation

For general plugin installation instructions see the repo's main
[README.md](https://github.com/lightningd/plugins/blob/master/README.md#Installation)


## Usage

Once the plugin is installed and active, there are four additional methods for helping to rebalance channels:
1) You can call `lightning-cli rebalanceall` to automatically fix the liquidity of all of your channels.
2) `lightning-cli rebalancestop` stops an ongoing `rebalanceall`.
3) Or you can call `lightning-cli rebalance outgoing_scid incoming_scid` to rebalance individual channels.
4) `lightning-cli rebalancereport` shows information: plugin settings, past rebalance stats, etc.

## Automatic rebalance

A lightning node usually has multiple channels of different sizes. The node can perform best if all channels have `{enough_liquidity}` in both directions. So the rebalance has multiple purposes with different priorities:
1) **The primary goal** is to ensure all channels have `{enough_liquidity}` in both directions, or, if a given channel is too small for that, a 50/50 liquidity ratio.
2) **The secondary goal** is to distribute the remaining liquidity evenly between the big channels.
3) In the long run it is very important **to do this economically**: the fees paid to fix liquidity have to be cheaper than the fees earned from the forwards that can ruin the liquidity again. (This assumes your node has a rational fee setting.) This way the automatic rebalance can run regularly, and your node can earn more from forwarding than it spends on rebalancing.

If the plugin cannot find a circular route cheap enough to rebalance economically, it does nothing by default, so it never causes a loss for the user.

#### Rebalancing strategy

As a first step, depending on the actual situation, the plugin needs a value for `{enough_liquidity}`. It searches for the maximum possible threshold for which, in theory, every channel can be balanced beyond that threshold, while channels smaller than `threshold * 2` can be balanced to a 50/50 ratio. `{enough_liquidity}` will be half of this maximum threshold (a simplified sketch of this search is shown below).

The next step is to calculate the `{ideal_ratio}` for big channels. Beyond the `{enough_liquidity}` threshold, big channels should share the remaining liquidity evenly, so every big channel's liquidity ratio should be close to the `{ideal_ratio}`.

Once the current `{enough_liquidity}` threshold and `{ideal_ratio}` are known, the plugin checks every possible channel pair to find a proper rebalance opportunity. If it finds a matching pair, it calls the individual rebalance method for them. If the rebalance fails, the plugin tries again with a smaller amount until it reaches the minimum rebalancable amount or the rebalance succeeds.

This process may take a while. Automatic rebalance can run for hours in the background, but you can stop it anytime with `lightning-cli rebalancestop`.

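The sketch below illustrates that threshold search in isolation. It is not the plugin's actual code: the channel capacities and balances are made-up example values, and the feasibility rule is reduced to its essence (channels of at least `threshold * 2` need `threshold` on each side, smaller ones a 50/50 split).

```python
"""Simplified illustration of the {enough_liquidity} threshold search."""

# Hypothetical channels as (total capacity, our current balance), in sats.
channels = [(1_000_000, 900_000), (500_000, 50_000), (2_000_000, 400_000)]


def feasible(threshold, channels):
    """Could every channel, in theory, reach its target for this threshold?

    Rebalancing only moves our own funds between channels, so the sum of our
    balances must fit between the summed minimum and maximum our-side targets.
    """
    ours_total = sum(ours for _, ours in channels)
    min_ours = sum(threshold if total >= 2 * threshold else total / 2
                   for total, _ in channels)
    max_ours = sum(total - threshold if total >= 2 * threshold else total / 2
                   for total, _ in channels)
    return min_ours <= ours_total <= max_ours


def max_feasible_threshold(channels, step=1_000):
    best, t = 0, step
    limit = max(total for total, _ in channels) // 2
    while t <= limit:
        if feasible(t, channels):
            best = t
        t += step
    return best


# The plugin takes half of the maximum feasible threshold as {enough_liquidity}.
enough_liquidity = max_feasible_threshold(channels) / 2
print(enough_liquidity)
```
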

#### Parameters for rebalanceall

- OPTIONAL: The `min_amount` parameter sets the minimum rebalancable amount in millisatoshis. The parameter can also be specified in other denominations by appending a valid suffix, e.g. '1000000sat', '0.01btc' or '10mbtc'. The default value is '50000sat'.
- OPTIONAL: The `feeratio` parameter sets how much a rebalance may cost as a ratio of your default fee. Its default value is `0.5`, which means it can use at most half of your node's default fee. Both parameters can also be passed over the RPC interface, as shown below.
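This is a minimal sketch of such an RPC call using `pyln-client`; the socket path is an assumption for a default mainnet setup.

```python
from pyln.client import LightningRpc

rpc = LightningRpc("/home/user/.lightning/bitcoin/lightning-rpc")  # adjust to your lightning-dir

# Start an automatic rebalance run with a higher minimum amount and a more
# generous fee budget than the defaults.
print(rpc.call("rebalanceall", {"min_amount": "100000sat", "feeratio": 1.0}))

# Stop it again if it runs for too long.
print(rpc.call("rebalancestop"))
```
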

#### Tips and Tricks for automatic rebalance

- It may only work with well-connected nodes. You should have several different channels to have a good chance of success.
- Your node should have a rational default fee setting. If you use cheaper fees than your neighbors, the plugin probably cannot find a cheap enough circular route to rebalance.

## Individual channel rebalance

You can use `lightning-cli` to rebalance channels like this:

```
lightning-cli rebalance outgoing_scid incoming_scid [msatoshi] [retry_for] [maxfeepercent] [exemptfee] [getroute_method]
```

The underlying plugin method has the following signature:

```python
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
              retry_for: int = 60, maxfeepercent: float = 0.5,
              exemptfee: Millisatoshi = Millisatoshi(5000),
              getroute_method=None):
```

If you want to skip/default certain optional parameters but use others, you can
always use the `lightning-cli -k` (key=value) syntax like this:

```bash
lightning-cli rebalance -k outgoing_scid=1514942x51x0 incoming_scid=1515133x10x0 maxfeepercent=1
```

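The same keyword-style call can also be issued over RPC with `pyln-client`. This is a minimal sketch; the socket path is an assumption and the short channel ids are the made-up examples from above.

```python
from pyln.client import LightningRpc

rpc = LightningRpc("/home/user/.lightning/bitcoin/lightning-rpc")  # adjust to your lightning-dir

result = rpc.call("rebalance", {
    "outgoing_scid": "1514942x51x0",   # example scid of the sending channel
    "incoming_scid": "1515133x10x0",   # example scid of the receiving channel
    "msatoshi": "1000000sat",          # optional; omit to aim for a 50/50 split
    "maxfeepercent": 1,
})
print(result)
```
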
#### Parameters for rebalance

- The `outgoing_scid` is the short_channel_id of the sending channel.
- The `incoming_scid` is the short_channel_id of the receiving channel.
- OPTIONAL: The `msatoshi` parameter sets the amount in milli-satoshis to be
  transferred. If the parameter is left out, the plugin will calculate an amount
  that will balance the channels 50%/50%. The parameter can also be given in
  other denominations by appending a suffix, e.g. '1000000sat', '0.01btc' or '10mbtc'.
- OPTIONAL: `retry_for` defines the number of seconds the plugin will retry to
  find a suitable route. Default: 60 seconds.
- OPTIONAL: `maxfeepercent` is a percentage limit of the money to be paid in
  fees and defaults to 0.5.
- OPTIONAL: The `exemptfee` option can be used for tiny payments which would be
  dominated by the fee leveraged by forwarding nodes. Setting `exemptfee`
  allows the `maxfeepercent` check to be skipped on fees that are smaller than
  exemptfee (default: 5000 millisatoshi).
- OPTIONAL: The `getroute_method` option selects the route search method and can
  be 'basic' or 'iterative'.
  'basic': tries all routes sequentially.
  'iterative': tries shorter and bigger routes first.

#### Tips and Tricks for individual rebalance

- To find the correct channel IDs, you can use the `summary` plugin which can
  be found [here](https://github.com/lightningd/plugins/tree/master/summary).
- The ideal amount is not too big, but not too small: it is difficult to find a
  route for a big payment, while some nodes refuse to forward amounts that are
  too small (i.e. less than a thousand msatoshi).
- After some failed attempts, it may be worth checking the `lightningd` logs for
  further information.
- Channels have a `channel_reserve_satoshis` value, which is usually 1% of the
  channel's total balance (e.g. roughly 10,000 sat on a 1,000,000 sat channel).
  Initially, this reserve may not be met, as only one side has funds; but the
  protocol ensures that there is always progress toward meeting this reserve,
  and once met, [it is maintained.](https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md#rationale)
  Therefore you cannot rebalance a channel to be completely empty or full.

23  Unmaintained/rebalance/clnutils.py  Normal file
@@ -0,0 +1,23 @@
import re


def cln_parse_rpcversion(string):
    """
    Parse cln version string to determine RPC version.

    cln switched from 'semver' alike `major.minor.sub[rcX][-mod]`
    to ubuntu style with version 22.11 `yy.mm[.patch][-mod]`
    make sure we can read all of them for (the next 80 years).
    """
    rpcversion = string
    if rpcversion.startswith('v'):              # strip leading 'v'
        rpcversion = rpcversion[1:]
    if rpcversion.find('-') != -1:              # strip mods
        rpcversion = rpcversion[:rpcversion.find('-')]
    if re.search('.*(rc[\\d]*)$', rpcversion):  # strip release candidates
        rpcversion = rpcversion[:rpcversion.find('rc')]
    if rpcversion.count('.') == 1:              # imply patch version 0 if not given
        rpcversion = rpcversion + '.0'

    # split and convert numeric string parts to actual integers
    return list(map(int, rpcversion.split('.')))

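As an illustration of the parsing rules above (not part of the committed file), here are a few sample version strings and the lists they map to; the strings themselves are arbitrary examples.

```python
# Illustrative check of cln_parse_rpcversion; assumes clnutils.py is importable.
from clnutils import cln_parse_rpcversion

assert cln_parse_rpcversion("v0.12.1") == [0, 12, 1]
assert cln_parse_rpcversion("0.11.0rc2-modded") == [0, 11, 0]  # mods and rc tags are stripped
assert cln_parse_rpcversion("v22.11") == [22, 11, 0]           # missing patch version implies 0
assert cln_parse_rpcversion("23.02.1") == [23, 2, 1]
print("all version strings parsed as expected")
```
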
1053  Unmaintained/rebalance/rebalance.py  Executable file
(File diff suppressed because it is too large)

1  Unmaintained/rebalance/requirements.txt  Normal file
@@ -0,0 +1 @@
pyln-client>=0.12

147
Unmaintained/rebalance/test_rebalance.py
Normal file
147
Unmaintained/rebalance/test_rebalance.py
Normal file
@@ -0,0 +1,147 @@
|
||||
import os
|
||||
from pyln.testing.fixtures import * # noqa: F401,F403
|
||||
from pyln.client import Millisatoshi
|
||||
|
||||
plugin_path = os.path.join(os.path.dirname(__file__), "rebalance.py")
|
||||
plugin_opt = {'plugin': plugin_path}
|
||||
|
||||
|
||||
# waits for a bunch of nodes HTLCs to settle
|
||||
def wait_for_all_htlcs(nodes):
|
||||
for n in nodes:
|
||||
n.wait_for_htlcs()
|
||||
|
||||
|
||||
# waits for all nodes to have all scids gossip active
|
||||
def wait_for_all_active(nodes, scids):
|
||||
for n in nodes:
|
||||
for scid in scids:
|
||||
n.wait_channel_active(scid)
|
||||
|
||||
|
||||
def test_rebalance_starts(node_factory):
|
||||
l1 = node_factory.get_node()
|
||||
# Test dynamically
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.rpc.plugin_stop(plugin_path)
|
||||
l1.rpc.plugin_start(plugin_path)
|
||||
l1.stop()
|
||||
# Then statically
|
||||
l1.daemon.opts["plugin"] = plugin_path
|
||||
l1.start()
|
||||
|
||||
|
||||
def test_rebalance_manual(node_factory, bitcoind):
|
||||
l1, l2, l3 = node_factory.line_graph(3, opts=plugin_opt)
|
||||
nodes = [l1, l2, l3]
|
||||
|
||||
# form a circle so we can do rebalancing
|
||||
l3.connect(l1)
|
||||
l3.fundchannel(l1)
|
||||
|
||||
# get scids
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid23 = l2.get_channel_scid(l3)
|
||||
scid31 = l3.get_channel_scid(l1)
|
||||
scids = [scid12, scid23, scid31]
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
for n in nodes:
|
||||
for scid in scids:
|
||||
n.wait_channel_active(scid)
|
||||
|
||||
# check we can do an auto amount rebalance
|
||||
result = l1.rpc.rebalance(scid12, scid31)
|
||||
print(result)
|
||||
assert result['status'] == 'complete'
|
||||
assert result['outgoing_scid'] == scid12
|
||||
assert result['incoming_scid'] == scid31
|
||||
assert result['hops'] == 3
|
||||
assert result['received'] == '500000000msat'
|
||||
|
||||
# wait until listpeers is up2date
|
||||
wait_for_all_htlcs(nodes)
|
||||
|
||||
# check that channels are now balanced
|
||||
c12 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]
|
||||
c13 = l1.rpc.listpeerchannels(l3.info['id'])['channels'][0]
|
||||
assert abs(0.5 - (Millisatoshi(c12['to_us_msat']) / Millisatoshi(c12['total_msat']))) < 0.01
|
||||
assert abs(0.5 - (Millisatoshi(c13['to_us_msat']) / Millisatoshi(c13['total_msat']))) < 0.01
|
||||
|
||||
# check we can do a manual amount rebalance in the other direction
|
||||
result = l1.rpc.rebalance(scid31, scid12, '250000000msat')
|
||||
assert result['status'] == 'complete'
|
||||
assert result['outgoing_scid'] == scid31
|
||||
assert result['incoming_scid'] == scid12
|
||||
assert result['hops'] == 3
|
||||
assert result['received'] == '250000000msat'
|
||||
|
||||
# briefly check rebalancereport works
|
||||
report = l1.rpc.rebalancereport()
|
||||
assert report.get('rebalanceall_is_running') is False
|
||||
assert report.get('total_successful_rebalances') == 2
|
||||
|
||||
|
||||
def test_rebalance_all(node_factory, bitcoind):
|
||||
l1, l2, l3 = node_factory.line_graph(3, opts=plugin_opt)
|
||||
nodes = [l1, l2, l3]
|
||||
|
||||
# check we get an error if theres just one channel
|
||||
result = l1.rpc.rebalanceall()
|
||||
assert result['message'] == 'Error: Not enough open channels to rebalance anything'
|
||||
|
||||
# now we add another 100% outgoing liquidity to l1 which does not help
|
||||
l4 = node_factory.get_node()
|
||||
l1.connect(l4)
|
||||
l1.fundchannel(l4)
|
||||
|
||||
# test this is still not possible
|
||||
result = l1.rpc.rebalanceall()
|
||||
assert result['message'] == 'Error: Not enough liquidity to rebalance anything'
|
||||
|
||||
# remove l4 it does not distort further testing
|
||||
l1.rpc.close(l1.get_channel_scid(l4))
|
||||
|
||||
# now we form a circle so we can do actually rebalanceall
|
||||
l3.connect(l1)
|
||||
l3.fundchannel(l1)
|
||||
|
||||
# get scids
|
||||
scid12 = l1.get_channel_scid(l2)
|
||||
scid23 = l2.get_channel_scid(l3)
|
||||
scid31 = l3.get_channel_scid(l1)
|
||||
scids = [scid12, scid23, scid31]
|
||||
|
||||
# wait for each others gossip
|
||||
bitcoind.generate_block(6)
|
||||
wait_for_all_active(nodes, scids)
|
||||
|
||||
# check that theres nothing to stop when theres nothing to stop
|
||||
result = l1.rpc.rebalancestop()
|
||||
assert result['message'] == "No rebalance is running, nothing to stop."
|
||||
|
||||
# check the rebalanceall starts
|
||||
result = l1.rpc.rebalanceall(feeratio=5.0) # we need high fees to work
|
||||
assert result['message'].startswith('Rebalance started')
|
||||
l1.daemon.wait_for_logs([f"tries to rebalance: {scid12} -> {scid31}",
|
||||
f"Automatic rebalance finished"])
|
||||
|
||||
# check additional calls to stop return 'nothing to stop' + last message
|
||||
result = l1.rpc.rebalancestop()['message']
|
||||
assert result.startswith("No rebalance is running, nothing to stop. "
|
||||
"Last 'rebalanceall' gave: Automatic rebalance finished")
|
||||
|
||||
# wait until listpeers is up2date
|
||||
wait_for_all_htlcs(nodes)
|
||||
|
||||
# check that channels are now balanced
|
||||
c12 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]
|
||||
c13 = l1.rpc.listpeerchannels(l3.info['id'])['channels'][0]
|
||||
assert abs(0.5 - (Millisatoshi(c12['to_us_msat']) / Millisatoshi(c12['total_msat']))) < 0.01
|
||||
assert abs(0.5 - (Millisatoshi(c13['to_us_msat']) / Millisatoshi(c13['total_msat']))) < 0.01
|
||||
|
||||
# briefly check rebalancereport works
|
||||
report = l1.rpc.rebalancereport()
|
||||
assert report.get('rebalanceall_is_running') is False
|
||||
assert report.get('total_successful_rebalances') == 2
|
||||
63
Unmaintained/summary/README.md
Normal file
63
Unmaintained/summary/README.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Summary plugin
|
||||
|
||||
This plugin is a little hack to show a summary of your node, including
|
||||
fiat amounts. If you have pylightning 0.0.7.1 or above, you get nice linegraphs,
|
||||
otherwise normal ASCII.
|
||||
|
||||
## Installation
|
||||
|
||||
For general plugin installation instructions see the repos main
|
||||
[README.md](https://github.com/lightningd/plugins/blob/master/README.md#Installation)
|
||||
|
||||
## Options:
|
||||
|
||||
* --summary-currency: Currency ticker to look up on bitaverage (default: `USD`)
|
||||
* --summary-currency-prefix: Prefix when printing currency (default: `USD $`)
|
||||

## Example Usage

Unfortunately the Python plugin framework doesn't pretty-print, nor does
lightning-cli, so the output is best viewed with `-H`:

```
$ lightning-cli -H summary
network=TESTNET
my_address=031a3478d481b92e3c28810228252898c5f0d82fc4d07f5210c4f34d4aba56b769@165.227.30.200
num_utxos=5
utxo_amount=1.20119332000btc (USD $4473.84)
num_channels=29
num_connected=2
num_gossipers=1
avail_out=0.27095103btc (USD $1009.16)
avail_in=2.05851379btc (USD $7666.93)
fees_collected=0.00000012341btc (USD $0.00)
channels_key=P=private O=offline
channels= ├────────────╢ (O):02ac05912f89e43b88de3472e8c3003b
          ├───────────╢ (O):02dd4cef0192611bc34cd1c3a0a7eb0f
          ╟────────────┤ (PO):02a13878947a133d7c96e70303a9bf27
          ║ (O):033e2db012833d997e3c
          ╟┤ (O):Kenny_Loggins
          ╟──────────────────────┤(O):DeutscheTestnetBank
          ╟─────────────────────┤ (O):BlueLagoon1
          ╟──────────────────────┤(O):0270dd38e8af9a64b4a483ab12b6aeb1
          ╟┤ (O):btctest.lnetwork.tokyo
          ╟─┤ (O):microbet.fun
          ╟──────────────────────┤(PO):02fcab6e34a2ad21be2a752ab96d13f5
          ╟──────────────────────┤(O):htlc.me
          ╟───┤ (O):02229ea9a7a4f9bf8bf25ce225079aed
          ╟─────────────────────┤ (O):025d5b572a94235cfcbdc429181b2b88
          ╟────────────┤ (PO):03c56de3a84336b4a939777ace9ecbef
          ╟────────┤ (O):LiteStrikeBTClnd
          ╟────────────────┤ (PO):037c9cf1cde4414c59407d547b7eac08
          ║ (O):03490a74e4def9125a84aee2d84e8cfe
          ├─────────┼─────────┤ (O):aranguren.org
          ║ (PO):03cc6603e1f6df535dd8b423284f2c09
          ║ (O):cyclopes
          ╟─────────────────────┤ (PO):02b73a2160863e925e9fa978b0ddc56b
          ╟───┤ (O):lnd-testnet.ignios.net
          ╟─┤ (PO):0327a104108173d4a4f34ab2cbc3084c
          ╟─┤ :dwarf
          ║ (PO):028133777757ce281658804dd82f5758
          ╟────────────┤ (PO):02db62ffff5c35be74e7f856bba136db
          ╟┤ (PO):03015ac044f5fa9768ededf6fed9c0ff
          ╟──────────────────────┤:0270685ca81a8e4d4d01
```
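
The `summary` RPC call also accepts optional `exclude`, `sortkey` and `ascii`
parameters (see `summary.py` below). For example, to sort by availability
while hiding one channel (the scid here is made up):

```
$ lightning-cli -H summary sortkey=avail exclude=580329x1665x1
```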
0
Unmaintained/summary/__init__.py
Normal file
4
Unmaintained/summary/requirements.txt
Normal file
@@ -0,0 +1,4 @@
pyln-client>=0.12.1
requests>=2.10.0
requests[socks]>=2.10.0
packaging>=14.1
370
Unmaintained/summary/summary.py
Executable file
@@ -0,0 +1,370 @@
#!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi
from packaging import version
from collections import namedtuple
from operator import attrgetter
from summary_avail import trace_availability, addpeer
import pyln.client
import requests
import threading
import time
import pickle
import sys


plugin = Plugin(autopatch=True)
datastore_key = ['summary', 'avail']

Channel = namedtuple('Channel', ['total', 'ours', 'theirs', 'pid', 'private', 'connected', 'scid', 'avail', 'base', 'ppm'])
Charset = namedtuple('Charset', ['double_left', 'left', 'bar', 'mid', 'right', 'double_right', 'empty'])
draw_boxch = Charset('╟', '├', '─', '┼', '┤', '╢', '║')
draw_ascii = Charset('#', '[', '-', '+', ']', '#', '|')

summary_description = "Gets summary information about this node.\n"\
                      "Pass a list of scids to the {exclude} parameter to exclude some channels from the outputs.\n"\
                      "Sort the result by using the {sortkey} parameter that can be one of 'total', 'ours', 'theirs', 'scid' (default), 'avail', 'base', 'ppm'."


class PeerThread(threading.Thread):
    def __init__(self):
        super().__init__()
        self.daemon = True

    def run(self):
        # delay initial execution, so peers have a chance to connect on startup
        time.sleep(plugin.avail_interval)

        while True:
            try:
                rpcpeers = plugin.rpc.listpeers()
                trace_availability(plugin, rpcpeers)
                write_datastore(plugin)
                plugin.log("[PeerThread] Peerstate wrote to datastore. "
                           "Sleeping now...", 'debug')
                time.sleep(plugin.avail_interval)
            except Exception as ex:
                plugin.log("[PeerThread] " + str(ex), 'warn')


class PriceThread(threading.Thread):
    def __init__(self, proxies):
        super().__init__()
        self.daemon = True
        self.proxies = proxies

    def run(self):
        while True:
            try:
                # NOTE: Bitstamp has DNS/proxy issues that can return a 404.
                # Workaround: retry up to 5 times with a delay.
                for _ in range(5):
                    r = requests.get('https://www.bitstamp.net/api/v2/ticker/btc{}'.format(plugin.currency.lower()), proxies=self.proxies)
                    if not r.status_code == 200:
                        time.sleep(1)
                        continue
                    break
                plugin.fiat_per_btc = float(r.json()['last'])
            except Exception as ex:
                plugin.log("[PriceThread] " + str(ex), 'warn')
            # Six hours is often enough for polling the fiat rate.
            time.sleep(6 * 3600)


def to_fiatstr(msat: Millisatoshi):
    return "{}{:.2f}".format(plugin.currency_prefix,
                             int(msat) / 10**11 * plugin.fiat_per_btc)
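
# (Worked example for to_fiatstr, purely illustrative numbers: a value of
# 150_000_000_000 msat is 150_000_000_000 / 10**11 = 1.5 BTC, so with
# fiat_per_btc = 30000.0 and the default prefix it renders as "USD $45000.00".)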


# appends an output table header that explains fields and capacity
def append_header(table, max_msat):
    short_str = Millisatoshi(max_msat).to_approx_str()
    draw = plugin.draw
    table.append("%c%-13sOUT/OURS %c IN/THEIRS%12s%c SCID FLAG BASE PPM AVAIL ALIAS"
                 % (draw.left, short_str, draw.mid, short_str, draw.right))


@plugin.method("summary", long_desc=summary_description)
def summary(plugin, exclude='', sortkey=None, ascii=None):
    """Gets summary information about this node."""

    # Sets ascii mode for this and future requests (if requested)
    if ascii is not None:
        if ascii:
            plugin.draw = draw_ascii
        else:
            plugin.draw = draw_boxch

    reply = {}
    info = plugin.rpc.getinfo()
    funds = plugin.rpc.listfunds()
    peers = plugin.rpc.listpeers()['peers']

    # Make it stand out if we're not on mainnet.
    if info['network'] != 'bitcoin':
        reply['network'] = info['network'].upper()

    if hasattr(plugin, 'my_address') and plugin.my_address:
        reply['my_address'] = plugin.my_address
    else:
        reply['warning_no_address'] = "NO PUBLIC ADDRESSES"

    utxos = [int(f['amount_msat']) for f in funds['outputs']
             if f['status'] == 'confirmed']
    reply['num_utxos'] = len(utxos)
    utxo_amount = Millisatoshi(sum(utxos))
    reply['utxo_amount'] = utxo_amount.to_btc_str()

    avail_out = Millisatoshi(0)
    avail_in = Millisatoshi(0)
    chans = []
    reply['num_channels'] = 0
    reply['num_connected'] = 0
    reply['num_gossipers'] = 0
    for p in peers:
        pid = p['id']
        channels = []
        if 'channels' in p:
            channels = p['channels']
        elif 'num_channels' in p and p['num_channels'] > 0:
            channels = plugin.rpc.listpeerchannels(pid)['channels']
        addpeer(plugin, p)
        active_channel = False
        for c in channels:
            if c['state'] != 'CHANNELD_NORMAL':
                continue
            active_channel = True
            if c['short_channel_id'] in exclude:
                continue
            if p['connected']:
                reply['num_connected'] += 1
            # spendable amount is what is on our side minus our reserve
            if c['our_reserve_msat'] < c['to_us_msat']:
                to_us = c['to_us_msat'] - c['our_reserve_msat']
            else:
                to_us = Millisatoshi(0)
            avail_out += to_us

            # We have to derive the amount on their side
            to_them = c['total_msat'] - c['to_us_msat']
            if c['their_reserve_msat'] < to_them:
                to_them = to_them - c['their_reserve_msat']
            else:
                to_them = Millisatoshi(0)
            avail_in += to_them
            reply['num_channels'] += 1
            chans.append(Channel(
                c['total_msat'],
                to_us, to_them,
                pid,
                c['private'],
                p['connected'],
                c['short_channel_id'],
                plugin.persist['p'][pid]['a'],
                Millisatoshi(c['fee_base_msat']),
                c['fee_proportional_millionths'],
            ))

        if not active_channel and p['connected']:
            reply['num_gossipers'] += 1

    reply['avail_out'] = avail_out.to_btc_str()
    reply['avail_in'] = avail_in.to_btc_str()
    reply['fees_collected'] = Millisatoshi(info['fees_collected_msat']).to_btc_str()

    if plugin.fiat_per_btc > 0:
        reply['utxo_amount'] += ' ({})'.format(to_fiatstr(utxo_amount))
        reply['avail_out'] += ' ({})'.format(to_fiatstr(avail_out))
        reply['avail_in'] += ' ({})'.format(to_fiatstr(avail_in))
        reply['fees_collected'] += ' ({})'.format(to_fiatstr(info['fees_collected_msat']))

    if len(chans) > 0:
        if sortkey is None or sortkey.lower() not in Channel._fields:
            sortkey = plugin.sortkey
        chans = sorted(chans, key=attrgetter(sortkey.lower()))
        reply['channels_flags'] = 'P:private O:offline'
        reply['channels'] = ["\n"]
        biggest = max(max(int(c.ours), int(c.theirs)) for c in chans)
        append_header(reply['channels'], biggest)
        for c in chans:
            # Create a simple line graph, 47 chars wide.
            our_len = int(round(int(c.ours) / biggest * 23))
            their_len = int(round(int(c.theirs) / biggest * 23))

            # We put the midpoint in the middle.
            draw = plugin.draw
            mid = draw.mid
            if our_len == 0:
                left = "{:>23}".format('')
                mid = draw.double_left
            else:
                left = "{:>23}".format(draw.left + draw.bar * (our_len - 1))

            if their_len == 0:
                right = "{:23}".format('')
                # Both 0 is a special case.
                if our_len == 0:
                    mid = draw.empty
                else:
                    mid = draw.double_right
            else:
                right = "{:23}".format(draw.bar * (their_len - 1) + draw.right)

            s = left + mid + right

            # output the short channel id, so things can be copy'n'pasted easily
            s += " {:14} ".format(c.scid)

            extra = ''
            if c.private:
                extra += 'P'
            else:
                extra += '_'
            if not c.connected:
                extra += 'O'
            else:
                extra += '_'
            s += '[{}] '.format(extra)

            # append fees
            s += ' {:4}'.format(c.base.millisatoshis)
            s += ' {:5} '.format(c.ppm)

            # append availability over the configured window
            s += '{:4.0%} '.format(c.avail)

            # append alias or id
            node = plugin.rpc.listnodes(c.pid)['nodes']
            if len(node) != 0 and 'alias' in node[0]:
                s += node[0]['alias']
            else:
                s += c.pid[0:32]
            reply['channels'].append(s)

    # Make modern lightning-cli format this human-readable by default!
    reply['format-hint'] = 'simple'
    return reply


def new_datastore():
    return {'p': {}, 'r': 0, 'v': 1}  # see summary_avail.py for the structure


def check_datastore(obj):
    if 'v' in obj and type(obj['v']) is int and obj['v'] == 1:
        return True
    return False


def load_datastore(plugin):
    entries = plugin.rpc.listdatastore(key=datastore_key)['datastore']
    if len(entries) == 0:
        plugin.log(f"Creating a new datastore '{datastore_key}'", 'debug')
        return new_datastore()
    persist = pickle.loads(bytearray.fromhex(entries[0]["hex"]))
    if not check_datastore(persist):
        plugin.log(f"Dismissing old datastore '{datastore_key}'", 'debug')
        return new_datastore()
    plugin.log(f"Reopened datastore '{datastore_key}' with {persist['r']} "
               f"runs and {len(persist['p'])} entries", 'debug')
    return persist


def write_datastore(plugin):
    hexstr = pickle.dumps(plugin.persist).hex()
    plugin.rpc.datastore(key=datastore_key, hex=hexstr, mode="create-or-replace")


@plugin.init()
def init(options, configuration, plugin):
    plugin.sortkey = options['summary-sortkey']
    if plugin.sortkey not in Channel._fields:
        plugin.sortkey = 'scid'  # default to 'scid' on unknown keys
    plugin.currency = options['summary-currency']
    plugin.currency_prefix = options['summary-currency-prefix']
    plugin.fiat_per_btc = 0

    plugin.avail_interval = float(options['summary-availability-interval'])
    plugin.avail_window = 60 * 60 * int(options['summary-availability-window'])
    plugin.persist = load_datastore(plugin)

    plugin.draw = draw_ascii
    # __version__ was introduced in 0.0.7.1, with utf8 passthrough support.
    if hasattr(pyln.client, "__version__") and version.parse(pyln.client.__version__) >= version.parse("0.0.7.1"):
        plugin.draw = draw_boxch
    if options.get('summary-ascii'):
        plugin.draw = draw_ascii

    info = plugin.rpc.getinfo()
    config = plugin.rpc.listconfigs()
    if 'always-use-proxy' in config and config['always-use-proxy']:
        paddr = config['proxy']
        # Default port is 9050
        if ':' not in paddr:
            paddr += ':9050'
        proxies = {'https': 'socks5h://' + paddr,
                   'http': 'socks5h://' + paddr}
    else:
        proxies = None

    # Measure availability
    PeerThread().start()
    # Try to grab the fiat conversion price
    PriceThread(proxies).start()

    # Prefer IPv4, otherwise take any address to give out.
    best_address = None
    for a in info['address']:
        if best_address is None:
            best_address = a
        elif a['type'] == 'ipv4' and best_address['type'] != 'ipv4':
            best_address = a

    if best_address:
        plugin.my_address = info['id'] + '@' + best_address['address']
        if best_address['port'] != 9735:
            plugin.my_address += ':' + str(best_address['port'])
    else:
        plugin.my_address = None

    plugin.log("Plugin summary.py initialized")


@plugin.subscribe("shutdown")
def on_rpc_command_callback(plugin, **kwargs):
    # FIXME: Writing the datastore does not work on exit, as the daemon is already gone.
    # plugin.log("Writing out datastore before shutting down")
    # write_datastore(plugin)
    sys.exit()


plugin.add_option(
    'summary-currency',
    'USD',
    'What currency should I look up on Bitstamp?'
)
plugin.add_option(
    'summary-currency-prefix',
    'USD $',
    'What prefix to use for currency'
)
plugin.add_option(
    'summary-availability-interval',
    300,
    'How often in seconds the availability should be calculated.'
)
plugin.add_option(
    'summary-availability-window',
    72,
    'How many hours the availability should be averaged over.'
)
plugin.add_option(
    'summary-sortkey',
    'scid',
    'Sort the channels list by a namedtuple key, defaults to "scid".'
)
plugin.add_option(
    'summary-ascii',
    False,
    'If ascii mode should be enabled by default',
    'flag'
)
plugin.run()
42
Unmaintained/summary/summary_avail.py
Normal file
@@ -0,0 +1,42 @@
# This is the persist object structure:
#
# {
#     "p": {                # peerstate
#         "PEER_ID": {      # the peer's id
#             "c": True,    # connected or not
#             "a": 1.0      # the availability value
#         }
#     },
#     "r": 123,             # the number of runs
#     "v": 1                # version
# }


# ensure an rpc peer is added to the peerstate
def addpeer(p, rpcpeer):
    pid = rpcpeer['id']
    if pid not in p.persist['p']:
        p.persist['p'][pid] = {
            'c': rpcpeer['connected'],
            'a': 1.0 if rpcpeer['connected'] else 0.0
        }


# exponentially smooth online/offline states of peers
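# (Rough worked example, numbers are illustrative: with avail_interval=300s and
# avail_window=72h the steady-state sample count is 72*3600/300 = 864, so
# alpha = 1/864 and beta = 863/864. Each run nudges a peer's availability 'a'
# towards 1.0 (online) or 0.0 (offline) by roughly 0.12%. While fewer than 864
# runs have happened, leadwin shrinks the window so that early samples still
# carry meaningful weight.)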
def trace_availability(p, rpcpeers):
    p.persist['r'] += 1
    leadwin = max(min(p.avail_window, p.persist['r'] * p.avail_interval), p.avail_interval)
    samples = leadwin / p.avail_interval
    alpha = 1.0 / samples
    beta = 1.0 - alpha

    for rpcpeer in rpcpeers['peers']:
        pid = rpcpeer['id']
        addpeer(p, rpcpeer)

        if rpcpeer['connected']:
            p.persist['p'][pid]['c'] = True
            p.persist['p'][pid]['a'] = 1.0 * alpha + p.persist['p'][pid]['a'] * beta
        else:
            p.persist['p'][pid]['c'] = False
            p.persist['p'][pid]['a'] = 0.0 * alpha + p.persist['p'][pid]['a'] * beta
295
Unmaintained/summary/test_summary.py
Normal file
@@ -0,0 +1,295 @@
import subprocess
import unittest
import re
import os

from pyln.client import Plugin
from pyln.testing.fixtures import *  # noqa: F401,F403
from pyln.testing.utils import wait_for

from .summary_avail import trace_availability

pluginopt = {'plugin': os.path.join(os.path.dirname(__file__), "summary.py")}


# returns a test plugin stub
def get_stub():
    plugin = Plugin()
    plugin.avail_interval = 60
    plugin.avail_window = 3600
    plugin.persist = {}
    plugin.persist['p'] = {}
    plugin.persist['r'] = 0
    plugin.persist['v'] = 1
    return plugin


def test_summary_peer_thread(node_factory):
    # Set a low PeerThread interval so we can test quickly.
    opts = {'summary-availability-interval': 0.5}
    opts.update(pluginopt)
    l1, l2 = node_factory.line_graph(2, opts=opts)
    l2id = l2.info['id']

    # when
    s1 = l1.rpc.summary()
    l2.stop()  # we stop l2 and wait for l1 to see that
    l1.daemon.wait_for_log(f".*{l2id}.*Peer connection lost.*")
    wait_for(lambda: l1.rpc.listpeers(l2id)['peers'][0]['connected'] is False)
    l1.daemon.wait_for_log("Peerstate wrote to datastore")
    s2 = l1.rpc.summary()

    # then
    avail1 = int(re.search(' ([0-9]*)% ', s1['channels'][2]).group(1))
    avail2 = int(re.search(' ([0-9]*)% ', s2['channels'][2]).group(1))
    assert(avail1 == 100)
    assert(avail2 > 0 and avail2 < avail1)


# tests the 72hr exponential availability tracing
# tests base algo and peerstate tracing
def test_summary_avail_101():
    # given
    plugin = get_stub()
    rpcpeers = {
        'peers': [
            {'id': '1', 'connected': True},
            {'id': '2', 'connected': False},
            {'id': '3', 'connected': True},
        ]
    }

    # when
    for i in range(100):
        trace_availability(plugin, rpcpeers)

    # then
    assert(plugin.persist['p']['1']['a'] == 1.0)
    assert(plugin.persist['p']['2']['a'] == 0.0)
    assert(plugin.persist['p']['3']['a'] == 1.0)
    assert(plugin.persist['p']['1']['c'] is True)
    assert(plugin.persist['p']['2']['c'] is False)
    assert(plugin.persist['p']['3']['c'] is True)


# tests for 50% downtime
def test_summary_avail_50():
    # given
    plugin = get_stub()
    rpcpeers_on = {
        'peers': [
            {'id': '1', 'connected': True},
        ]
    }
    rpcpeers_off = {
        'peers': [
            {'id': '1', 'connected': False},
        ]
    }

    # when
    for i in range(30):
        trace_availability(plugin, rpcpeers_on)
    for i in range(30):
        trace_availability(plugin, rpcpeers_off)

    # then
    assert(round(plugin.persist['p']['1']['a'], 3) == 0.5)


# tests for 2/3 downtime
def test_summary_avail_33():
    # given
    plugin = get_stub()
    rpcpeers_on = {
        'peers': [
            {'id': '1', 'connected': True},
        ]
    }
    rpcpeers_off = {
        'peers': [
            {'id': '1', 'connected': False},
        ]
    }

    # when
    for i in range(20):
        trace_availability(plugin, rpcpeers_on)
    for i in range(40):
        trace_availability(plugin, rpcpeers_off)

    # then
    assert(round(plugin.persist['p']['1']['a'], 3) == 0.333)


# tests for 1/3 downtime
def test_summary_avail_66():
    # given
    plugin = get_stub()
    rpcpeers_on = {
        'peers': [
            {'id': '1', 'connected': True},
        ]
    }
    rpcpeers_off = {
        'peers': [
            {'id': '1', 'connected': False},
        ]
    }

    # when
    for i in range(40):
        trace_availability(plugin, rpcpeers_on)
    for i in range(20):
        trace_availability(plugin, rpcpeers_off)

    # then
    assert(round(plugin.persist['p']['1']['a'], 3) == 0.667)


# checks the leading window is smaller if the interval count is low,
# i.e. when a node has just started
def test_summary_avail_leadwin():
    # given
    plugin = get_stub()
    rpcpeers_on = {
        'peers': [
            {'id': '1', 'connected': True},
        ]
    }
    rpcpeers_off = {
        'peers': [
            {'id': '1', 'connected': False},
        ]
    }

    # when
    trace_availability(plugin, rpcpeers_on)
    trace_availability(plugin, rpcpeers_on)
    trace_availability(plugin, rpcpeers_off)

    # then
    assert(round(plugin.persist['p']['1']['a'], 3) == 0.667)


# checks whether the peerstate is persistent
def test_summary_persist(node_factory):
    # Set a low PeerThread interval so we can test quickly.
    opts = {'summary-availability-interval': 0.5, 'may_reconnect': True}
    opts.update(pluginopt)
    l1, l2 = node_factory.line_graph(2, opts=opts)

    # when
    l1.daemon.logsearch_start = 0
    l1.daemon.wait_for_log("Creating a new datastore")
    l1.daemon.wait_for_log("Peerstate wrote to datastore")
    s1 = l1.rpc.summary()
    l2.stop()
    l1.restart()
    assert l1.daemon.is_in_log("Reopened datastore")
    l1.daemon.logsearch_start = len(l1.daemon.logs)
    l1.daemon.wait_for_log("Peerstate wrote to datastore")
    s2 = l1.rpc.summary()

    # then
    avail1 = int(re.search(' ([0-9]*)% ', s1['channels'][2]).group(1))
    avail2 = int(re.search(' ([0-9]*)% ', s2['channels'][2]).group(1))
    assert(avail1 == 100)
    assert(0 < avail2 < 100)


def test_summary_start(node_factory):
    # given
    l1 = node_factory.get_node(options=pluginopt)
    l2 = node_factory.get_node(options=pluginopt)
    l1.connect(l2)

    # when
    s = l1.rpc.summary()

    # then
    expected = {
        'format-hint': 'simple',
        'network': 'REGTEST',
        'num_channels': 0,
        'num_connected': 0,
        'num_gossipers': 1,
        'num_utxos': 0,
        'warning_no_address': 'NO PUBLIC ADDRESSES'
    }
    for k, v in expected.items():
        assert(s[k] == v)


def test_summary_ascii(node_factory):
    # given
    l1, l2 = node_factory.line_graph(2, opts=pluginopt)
    l3, l5 = node_factory.line_graph(2, opts={**pluginopt, 'summary-ascii': None})

    # when
    s1 = l1.rpc.summary()
    s2 = l1.rpc.summary(ascii=True)
    s3 = l1.rpc.summary()  # remembers the last call's ascii setting
    s4 = l1.rpc.summary(ascii=False)
    s5 = l1.rpc.summary()
    s6 = l3.rpc.summary()

    # then
    assert "├─────" in s1['channels'][-1]
    assert "[-----" in s2['channels'][-1]
    assert "[-----" in s3['channels'][-1]
    assert "├─────" in s4['channels'][-1]
    assert "├─────" in s5['channels'][-1]
    assert "[-----" in s6['channels'][-1]


def test_summary_opts(directory):
    opts = ['--summary-currency', '--summary-currency-prefix']

    help_out = subprocess.check_output([
        'lightningd',
        '--lightning-dir={}'.format(directory),
        '--help'
    ]).decode('utf-8')
    for o in opts:
        assert(o not in help_out)

    help_out = subprocess.check_output([
        'lightningd',
        '--lightning-dir={}'.format(directory),
        '--plugin={}'.format(pluginopt['plugin']),
        '--help'
    ]).decode('utf-8')
    for o in opts:
        assert(o in help_out)


def test_summary_exclude(node_factory):
    l1, l2 = node_factory.line_graph(2, opts=pluginopt)

    s = l1.rpc.summary()
    expected = {
        'format-hint': 'simple',
        'network': 'REGTEST',
        'num_channels': 1,
        'num_connected': 1,
        'num_gossipers': 0,
        'num_utxos': 1,
        'warning_no_address': 'NO PUBLIC ADDRESSES'
    }
    for k, v in expected.items():
        assert(s[k] == v)

    scid = l1.rpc.listchannels()['channels'][0]['short_channel_id']
    s = l1.rpc.summary(exclude=scid)
    expected = {
        'format-hint': 'simple',
        'network': 'REGTEST',
        'num_channels': 0,
        'num_connected': 0,
        'num_gossipers': 0,
        'num_utxos': 1,
        'warning_no_address': 'NO PUBLIC ADDRESSES'
    }
    for k, v in expected.items():
        assert(s[k] == v)