autopilot: Add a direct copy of Rene's original autopilot code
Just copying it over, not pluginizing it yet.
0	autopilot/__init__.py	Normal file
85	autopilot/bech32.py	Normal file
@@ -0,0 +1,85 @@
# Copyright (c) 2017 Pieter Wuille
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""subset of the reference implementation for Bech32 addresses."""

CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"


def bech32_polymod(values):
    """Internal function that computes the Bech32 checksum."""
    generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
    chk = 1
    for value in values:
        top = chk >> 25
        chk = (chk & 0x1ffffff) << 5 ^ value
        for i in range(5):
            chk ^= generator[i] if ((top >> i) & 1) else 0
    return chk


def bech32_hrp_expand(hrp):
    """Expand the HRP into values for checksum computation."""
    return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]


def bech32_verify_checksum(hrp, data):
    """Verify a checksum given HRP and converted data characters."""
    return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1


def bech32_decode(bech):
    """Validate a Bech32 string, and determine HRP and data."""
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
            (bech.lower() != bech and bech.upper() != bech)):
        return (None, None)
    bech = bech.lower()
    pos = bech.rfind('1')
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return (None, None)
    if not all(x in CHARSET for x in bech[pos+1:]):
        return (None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[pos+1:]]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    return (hrp, data[:-6])


def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion."""
    acc = 0
    bits = 0
    ret = []
    maxv = (1 << tobits) - 1
    max_acc = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            return None
        acc = ((acc << frombits) | value) & max_acc
        bits += frombits
        while bits >= tobits:
            bits -= tobits
            ret.append((acc >> bits) & maxv)
    if pad:
        if bits:
            ret.append((acc << (tobits - bits)) & maxv)
    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
        return None
    return ret
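
As a quick illustration of how these helpers compose (an editor's sketch, not part of the commit): bech32_decode returns the human-readable part and the 5-bit data values, and convertbits regroups those values into another power-of-2 base. The same pair is used by c-lightning-autopilot.py below to decode seed node ids from DNS names. The address here is the well-known BIP-173 test vector.

    # Editor's sketch: decode the BIP-173 test vector and regroup the 5-bit
    # data values (after the witness version in data[0]) into 8-bit bytes.
    hrp, data = bech32_decode("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4")
    assert hrp == "bc" and data[0] == 0
    program = convertbits(data[1:], 5, 8, pad=False)
    print(bytes(program).hex())  # 751e76e8199196d454941c45d1b3a323f1433bd6
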
269	autopilot/c-lightning-autopilot.py	Normal file
@@ -0,0 +1,269 @@
'''
Created on 04.09.2018

@author: rpickhardt

This software is a command line tool and c-lightning wrapper for lib_autopilot.

You need to have a c-lightning node running in order to utilize this program.
Also you need lib_autopilot. You can run

python3 c-lightning-autopilot --help

in order to get all the command line options.

usage: c-lightning-autopilot.py [-h] [-b BALANCE] [-c CHANNELS]
                                [-r PATH_TO_RPC_INTERFACE]
                                [-s {diverse,merge}] [-p PERCENTILE_CUTOFF]
                                [-d] [-i INPUT]

optional arguments:
  -h, --help            show this help message and exit
  -b BALANCE, --balance BALANCE
                        use specified number of satoshis to open all channels
  -c CHANNELS, --channels CHANNELS
                        opens specified amount of channels
  -r PATH_TO_RPC_INTERFACE, --path_to_rpc_interface PATH_TO_RPC_INTERFACE
                        specifies the path to the rpc_interface
  -s {diverse,merge}, --strategy {diverse,merge}
                        defines the strategy
  -p PERCENTILE_CUTOFF, --percentile_cutoff PERCENTILE_CUTOFF
                        only uses the top percentile of each probability
                        distribution
  -d, --dont_store      don't store the network on the hard drive
  -i INPUT, --input INPUT
                        points to a pickle file

a good example call of the program could look like this:

python3 c-lightning-autopilot.py -s diverse -c 30 -b 10000000

This call would use up to 10'000'000 satoshi to create 30 channels which are
generated by using the diverse strategy to mix the 4 heuristics.

Currently the software will not check if sufficient funds are available
or if a channel already exists.
'''

from os.path import expanduser
import argparse
import logging
import math
import pickle
import random
import sys
import time

from lightning import LightningRpc
import dns.resolver

from bech32 import bech32_decode, CHARSET, convertbits
from lib_autopilot import Autopilot
from lib_autopilot import Strategy
import networkx as nx


class CLightning_autopilot(Autopilot):

    def __init__(self, path, input=None, dont_store=None):
        self.__add_clogger()

        self.__rpc_interface = LightningRpc(path)
        self.__clogger.info("connection to RPC interface successful")

        G = None
        if input:
            try:
                self.__clogger.info(
                    "Try to load graph from file system at: " + input)
                with open(input, "rb") as infile:
                    G = pickle.load(infile)
                self.__clogger.info(
                    "Successfully restored the lightning network graph from " + input)
            except FileNotFoundError:
                self.__clogger.info(
                    "input file not found. Load the graph from the peers of the lightning network")
                G = self.__download_graph()
        else:
            self.__clogger.info(
                "no input specified, download graph from peers")
            G = self.__download_graph()

        # dont_store is False (not None) when the --dont_store flag is
        # absent, so any falsy value means "store the graph"
        if not dont_store:
            with open("lightning_networkx_graph.pickle", "wb") as outfile:
                pickle.dump(G, outfile, pickle.HIGHEST_PROTOCOL)

        Autopilot.__init__(self, G)

    def __add_clogger(self):
        """ initiates the logging service for this class """
        # FIXME: adapt to the settings that are proper for you
        self.__clogger = logging.getLogger('clightning-autopilot')
        self.__clogger.setLevel(logging.INFO)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.__clogger.addHandler(ch)
        self.__clogger.info("set up logging infrastructure")

    def __get_seed_keys(self):
        """
        retrieve the nodeids of the ln seed nodes from lseed.bitcoinstats.com
        """
        domain = "lseed.bitcoinstats.com"
        srv_records = dns.resolver.query(domain, "SRV")
        res = []
        for srv in srv_records:
            bech32 = str(srv.target).rstrip(".").split(".")[0]
            data = bech32_decode(bech32)[1]
            decoded = convertbits(data, 5, 4)
            res.append("".join(
                ['{:1x}'.format(integer) for integer in decoded])[:-1])
        return res
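
    # Editor's note on __get_seed_keys: a seed hostname like
    # <bech32-node-id>.lseed.bitcoinstats.com encodes a 33-byte node id as
    # 53 five-bit groups; convertbits(data, 5, 4) regroups these into hex
    # nibbles, each printed as one hex digit. The last nibble is padding
    # from the 5-to-4-bit regrouping, which is why [:-1] cuts it off.
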
    def __connect_to_seeds(self):
        """
        sets up peering connections to seed nodes of the lightning network

        This is necessary in case the node operating the autopilot has never
        been connected to the lightning network.
        """
        try:
            seed_keys = self.__get_seed_keys()
            # random.shuffle shuffles in place and returns None, so it must
            # not be used directly as the iterable of the for loop
            random.shuffle(seed_keys)
            for nodeid in seed_keys:
                self.__clogger.info("peering with node: " + nodeid)
                self.__rpc_interface.connect(nodeid)
                # FIXME: better strategy than sleep(2) for building up
                time.sleep(2)
        except Exception:
            pass

    def __download_graph(self):
        """
        Downloads a local copy of the node's view of the lightning network

        This copy is retrieved by listnodes and listchannels RPC calls and
        will thus be incomplete as peering might not be ready yet.
        """
        # FIXME: it is a real problem that we don't know how many nodes there
        # could be. In particular billion nodes networks will outgrow memory
        G = nx.Graph()
        self.__clogger.info("Instantiated networkx graph to store the lightning network")

        nodes = []
        try:
            self.__clogger.info(
                "Attempt RPC-call to download nodes from the lightning network")
            while len(nodes) == 0:
                peers = self.__rpc_interface.listpeers()["peers"]
                if len(peers) < 1:
                    self.__connect_to_seeds()
                nodes = self.__rpc_interface.listnodes()["nodes"]
        except ValueError as e:
            self.__clogger.info(
                "Node list could not be retrieved from the peers of the lightning network")
            self.__clogger.debug("RPC error: " + str(e))
            raise e

        for node in nodes:
            G.add_node(node["nodeid"], **node)

        self.__clogger.info(
            "Number of nodes found and added to the local networkx graph: {}".format(len(nodes)))

        channels = {}
        try:
            self.__clogger.info(
                "Attempt RPC-call to download channels from the lightning network")
            channels = self.__rpc_interface.listchannels()["channels"]
            self.__clogger.info(
                "Number of retrieved channels: {}".format(
                    len(channels)))
        except ValueError as e:
            self.__clogger.info(
                "Channel list could not be retrieved from the peers of the lightning network")
            self.__clogger.debug("RPC error: " + str(e))
            return False

        for channel in channels:
            G.add_edge(
                channel["source"],
                channel["destination"],
                **channel)

        return G

    def connect(self, candidates, balance=1000000):
        pdf = self.calculate_statistics(candidates)
        connection_dict = self.calculate_proposed_channel_capacities(
            pdf, balance)
        for nodeid, fraction in connection_dict.items():
            try:
                satoshis = math.ceil(balance * fraction)
                self.__clogger.info(
                    "Try to open channel with a capacity of {} to node {}".format(
                        satoshis, nodeid))
                self.__rpc_interface.fundchannel(nodeid, satoshis)
            except ValueError as e:
                self.__clogger.info(
                    "Could not open a channel to {} with capacity of {}. Error: {}".format(
                        nodeid, satoshis, str(e)))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--balance",
                        help="use specified number of satoshis to open all channels")
    parser.add_argument("-c", "--channels",
                        help="opens specified amount of channels")
    # FIXME: add the following command line option
    # parser.add_argument("-m", "--maxchannels",
    #                     help="opens channels as long as maxchannels is not reached")
    parser.add_argument("-r", "--path_to_rpc_interface",
                        help="specifies the path to the rpc_interface")
    # default matches the default of find_candidates; without it
    # args.strategy would be None and no candidates would be generated
    parser.add_argument("-s", "--strategy",
                        choices=[Strategy.DIVERSE, Strategy.MERGE],
                        default=Strategy.DIVERSE,
                        help="defines the strategy")
    parser.add_argument("-p", "--percentile_cutoff",
                        help="only uses the top percentile of each probability distribution")
    parser.add_argument("-d", "--dont_store", action='store_true',
                        help="don't store the network on the hard drive")
    parser.add_argument("-i", "--input",
                        help="points to a pickle file")

    args = parser.parse_args()

    # FIXME: find ln-dir from lightningd.
    path = expanduser("~/.lightning/lightning-rpc")
    if args.path_to_rpc_interface is not None:
        path = expanduser(args.path_to_rpc_interface)

    balance = 1000000
    if args.balance is not None:
        balance = int(args.balance)

    num_channels = 21
    if args.channels is not None:
        num_channels = int(args.channels)

    percentile = None
    if args.percentile_cutoff is not None:
        percentile = float(args.percentile_cutoff)

    autopilot = CLightning_autopilot(path, input=args.input,
                                     dont_store=args.dont_store)

    candidates = autopilot.find_candidates(num_channels,
                                           strategy=args.strategy,
                                           percentile=percentile)

    autopilot.connect(candidates, balance)
    print("Autopilot finished. We hope it did a good job for you (and the lightning network). Thanks for using it.")
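
Because the wrapper stores the downloaded graph as lightning_networkx_graph.pickle by default, a second run can skip the slow download step by pointing -i at the stored file (editor's note), for example:

    python3 c-lightning-autopilot.py -i lightning_networkx_graph.pickle -s merge -c 10 -b 2000000
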
430	autopilot/lib_autopilot.py	Normal file
@@ -0,0 +1,430 @@
'''
Created on 26.08.2018

@author: rpickhardt

lib_autopilot is a library which, based on a networkx graph, tries to
predict which channels should be added for a new node on the network. The
long term goal is to generate a lightning network with good topological
properties.

This library currently uses 4 heuristics to select channels and supports
two strategies for combining those heuristics:
1.) Diverse: which tries to get nodes from every distribution
2.) Merge: which builds the mixture distribution of the 4 heuristics

The library also estimates how much funds should be used for every newly
added channel. This is achieved by looking at the average channel capacity
of the suggested channel partners. A probability distribution which is
proportional to those capacities is created and smoothed with the uniform
distribution.

The 4 heuristics for channel partner suggestion are:

1.) Random: following the Erdős–Rényi model nodes are drawn from a uniform
distribution
2.) Central: nodes are sampled from a distribution proportional to the
betweenness centrality of nodes
3.) Decrease Diameter: nodes are sampled from a distribution which favors
badly connected nodes
4.) Richness: nodes with high liquidity are taken and it is sampled from a
uniform distribution of those

The library is supposed to be extended by a simulation framework which can
be used to evaluate which strategies are useful in the long term. For this,
heavy computations (like centrality measures) might have to be reimplemented
in a more dynamic way.

Also it is important to understand that this program is not optimized to run
efficiently on large scale graphs with more than 100k nodes or on densely
connected graphs.

the program needs the following dependencies:
pip install networkx numpy
'''
"""
ideas:
* should we respect our own channel balances?
* respect node life time / uptime? or time of channels?
* include more statistics of the network:
* allow autopilots of various nodes to exchange some information
* exchange algorithms if the network grows.
* include better handling for duplicates and existing channels
* cap number of channels for well connected nodes.
* channel balance of automatic channels should not be more than 50% of
  cumulative channel balance of destination node


next steps:
* test if the rankings from the heuristics are statistically independent
* evaluate / simulate which method produces graphs with desirable properties
"""

from operator import itemgetter
import logging
import math
import pickle


import networkx as nx
import numpy as np


class Strategy:
    # define constants. Never changed as they are part of the API
    DIVERSE = "diverse"
    MERGE = "merge"


class Autopilot():

    def __init__(self, G):
        self.__add_logger()
        self.G = G

    def __add_logger(self):
        """ initiates the logging service for this class """
        # FIXME: adapt to the settings that are proper for you
        self.__logger = logging.getLogger('lib-autopilot')
        self.__logger.setLevel(logging.INFO)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.__logger.addHandler(ch)

    def __sample_from_pdf(self, pdf, k=21):
        """
        helper function to quickly sample from a pdf encoded in a dictionary
        """
        if type(k) is not int:
            raise TypeError("__sample_from_pdf: k must be an integer variable")
        if k < 0 or k > 21000:
            raise ValueError("__sample_from_pdf: k must be between 0 and 21000")

        keys, v = zip(*list(pdf.items()))
        if k >= len(keys):
            return keys
        res = np.random.choice(keys, k, replace=False, p=v)
        return res

    def __sample_from_percentile(self, pdf, percentile=0.5, num_items=21):
        """
        only look at the most likely items and sample from those
        """
        if not percentile:
            return self.__sample_from_pdf(pdf, num_items)

        if type(percentile) is not float:
            raise TypeError("percentile must be a floating point variable")
        if percentile < 0 or percentile > 1:
            raise ValueError("percentile must be between 0 and 1")

        cumsum = 0
        used_pdf = {}
        for n, value in sorted(
                pdf.items(), key=itemgetter(1), reverse=True):
            cumsum += value
            used_pdf[n] = value
            if cumsum > percentile:
                break

        used_pdf = {k: v/cumsum for k, v in used_pdf.items()}
        return self.__sample_from_pdf(used_pdf, num_items)
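
    # Worked example for __sample_from_percentile (editor's sketch): given
    # pdf = {a: 0.5, b: 0.3, c: 0.2} and percentile = 0.6, the loop keeps a
    # (cumsum 0.5) and b (cumsum 0.8 > 0.6) and stops; renormalizing by 0.8
    # yields used_pdf = {a: 0.625, b: 0.375}, from which the sample is drawn.
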
    def __get_uniform_pdf(self):
        """
        Generates a uniform distribution of all nodes in the graph

        In contrast to the other methods there are no arguments for smoothing
        or skewing since this would not do anything to the uniform
        distribution
        """
        pdf = {n: 1 for n in self.G.nodes()}
        length = len(pdf)
        return {k: v/length for k, v in pdf.items()}

    def __get_centrality_pdf(self, skew=False, smooth=False):
        """
        produces a probability distribution which is proportional to the
        nodes' betweenness centrality scores

        The betweenness centrality counts on how many shortest paths a node
        lies. Connecting to those nodes will most likely make them even more
        central. However it is good for the node operating this autopilot,
        as this node itself gets a position in the network which is close to
        central nodes.

        this distribution can be skewed and smoothed
        """
        self.__logger.info(
            "CENTRALITY_PDF: Try to generate a PDF proportional to centrality scores")
        pdf = {}
        cumsum = 0
        for n, score in nx.betweenness_centrality(self.G).items():
            pdf[n] = score
            cumsum += score

        # renormalize result
        pdf = {k: v/cumsum for k, v in pdf.items()}
        self.__logger.info(
            "CENTRALITY_PDF: Generated pdf")

        if skew and smooth:
            self.__logger.info(
                "CENTRALITY_PDF: Won't skew and smooth distribution, ignore both")
            smooth = False
            skew = False
        return self.__manipulate_pdf(pdf, skew, smooth)
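
    # Worked example for __get_centrality_pdf (editor's sketch): on the path
    # graph a-b-c only b lies on a shortest path between other nodes, so the
    # betweenness scores {a: 0, b: 1, c: 0} already form the pdf and the
    # heuristic would always propose the central node b.
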
    def __get_rich_nodes_pdf(self, skew=False, smooth=False):
        """
        Get a PDF proportional to the cumulative capacity of nodes

        The probability density function is calculated by looking at the
        cumulative capacity of all channels one node is part of.

        The method can skew the pdf by taking the squares of the sums of
        capacities after deriving a pdf. If one wishes, the method can also
        be smoothed by taking the mixture distribution with the uniform
        distribution.

        Skewing and smoothing is controlled via the arguments skew and smooth
        """
        self.__logger.info(
            "RICH_PDF: Try to retrieve a PDF proportional to capacities")

        rich_nodes = {}
        network_capacity = 0
        for n in self.G.nodes():
            total_capacity = sum(
                self.G.get_edge_data(
                    n, m)["satoshis"] for m in self.G.neighbors(n))
            network_capacity += total_capacity
            rich_nodes[n] = total_capacity

        rich_nodes = {k: v/network_capacity for k, v in rich_nodes.items()}

        self.__logger.info(
            "RICH_PDF: Generated a PDF proportional to capacities")

        if skew and smooth:
            self.__logger.info(
                "RICH_PDF: Can't skew and smooth distribution, ignore both")
            smooth = False
            skew = False

        return self.__manipulate_pdf(rich_nodes, skew, smooth)

    def __get_long_path_pdf(self, skew=True, smooth=False):
        """
        A probability distribution in which badly connected nodes are likely

        This method looks at all pairs shortest paths and takes the sum of
        all path lengths for each node and derives a probability distribution
        from the sums. The idea of this method is to find nodes which are
        increasing the diameter of the network.

        The method will by default skew the pdf by taking the squares of the
        sums of path lengths before deriving a pdf. If one wishes, the method
        can also be smoothed by taking the mixture distribution with the
        uniform distribution.

        Skewing and smoothing is controlled via the arguments skew and smooth
        """
        if skew and smooth:
            self.__logger.info(
                "DECREASE DIAMETER: Can't skew and smooth distribution, ignore smoothing")
            smooth = False

        path_pdf = {}
        self.__logger.info(
            "DECREASE DIAMETER: Generating probability density function")

        all_pair_shortest_path_lengths = nx.shortest_path_length(self.G)

        for node, paths in all_pair_shortest_path_lengths:
            path_sum = sum(length for _, length in paths.items())
            path_pdf[node] = path_sum

        s = sum(path_pdf.values())
        path_pdf = {k: v/s for k, v in path_pdf.items()}
        self.__logger.info(
            "DECREASE DIAMETER: probability density function created")

        path_pdf = self.__manipulate_pdf(path_pdf, skew, smooth)

        return path_pdf
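
    # Worked example for __get_long_path_pdf (editor's sketch): on the path
    # graph a-b-c the shortest-path sums are a: 3, b: 2, c: 3, giving a pdf
    # of {a: 3/8, b: 2/8, c: 3/8}; the default skewing squares and
    # renormalizes this to {a: 9/22, b: 4/22, c: 9/22}, so the poorly
    # connected endpoints become even more likely picks.
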
    def __manipulate_pdf(self, pdf, skew=True, smooth=False):
        """
        helper function to skew or smooth a probability distribution

        skewing is achieved by taking the squares of probabilities and
        renormalizing

        smoothing is achieved by taking the mixture distribution with the
        uniform distribution

        smoothing and skewing are not inverse to each other but should also
        not happen at the same time. The method will however not prevent this
        """
        if not skew and not smooth:  # nothing to do
            return pdf
        length = len(pdf)
        if skew:
            self.__logger.info(
                "manipulate_pdf: Skewing the probability density function")
            pdf = {k: v**2 for k, v in pdf.items()}
            s = sum(pdf.values())
            pdf = {k: v/s for k, v in pdf.items()}

        if smooth:
            self.__logger.info(
                "manipulate_pdf: Smoothing the probability density function")
            pdf = {k: 0.5*v + 0.5/length for k, v in pdf.items()}

        return pdf
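
    # Worked example for __manipulate_pdf (editor's sketch): skewing
    # {a: 0.5, b: 0.25, c: 0.25} squares to {0.25, 0.0625, 0.0625} and
    # renormalizes to roughly {a: 0.67, b: 0.17, c: 0.17}; smoothing instead
    # averages with the uniform 1/3 and yields {a: ~0.42, b: ~0.29, c: ~0.29}.
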
    def __create_pdfs(self):
        res = {}
        res["path"] = self.__get_long_path_pdf()
        res["centrality"] = self.__get_centrality_pdf()
        res["rich"] = self.__get_rich_nodes_pdf()
        res["uniform"] = self.__get_uniform_pdf()
        return res

    def calculate_statistics(self, candidates):
        """
        computes statistics of the candidate set about connectivity and
        wealth and returns a probability density function (pdf) which encodes
        which percentage of the funds should be used for each channel with
        each candidate node

        The pdf is proportional to the average balance of each candidate and
        smoothed with a uniform distribution. Currently the smoothing is just
        a weighted arithmetic mean with a weight of 0.3 for the uniform
        distribution.
        """
        pdf = {}
        for candidate in candidates:
            neighbors = list(self.G.neighbors(candidate))
            capacity = sum([self.G.get_edge_data(candidate, n)
                            ["satoshis"] for n in neighbors])
            average = capacity / (1+len(neighbors))
            pdf[candidate] = average
        cumsum = sum(pdf.values())
        pdf = {k: v / cumsum for k, v in pdf.items()}
        w = 0.7
        print("percentage smoothed percentage capacity numchannels alias")
        print("----------------------------------------------------------------------")
        res_pdf = {}
        for k, v in pdf.items():
            neighbors = list(self.G.neighbors(k))
            capacity = sum([self.G.get_edge_data(k, n)["satoshis"]
                            for n in neighbors])
            name = k
            # networkx 2.4 removed Graph.node; Graph.nodes is the current API
            if "alias" in self.G.nodes[k]:
                name = self.G.nodes[k]["alias"]
            print("{:12.2f} ".format(100 * v),
                  "{:12.2f} ".format(
                      100 * (w * v + (1 - w) / len(candidates))),
                  "{:10} {:10} ".format(capacity,
                                        len(neighbors)),
                  name)
            res_pdf[k] = (w * v + (1 - w) / len(candidates))
        return res_pdf
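
    # Worked example for calculate_statistics (editor's sketch): with two
    # candidates whose average capacities normalize to {a: 0.8, b: 0.2}, the
    # smoothed pdf is 0.7 * v + 0.3 / 2 per node, i.e. {a: 0.71, b: 0.29},
    # pulling the allocation toward an even split.
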
    def calculate_proposed_channel_capacities(self, pdf, balance=1000000):
        minimal_channel_balance = 20000  # lnd uses 20k satoshi which seems reasonable

        min_probability = min(pdf.values())
        needed_total_balance = math.ceil(
            minimal_channel_balance / min_probability)
        self.__logger.info(
            "Need at least a balance of {} satoshi to open {} channels".format(
                needed_total_balance, len(pdf)))
        while needed_total_balance > balance and len(pdf) > 1:
            min_val = min(pdf.values())
            k = [k for k, v in pdf.items() if v == min_val][0]
            self.__logger.info(
                "Not enough balance to open {} channels. Remove node: {} and rebalance pdf for channel balances".format(
                    len(pdf), k))
            del pdf[k]

            s = sum(pdf.values())
            pdf = {k: v / s for k, v in pdf.items()}

            min_probability = min(pdf.values())
            needed_total_balance = math.ceil(
                minimal_channel_balance / min_probability)
            self.__logger.info(
                "Need at least a balance of {} satoshi to open {} channels".format(
                    needed_total_balance, len(pdf)))

        return pdf
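
    # Worked example for calculate_proposed_channel_capacities (editor's
    # sketch): with pdf = {a: 0.1, b: 0.9} the smallest fraction forces a
    # total balance of ceil(20000 / 0.1) = 200000 satoshi; if only 100000
    # satoshi are available, node a is dropped, the pdf renormalizes to
    # {b: 1.0} and the remaining channel easily meets the 20000 minimum.
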
    def find_candidates(self, num_items=21, strategy=Strategy.DIVERSE,
                        percentile=None):
        """
        Generates candidates with several strategies
        """
        self.__logger.info("running the autopilot on a graph with {} nodes and {} edges.".format(
            len(self.G.nodes()), len(self.G.edges())))
        sub_k = math.ceil(num_items / 4)
        self.__logger.info(
            "GENERATE CANDIDATES: Try to generate up to {} nodes with 4 strategies: (random, central, network improvement, liquidity)".format(num_items))
        # FIXME: should remember from where nodes are known

        res = self.__create_pdfs()

        candidates = set()
        # FIXME: Run simulations to decide the following problem:
        """
        we can either do a global sampling by merging all probability
        distributions and sample once from them or we can sample from
        each probability distribution and merge the results. These processes
        are obviously not commutative and we need to check which one seems
        more reasonable.
        My (renepickhardt) gut feeling says several samples which are
        merged give the best of all worlds where the other method would
        probably result in something that is either pretty uniform or
        dominated by one very skewed distribution. As mentioned this needs
        to be tested.
        """
        if strategy == Strategy.DIVERSE:
            for pdf in res.values():
                tmp = self.__sample_from_percentile(pdf, percentile, sub_k)
                candidates = candidates.union(set(tmp))

        elif strategy == Strategy.MERGE:
            merged = {}
            denominator = len(res)
            for pdf in res.values():
                for k, v in pdf.items():
                    if k not in merged:
                        merged[k] = v/denominator
                    else:
                        merged[k] += v/denominator
            candidates = self.__sample_from_percentile(merged, percentile,
                                                       num_items)
        """
        the following code prints a list of candidates for debugging:
        for k in res:
            if "alias" in self.G.nodes[key[k]]:
                print(pdf[key[k]], self.G.nodes[key[k]]["alias"])
        """

        if len(candidates) > num_items:
            candidates = np.random.choice(list(candidates), num_items,
                                          replace=False)

        self.__logger.info(
            "GENERATE CANDIDATES: Found {} nodes with which channel creation is suggested".format(
                len(candidates)))
        return candidates


if __name__ == '__main__':
    print("This lib needs to be given a network graph so you need to create a wrapper")
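
To make the wrapper contract concrete, here is a minimal usage sketch (an editor's illustration, not part of the commit). It only assumes what the code above already requires: edges carry a "satoshis" capacity and nodes may carry an "alias".

    import networkx as nx
    from lib_autopilot import Autopilot, Strategy

    # Toy graph in the shape lib_autopilot expects
    G = nx.Graph()
    G.add_node("alice", alias="alice")
    G.add_node("bob", alias="bob")
    G.add_node("carol", alias="carol")
    G.add_edge("alice", "bob", satoshis=100000)
    G.add_edge("bob", "carol", satoshis=50000)

    autopilot = Autopilot(G)
    candidates = autopilot.find_candidates(2, strategy=Strategy.DIVERSE)
    pdf = autopilot.calculate_statistics(candidates)
    print(autopilot.calculate_proposed_channel_capacities(pdf, balance=1000000))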