Merge pull request #1 from talaia-labs/master

updating master
Committed by GitHub on 2020-04-09 07:01:39 +02:00
53 changed files with 3497 additions and 2127 deletions


@@ -8,6 +8,15 @@ We use [black](https://github.com/psf/black) as our base code formatter with a l
```bash
black --line-length=120 {source_file_or_directory}
```
In addition, we use [flake8](https://flake8.pycqa.org/en/latest/) to detect style issues with the code:
```bash
flake8 --max-line-length=120 {source_file_or_directory}
```
Not all flake8 warnings are mandatory to fix. For instance, splitting **bullet points in docstrings (E501)** causes issues when generating the documentation, so those lines are left longer than the line length limit. Another example is **whitespace before colons in inline fors (E203)**: `black` formats them that way, so we leave them as they are.
On top of that, there are a few additional rules to keep in mind.
### Code Spacing


@@ -66,7 +66,7 @@ The configuration file options to change the network where `teos` will run are t
```
[bitcoind]
btc_rpc_user = "user"
- btc_rpc_passwd = "passwd"
+ btc_rpc_password = "passwd"
btc_rpc_connect = "localhost"
btc_rpc_port = 8332
btc_network = "mainnet"
@@ -77,7 +77,7 @@ For regtest, it should look like:
```
[bitcoind]
btc_rpc_user = "user"
- btc_rpc_passwd = "passwd"
+ btc_rpc_password = "passwd"
btc_rpc_connect = "localhost"
btc_rpc_port = 18443
btc_network = "regtest"


@@ -54,7 +54,6 @@ The alpha release does not have authentication, payments nor rate limiting, ther
- `start_time` should be within the next 6 blocks `[current_time+1, current_time+6]`.
- `end_time` cannot be bigger than (roughly) a month. That is `4320` blocks on top of `start_time`.
- `encrypted_blob`s are limited to `2 kib`.
#### Usage


@@ -33,7 +33,6 @@ The alpha release does not have authentication, payments nor rate limiting, ther
- `start_time` should be within the next 6 blocks `[current_time+1, current_time+6]`.
- `end_time` cannot be bigger than (roughly) a month. That is `4320` blocks on top of `start_time`.
- `encrypted_blob`s are limited to `2 kib`.
#### Appointment example


@@ -3,6 +3,7 @@ def show_usage():
"USAGE: " "USAGE: "
"\n\tpython teos_cli.py [global options] command [command options] [arguments]" "\n\tpython teos_cli.py [global options] command [command options] [arguments]"
"\n\nCOMMANDS:" "\n\nCOMMANDS:"
"\n\tregister \tRegisters your user public key with the tower."
"\n\tadd_appointment \tRegisters a json formatted appointment with the tower." "\n\tadd_appointment \tRegisters a json formatted appointment with the tower."
"\n\tget_appointment \tGets json formatted data about an appointment from the tower." "\n\tget_appointment \tGets json formatted data about an appointment from the tower."
"\n\thelp \t\t\tShows a list of commands or help for a specific command." "\n\thelp \t\t\tShows a list of commands or help for a specific command."
@@ -14,12 +15,23 @@ def show_usage():
)
def help_register():
return (
"NAME:"
"\n\n\tregister"
"\n\nUSAGE:"
"\n\n\tpython teos_cli.py register"
"\n\nDESCRIPTION:"
"\n\n\tRegisters your user public key with the tower."
)
def help_add_appointment():
return (
"NAME:"
- "\tpython teos_cli add_appointment - Registers a json formatted appointment to the tower."
+ "\n\tadd_appointment - Registers a json formatted appointment to the tower."
"\n\nUSAGE:"
- "\tpython teos_cli add_appointment [command options] appointment/path_to_appointment_file"
+ "\n\tpython teos_cli.py add_appointment [command options] appointment/path_to_appointment_file"
"\n\nDESCRIPTION:"
"\n\n\tRegisters a json formatted appointment to the tower."
"\n\tif -f, --file *is* specified, then the command expects a path to a json file instead of a json encoded "
@@ -33,9 +45,9 @@ def help_add_appointment():
def help_get_appointment():
return (
"NAME:"
- "\tpython teos_cli get_appointment - Gets json formatted data about an appointment from the tower."
+ "\n\tget_appointment - Gets json formatted data about an appointment from the tower."
"\n\nUSAGE:"
- "\tpython teos_cli get_appointment appointment_locator"
+ "\n\tpython teos_cli.py get_appointment appointment_locator"
"\n\nDESCRIPTION:"
"\n\n\tGets json formatted data about an appointment from the tower.\n"
)
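Once this help text is wired into the CLI, the commands read as follows (a quick sketch; the appointment file name and the locator are placeholders):

```bash
python teos_cli.py register
python teos_cli.py add_appointment -f appointment.json
python teos_cli.py get_appointment 4a5e1e4baab89f3a32518a88c31bc87f
python teos_cli.py help register
```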


@@ -3,7 +3,6 @@ import sys
import time import time
import json import json
import requests import requests
import binascii
from sys import argv from sys import argv
from uuid import uuid4 from uuid import uuid4
from coincurve import PublicKey from coincurve import PublicKey
@@ -11,7 +10,7 @@ from getopt import getopt, GetoptError
from requests import ConnectTimeout, ConnectionError from requests import ConnectTimeout, ConnectionError
from requests.exceptions import MissingSchema, InvalidSchema, InvalidURL from requests.exceptions import MissingSchema, InvalidSchema, InvalidURL
from cli.help import show_usage, help_add_appointment, help_get_appointment from cli.help import show_usage, help_add_appointment, help_get_appointment, help_register
from cli import DEFAULT_CONF, DATA_DIR, CONF_FILE_NAME, LOG_PREFIX from cli import DEFAULT_CONF, DATA_DIR, CONF_FILE_NAME, LOG_PREFIX
import common.cryptographer import common.cryptographer
@@ -22,24 +21,173 @@ from common.appointment import Appointment
from common.config_loader import ConfigLoader from common.config_loader import ConfigLoader
from common.cryptographer import Cryptographer from common.cryptographer import Cryptographer
from common.tools import setup_logging, setup_data_folder from common.tools import setup_logging, setup_data_folder
from common.tools import check_sha256_hex_format, check_locator_format, compute_locator from common.tools import is_256b_hex_str, is_locator, compute_locator, is_compressed_pk
logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX) logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX)
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX) common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
def register(compressed_pk, teos_url):
"""
Registers the user to the tower.
Args:
compressed_pk (:obj:`str`): a 33-byte hex-encoded compressed public key representing the user.
teos_url (:obj:`str`): the teos base url.
Returns:
:obj:`dict` or :obj:`None`: a dictionary containing the tower response if the registration succeeded. ``None``
otherwise.
"""
if not is_compressed_pk(compressed_pk):
logger.error("The cli public key is not valid")
return None
# Send request to the server.
register_endpoint = "{}/register".format(teos_url)
data = {"public_key": compressed_pk}
logger.info("Registering in the Eye of Satoshi")
server_response = post_request(data, register_endpoint)
if server_response:
response_json = process_post_response(server_response)
return response_json
def add_appointment(appointment_data, cli_sk, teos_pk, teos_url, appointments_folder_path):
"""
Manages the add_appointment command.
The life cycle of the function is as follows:
- Check that the given commitment_txid is correct (proper format and not missing)
- Check that the transaction is correct (not missing)
- Create the appointment locator and encrypted blob from the commitment_txid and the penalty_tx
- Sign the appointment
- Send the appointment to the tower
- Wait for the response
- Check the tower's response and signature
- Store the receipt (appointment + signature) on disk
Args:
appointment_data (:obj:`dict`): a dictionary containing the appointment data.
cli_sk (:obj:`PrivateKey`): the client's private key.
teos_pk (:obj:`PublicKey`): the tower's public key.
teos_url (:obj:`str`): the teos base url.
appointments_folder_path (:obj:`str`): the path to the appointments folder.
Returns:
:obj:`bool`: True if the appointment is accepted by the tower and the receipt is properly stored. False if any
error occurs during the process.
"""
if appointment_data is None:
logger.error("The provided appointment JSON is empty")
return False
if not is_256b_hex_str(appointment_data.get("tx_id")):
logger.error("The provided txid is not valid")
return False
tx_id = appointment_data.get("tx_id")
tx = appointment_data.get("tx")
if None not in [tx_id, tx]:
appointment_data["locator"] = compute_locator(tx_id)
appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(tx), tx_id)
else:
logger.error("Appointment data is missing some fields")
return False
appointment = Appointment.from_dict(appointment_data)
signature = Cryptographer.sign(appointment.serialize(), cli_sk)
if not (appointment and signature):
return False
data = {"appointment": appointment.to_dict(), "signature": signature}
# Send appointment to the server.
add_appointment_endpoint = "{}/add_appointment".format(teos_url)
logger.info("Sending appointment to the Eye of Satoshi")
server_response = post_request(data, add_appointment_endpoint)
if server_response is None:
return False
response_json = process_post_response(server_response)
if response_json is None:
return False
signature = response_json.get("signature")
# Check that the server signed the appointment as it should.
if signature is None:
logger.error("The response does not contain the signature of the appointment")
return False
rpk = Cryptographer.recover_pk(appointment.serialize(), signature)
if not Cryptographer.verify_rpk(teos_pk, rpk):
logger.error("The returned appointment's signature is invalid")
return False
logger.info("Appointment accepted and signed by the Eye of Satoshi")
logger.info("Remaining slots: {}".format(response_json.get("available_slots")))
# All good, store appointment and signature
return save_appointment_receipt(appointment.to_dict(), signature, appointments_folder_path)
def get_appointment(locator, cli_sk, teos_pk, teos_url):
"""
Gets information about an appointment from the tower.
Args:
locator (:obj:`str`): the appointment locator used to identify it.
cli_sk (:obj:`PrivateKey`): the client's private key.
teos_pk (:obj:`PublicKey`): the tower's public key.
teos_url (:obj:`str`): the teos base url.
Returns:
:obj:`dict` or :obj:`None`: a dictionary containing the appointment data if the locator is valid and the tower
responds. ``None`` otherwise.
"""
# FIXME: All responses from the tower should be signed. Not using teos_pk atm.
valid_locator = is_locator(locator)
if not valid_locator:
logger.error("The provided locator is not valid", locator=locator)
return None
message = "get appointment {}".format(locator)
signature = Cryptographer.sign(message.encode(), cli_sk)
data = {"locator": locator, "signature": signature}
# Send request to the server.
get_appointment_endpoint = "{}/get_appointment".format(teos_url)
logger.info("Sending appointment to the Eye of Satoshi")
server_response = post_request(data, get_appointment_endpoint)
response_json = process_post_response(server_response)
return response_json
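With `register`, `add_appointment` and `get_appointment` now exposed as standalone functions, the same flow can also be driven programmatically. A minimal sketch (the `cli.teos_cli` module path, key file names and appointment values are assumptions, and a tower must be reachable at the given URL):

```python
from cli.teos_cli import load_keys, register, add_appointment

teos_url = "http://localhost:9814"

keys = load_keys("teos_pk.der", "cli_sk.der", "cli_pk.der")
if keys:
    teos_pk, cli_sk, compressed_cli_pk = keys
    register(compressed_cli_pk, teos_url)

    appointment_data = {
        "tx_id": "aa" * 32,   # dispute txid (dummy value)
        "tx": "deadbeef",     # raw penalty tx (dummy value)
        "start_time": 100,
        "end_time": 200,
        "to_self_delay": 20,
    }
    add_appointment(appointment_data, cli_sk, teos_pk, teos_url, "appointments")
```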
def load_keys(teos_pk_path, cli_sk_path, cli_pk_path): def load_keys(teos_pk_path, cli_sk_path, cli_pk_path):
""" """
Loads all the keys required to sign, send, and verify the appointment.
Args: Args:
teos_pk_path (:obj:`str`): path to the TEOS public key file. teos_pk_path (:obj:`str`): path to the tower public key file.
cli_sk_path (:obj:`str`): path to the client private key file. cli_sk_path (:obj:`str`): path to the client private key file.
cli_pk_path (:obj:`str`): path to the client public key file. cli_pk_path (:obj:`str`): path to the client public key file.
Returns: Returns:
:obj:`tuple` or ``None``: a three item tuple containing a teos_pk object, cli_sk object and the cli_sk_der :obj:`tuple` or ``None``: a three-item tuple containing a ``PrivateKey``, a ``PublicKey`` and a ``str``
encoded key if all keys can be loaded. ``None`` otherwise. representing the tower pk, user sk and user compressed pk respectively if all keys can be loaded.
``None`` otherwise.
""" """
if teos_pk_path is None: if teos_pk_path is None:
@@ -71,118 +219,77 @@ def load_keys(teos_pk_path, cli_sk_path, cli_pk_path):
try: try:
cli_pk_der = Cryptographer.load_key_file(cli_pk_path) cli_pk_der = Cryptographer.load_key_file(cli_pk_path)
PublicKey(cli_pk_der) compressed_cli_pk = Cryptographer.get_compressed_pk(PublicKey(cli_pk_der))
except ValueError: except ValueError:
logger.error("Client public key is invalid or cannot be parsed") logger.error("Client public key is invalid or cannot be parsed")
return None return None
return teos_pk, cli_sk, cli_pk_der return teos_pk, cli_sk, compressed_cli_pk
def add_appointment(args, teos_url, config): def post_request(data, endpoint):
""" """
Manages the add_appointment command, from argument parsing, trough sending the appointment to the tower, until Sends a post request to the tower.
saving the appointment receipt.
The life cycle of the function is as follows:
- Load the add_appointment arguments
- Check that the given commitment_txid is correct (proper format and not missing)
- Check that the transaction is correct (not missing)
- Create the appointment locator and encrypted blob from the commitment_txid and the penalty_tx
- Load the client private key and sign the appointment
- Send the appointment to the tower
- Wait for the response
- Check the tower's response and signature
- Store the receipt (appointment + signature) on disk
If any of the above-mentioned steps fails, the method returns false, otherwise it returns true.
Args: Args:
args (:obj:`list`): a list of arguments to pass to ``parse_add_appointment_args``. Must contain a json encoded data (:obj:`dict`): a dictionary containing the data to be posted.
appointment, or the file option and the path to a file containing a json encoded appointment. endpoint (:obj:`str`): the endpoint to send the post request.
teos_url (:obj:`str`): the teos base url.
config (:obj:`dict`): a config dictionary following the format of :func:`create_config_dict <common.config_loader.ConfigLoader.create_config_dict>`.
Returns: Returns:
:obj:`bool`: True if the appointment is accepted by the tower and the receipt is properly stored, false if any :obj:`dict` or ``None``: a json-encoded dictionary with the server response if the data can be posted.
error occurs during the process. ``None`` otherwise.
""" """
# Currently the base_url is the same as the add_appointment_endpoint try:
add_appointment_endpoint = teos_url return requests.post(url=endpoint, json=data, timeout=5)
teos_pk, cli_sk, cli_pk_der = load_keys( except ConnectTimeout:
config.get("TEOS_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"), config.get("CLI_PUBLIC_KEY") logger.error("Can't connect to the Eye of Satoshi's API. Connection timeout")
)
except ConnectionError:
logger.error("Can't connect to the Eye of Satoshi's API. Server cannot be reached")
except (InvalidSchema, MissingSchema, InvalidURL):
logger.error("Invalid URL. No schema, or invalid schema, found ({})".format(endpoint))
except requests.exceptions.Timeout:
logger.error("The request timed out")
return None
def process_post_response(response):
"""
Processes the server response to a post request.
Args:
response (:obj:`requests.models.Response`): a ``Response`` object obtained from the request.
Returns:
:obj:`dict` or :obj:`None`: a dictionary containing the tower's response data if the response type is
``HTTP_OK`` and the response can be properly parsed. ``None`` otherwise.
"""
if not response:
return None
try: try:
hex_pk_der = binascii.hexlify(cli_pk_der) response_json = response.json()
except binascii.Error as e: except (json.JSONDecodeError, AttributeError):
logger.error("Could not successfully encode public key as hex", error=str(e)) logger.error(
return False "The server returned a non-JSON response", status_code=response.status_code, reason=response.reason
)
return None
if teos_pk is None: if response.status_code != constants.HTTP_OK:
return False logger.error(
"The server returned an error", status_code=response.status_code, reason=response.reason, data=response_json
)
return None
# Get appointment data from user. return response_json
appointment_data = parse_add_appointment_args(args)
if appointment_data is None:
logger.error("The provided appointment JSON is empty")
return False
valid_txid = check_sha256_hex_format(appointment_data.get("tx_id"))
if not valid_txid:
logger.error("The provided txid is not valid")
return False
tx_id = appointment_data.get("tx_id")
tx = appointment_data.get("tx")
if None not in [tx_id, tx]:
appointment_data["locator"] = compute_locator(tx_id)
appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(tx), tx_id)
else:
logger.error("Appointment data is missing some fields")
return False
appointment = Appointment.from_dict(appointment_data)
signature = Cryptographer.sign(appointment.serialize(), cli_sk)
if not (appointment and signature):
return False
data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
# Send appointment to the server.
server_response = post_appointment(data, add_appointment_endpoint)
if server_response is None:
return False
response_json = process_post_appointment_response(server_response)
if response_json is None:
return False
signature = response_json.get("signature")
# Check that the server signed the appointment as it should.
if signature is None:
logger.error("The response does not contain the signature of the appointment")
return False
rpk = Cryptographer.recover_pk(appointment.serialize(), signature)
if not Cryptographer.verify_rpk(teos_pk, rpk):
logger.error("The returned appointment's signature is invalid")
return False
logger.info("Appointment accepted and signed by the Eye of Satoshi")
# All good, store appointment and signature
return save_appointment_receipt(appointment.to_dict(), signature, config)
def parse_add_appointment_args(args): def parse_add_appointment_args(args):
@@ -190,8 +297,8 @@ def parse_add_appointment_args(args):
Parses the arguments of the add_appointment command. Parses the arguments of the add_appointment command.
Args: Args:
args (:obj:`list`): a list of arguments to pass to ``parse_add_appointment_args``. Must contain a json encoded args (:obj:`list`): a list of command line arguments that must contain a json encoded appointment, or the file
appointment, or the file option and the path to a file containing a json encoded appointment. option and the path to a file containing a json encoded appointment.
Returns: Returns:
:obj:`dict` or :obj:`None`: A dictionary containing the appointment data if it can be loaded. ``None`` :obj:`dict` or :obj:`None`: A dictionary containing the appointment data if it can be loaded. ``None``
@@ -233,102 +340,30 @@ def parse_add_appointment_args(args):
return appointment_data return appointment_data
def post_appointment(data, add_appointment_endpoint): def save_appointment_receipt(appointment, signature, appointments_folder_path):
""" """
Sends appointment data to add_appointment endpoint to be processed by the tower. Saves an appointment receipt to disk. A receipt consists of an appointment and a signature from the tower.
Args:
data (:obj:`dict`): a dictionary containing three fields: an appointment, the client-side signature, and the
der-encoded client public key.
add_appointment_endpoint (:obj:`str`): the teos endpoint where to send appointments to.
Returns:
:obj:`dict` or ``None``: a json-encoded dictionary with the server response if the data can be posted.
None otherwise.
"""
logger.info("Sending appointment to the Eye of Satoshi")
try:
return requests.post(url=add_appointment_endpoint, json=json.dumps(data), timeout=5)
except ConnectTimeout:
logger.error("Can't connect to the Eye of Satoshi's API. Connection timeout")
return None
except ConnectionError:
logger.error("Can't connect to the Eye of Satoshi's API. Server cannot be reached")
return None
except (InvalidSchema, MissingSchema, InvalidURL):
logger.error("Invalid URL. No schema, or invalid schema, found ({})".format(add_appointment_endpoint))
except requests.exceptions.Timeout:
logger.error("The request timed out")
def process_post_appointment_response(response):
"""
Processes the server response to an add_appointment request.
Args:
response (:obj:`requests.models.Response`): a ``Response`` object obtained from the sent request.
Returns:
:obj:`dict` or :obj:`None`: a dictionary containing the tower's response data if it can be properly parsed and
the response type is ``HTTP_OK``. ``None`` otherwise.
"""
try:
response_json = response.json()
except json.JSONDecodeError:
logger.error(
"The server returned a non-JSON response", status_code=response.status_code, reason=response.reason
)
return None
if response.status_code != constants.HTTP_OK:
if "error" not in response_json:
logger.error(
"The server returned an error status code but no error description", status_code=response.status_code
)
else:
error = response_json["error"]
logger.error(
"The server returned an error status code with an error description",
status_code=response.status_code,
description=error,
)
return None
return response_json
def save_appointment_receipt(appointment, signature, config):
"""
Saves an appointment receipt to disk. A receipt consists in an appointment and a signature from the tower.
Args: Args:
appointment (:obj:`Appointment <common.appointment.Appointment>`): the appointment to be saved on disk. appointment (:obj:`Appointment <common.appointment.Appointment>`): the appointment to be saved on disk.
signature (:obj:`str`): the signature of the appointment performed by the tower. signature (:obj:`str`): the signature of the appointment performed by the tower.
config (:obj:`dict`): a config dictionary following the format of :func:`create_config_dict <common.config_loader.ConfigLoader.create_config_dict>`. appointments_folder_path (:obj:`str`): the path to the appointments folder.
Returns: Returns:
- :obj:`bool`: True if the appointment if properly saved, false otherwise.
+ :obj:`bool`: True if the appointment is properly saved. False otherwise.
Raises: Raises:
IOError: if an error occurs whilst writing the file on disk. IOError: if an error occurs whilst writing the file on disk.
""" """
# Create the appointments directory if it doesn't already exist # Create the appointments directory if it doesn't already exist
os.makedirs(config.get("APPOINTMENTS_FOLDER_NAME"), exist_ok=True) os.makedirs(appointments_folder_path, exist_ok=True)
timestamp = int(time.time()) timestamp = int(time.time())
locator = appointment["locator"] locator = appointment["locator"]
uuid = uuid4().hex # prevent filename collisions uuid = uuid4().hex # prevent filename collisions
filename = "{}/appointment-{}-{}-{}.json".format(config.get("APPOINTMENTS_FOLDER_NAME"), timestamp, locator, uuid) filename = "{}/appointment-{}-{}-{}.json".format(appointments_folder_path, timestamp, locator, uuid)
data = {"appointment": appointment, "signature": signature} data = {"appointment": appointment, "signature": signature}
try: try:
@@ -342,46 +377,6 @@ def save_appointment_receipt(appointment, signature, config):
return False return False
def get_appointment(locator, get_appointment_endpoint):
"""
Gets information about an appointment from the tower.
Args:
locator (:obj:`str`): the appointment locator used to identify it.
get_appointment_endpoint (:obj:`str`): the teos endpoint where to get appointments from.
Returns:
:obj:`dict` or :obj:`None`: a dictionary containing thew appointment data if the locator is valid and the tower
responds. ``None`` otherwise.
"""
valid_locator = check_locator_format(locator)
if not valid_locator:
logger.error("The provided locator is not valid", locator=locator)
return None
parameters = "?locator={}".format(locator)
try:
r = requests.get(url=get_appointment_endpoint + parameters, timeout=5)
return r.json()
except ConnectTimeout:
logger.error("Can't connect to the Eye of Satoshi's API. Connection timeout")
return None
except ConnectionError:
logger.error("Can't connect to the Eye of Satoshi's API. Server cannot be reached")
return None
except requests.exceptions.InvalidSchema:
logger.error("No transport protocol found. Have you missed http(s):// in the server url?")
except requests.exceptions.Timeout:
logger.error("The request timed out")
def main(args, command_line_conf): def main(args, command_line_conf):
# Loads config and sets up the data folder and log file # Loads config and sets up the data folder and log file
config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf) config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf)
@@ -396,58 +391,73 @@ def main(args, command_line_conf):
if not teos_url.startswith("http"): if not teos_url.startswith("http"):
teos_url = "http://" + teos_url teos_url = "http://" + teos_url
try: keys = load_keys(config.get("TEOS_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"), config.get("CLI_PUBLIC_KEY"))
if args: if keys is not None:
command = args.pop(0) teos_pk, cli_sk, compressed_cli_pk = keys
if command in commands: try:
if command == "add_appointment": if args:
add_appointment(args, teos_url, config) command = args.pop(0)
elif command == "get_appointment": if command in commands:
if not args: if command == "register":
logger.error("No arguments were given") register_data = register(compressed_cli_pk, teos_url)
if register_data:
print(register_data)
else: if command == "add_appointment":
arg_opt = args.pop(0) # Get appointment data from user.
appointment_data = parse_add_appointment_args(args)
add_appointment(
appointment_data, cli_sk, teos_pk, teos_url, config.get("APPOINTMENTS_FOLDER_NAME")
)
if arg_opt in ["-h", "--help"]: elif command == "get_appointment":
sys.exit(help_get_appointment()) if not args:
logger.error("No arguments were given")
get_appointment_endpoint = "{}/get_appointment".format(teos_url)
appointment_data = get_appointment(arg_opt, get_appointment_endpoint)
if appointment_data:
print(appointment_data)
elif command == "help":
if args:
command = args.pop(0)
if command == "add_appointment":
sys.exit(help_add_appointment())
elif command == "get_appointment":
sys.exit(help_get_appointment())
else: else:
logger.error("Unknown command. Use help to check the list of available commands") arg_opt = args.pop(0)
else: if arg_opt in ["-h", "--help"]:
sys.exit(show_usage()) sys.exit(help_get_appointment())
appointment_data = get_appointment(arg_opt, cli_sk, teos_pk, teos_url)
if appointment_data:
print(appointment_data)
elif command == "help":
if args:
command = args.pop(0)
if command == "register":
sys.exit(help_register())
if command == "add_appointment":
sys.exit(help_add_appointment())
elif command == "get_appointment":
sys.exit(help_get_appointment())
else:
logger.error("Unknown command. Use help to check the list of available commands")
else:
sys.exit(show_usage())
else:
logger.error("Unknown command. Use help to check the list of available commands")
else: else:
logger.error("Unknown command. Use help to check the list of available commands") logger.error("No command provided. Use help to check the list of available commands")
else: except json.JSONDecodeError:
logger.error("No command provided. Use help to check the list of available commands") logger.error("Non-JSON encoded appointment passed as parameter")
except json.JSONDecodeError:
logger.error("Non-JSON encoded appointment passed as parameter")
if __name__ == "__main__": if __name__ == "__main__":
command_line_conf = {} command_line_conf = {}
commands = ["add_appointment", "get_appointment", "help"] commands = ["register", "add_appointment", "get_appointment", "help"]
try: try:
opts, args = getopt(argv[1:], "s:p:h", ["server", "port", "help"]) opts, args = getopt(argv[1:], "s:p:h", ["server", "port", "help"])


@@ -1,4 +1,3 @@
import json
import struct import struct
from binascii import unhexlify from binascii import unhexlify
@@ -10,18 +9,17 @@ class Appointment:
The :class:`Appointment` contains the information regarding an appointment between a client and the Watchtower. The :class:`Appointment` contains the information regarding an appointment between a client and the Watchtower.
Args: Args:
locator (:mod:`str`): A 16-byte hex-encoded value used by the tower to detect channel breaches. It serves as a trigger locator (:obj:`str`): A 16-byte hex-encoded value used by the tower to detect channel breaches. It serves as a
for the tower to decrypt and broadcast the penalty transaction. trigger for the tower to decrypt and broadcast the penalty transaction.
start_time (:mod:`int`): The block height where the tower is hired to start watching for breaches. start_time (:obj:`int`): The block height where the tower is hired to start watching for breaches.
end_time (:mod:`int`): The block height where the tower will stop watching for breaches. end_time (:obj:`int`): The block height where the tower will stop watching for breaches.
to_self_delay (:mod:`int`): The ``to_self_delay`` encoded in the ``csv`` of the ``htlc`` that this appointment is to_self_delay (:obj:`int`): The ``to_self_delay`` encoded in the ``csv`` of the ``to_remote`` output of the
covering. commitment transaction that this appointment is covering.
encrypted_blob (:obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>`): An ``EncryptedBlob`` object encrypted_blob (:obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>`): An ``EncryptedBlob`` object
containing an encrypted penalty transaction. The tower will decrypt it and broadcast the penalty transaction containing an encrypted penalty transaction. The tower will decrypt it and broadcast the penalty transaction
upon seeing a breach on the blockchain. upon seeing a breach on the blockchain.
""" """
# DISCUSS: 35-appointment-checks
def __init__(self, locator, start_time, end_time, to_self_delay, encrypted_blob): def __init__(self, locator, start_time, end_time, to_self_delay, encrypted_blob):
self.locator = locator self.locator = locator
self.start_time = start_time # ToDo: #4-standardize-appointment-fields self.start_time = start_time # ToDo: #4-standardize-appointment-fields
@@ -37,7 +35,7 @@ class Appointment:
This method is useful to load data from a database. This method is useful to load data from a database.
Args: Args:
appointment_data (:mod:`dict`): a dictionary containing the following keys: appointment_data (:obj:`dict`): a dictionary containing the following keys:
``{locator, start_time, end_time, to_self_delay, encrypted_blob}`` ``{locator, start_time, end_time, to_self_delay, encrypted_blob}``
Returns: Returns:
@@ -63,11 +61,10 @@ class Appointment:
def to_dict(self): def to_dict(self):
""" """
Exports an appointment as a dictionary. Encodes an appointment as a dictionary.
Returns: Returns:
:obj:`dict`: A dictionary containing the appointment attributes. :obj:`dict`: A dictionary containing the appointment attributes.
""" """
# ToDO: #3-improve-appointment-structure # ToDO: #3-improve-appointment-structure
@@ -81,19 +78,6 @@ class Appointment:
return appointment return appointment
def to_json(self):
"""
Exports an appointment as a deterministic json encoded string.
This method ensures that multiple invocations with the same data yield the same value. This is the format used
to store appointments in the database.
Returns:
:obj:`str`: A json-encoded str representing the appointment.
"""
return json.dumps(self.to_dict(), sort_keys=True, separators=(",", ":"))
def serialize(self): def serialize(self):
""" """
Serializes an appointment to be signed. Serializes an appointment to be signed.
@@ -104,7 +88,7 @@ class Appointment:
All values are big endian. All values are big endian.
Returns: Returns:
:mod:`bytes`: The serialized data to be signed. :obj:`bytes`: The serialized data to be signed.
""" """
return ( return (
unhexlify(self.locator) unhexlify(self.locator)
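As a reference for the wire format described in the docstring, a standalone sketch of the serialization (the 4-byte integer widths are an assumption for illustration; only the locator handling and the big-endian requirement come from the diff):

```python
import struct
from binascii import unhexlify

def serialize_appointment(locator, start_time, end_time, to_self_delay, encrypted_blob):
    # locator is a 16-byte hex string; integers are packed big-endian
    return (
        unhexlify(locator)
        + struct.pack(">I", start_time)
        + struct.pack(">I", end_time)
        + struct.pack(">I", to_self_delay)
        + unhexlify(encrypted_blob)
    )
```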


@@ -21,14 +21,14 @@ class ConfigLoader:
data_dir (:obj:`str`): the path to the data directory where the configuration file may be found. data_dir (:obj:`str`): the path to the data directory where the configuration file may be found.
conf_file_path (:obj:`str`): the path to the config file (the file may not exist). conf_file_path (:obj:`str`): the path to the config file (the file may not exist).
conf_fields (:obj:`dict`): a dictionary populated with the configuration params and the expected types. conf_fields (:obj:`dict`): a dictionary populated with the configuration params and the expected types.
follows the same format as default_conf. It follows the same format as default_conf.
command_line_conf (:obj:`dict`): a dictionary containing the command line parameters that may replace the command_line_conf (:obj:`dict`): a dictionary containing the command line parameters that may replace the
ones in default / config file. ones in default / config file.
""" """
def __init__(self, data_dir, conf_file_name, default_conf, command_line_conf): def __init__(self, data_dir, conf_file_name, default_conf, command_line_conf):
self.data_dir = data_dir self.data_dir = data_dir
self.conf_file_path = self.data_dir + conf_file_name self.conf_file_path = os.path.join(self.data_dir, conf_file_name)
self.conf_fields = default_conf self.conf_fields = default_conf
self.command_line_conf = command_line_conf self.command_line_conf = command_line_conf
@@ -36,13 +36,13 @@ class ConfigLoader:
""" """
Builds a config dictionary from command line, config file and default configuration parameters. Builds a config dictionary from command line, config file and default configuration parameters.
The priority if as follows: The priority is as follows:
- command line - command line
- config file - config file
- defaults - defaults
Returns: Returns:
obj:`dict`: a dictionary containing all the configuration parameters. :obj:`dict`: a dictionary containing all the configuration parameters.
""" """
@@ -50,6 +50,7 @@ class ConfigLoader:
file_config = configparser.ConfigParser() file_config = configparser.ConfigParser()
file_config.read(self.conf_file_path) file_config.read(self.conf_file_path)
# Load parameters and cast them to int if necessary
if file_config: if file_config:
for sec in file_config.sections(): for sec in file_config.sections():
for k, v in file_config.items(sec): for k, v in file_config.items(sec):
@@ -82,10 +83,10 @@ class ConfigLoader:
Returns: Returns:
:obj:`dict`: A dictionary with the same keys as the provided one, but containing only the "value" field as :obj:`dict`: A dictionary with the same keys as the provided one, but containing only the "value" field as
value if the provided ``conf_fields`` where correct. value if the provided ``conf_fields`` are correct.
Raises: Raises:
ValueError: If any of the dictionary elements does not have the expected type :obj:`ValueError`: If any of the dictionary elements does not have the expected type.
""" """
conf_dict = {} conf_dict = {}
@@ -104,11 +105,11 @@ class ConfigLoader:
def extend_paths(self): def extend_paths(self):
""" """
Extends the relative paths of the ``conf_fields`` dictionary with ``data_dir``. Extends the relative paths of the ``conf_fields`` dictionary with ``data_dir``.
If an absolute path is given, it'll remain the same. If an absolute path is given, it'll remain the same.
""" """
for key, field in self.conf_fields.items(): for key, field in self.conf_fields.items():
if field.get("path") is True and isinstance(field.get("value"), str): if field.get("path") and isinstance(field.get("value"), str):
self.conf_fields[key]["value"] = os.path.join(self.data_dir, self.conf_fields[key]["value"]) self.conf_fields[key]["value"] = os.path.join(self.data_dir, self.conf_fields[key]["value"])


@@ -5,4 +5,8 @@ LOCATOR_LEN_BYTES = LOCATOR_LEN_HEX // 2
# HTTP
HTTP_OK = 200
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_SERVICE_UNAVAILABLE = 503
# Temporary constants, may be changed
ENCRYPTED_BLOB_MAX_SIZE_HEX = 2 * 2048
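For context, the new constant expresses in hex characters the same 2 KiB cap that the API docs above no longer spell out:

```python
ENCRYPTED_BLOB_MAX_SIZE_HEX = 2 * 2048
assert ENCRYPTED_BLOB_MAX_SIZE_HEX == 4096       # 4096 hex characters...
assert ENCRYPTED_BLOB_MAX_SIZE_HEX // 2 == 2048  # ...encode 2048 bytes (2 KiB)
```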


@@ -1,19 +1,19 @@
import pyzbase32
- from hashlib import sha256
+ from hashlib import sha256, new
from binascii import unhexlify, hexlify
from coincurve.utils import int_to_bytes
from coincurve import PrivateKey, PublicKey
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
- from common.tools import check_sha256_hex_format
+ from common.tools import is_256b_hex_str
LN_MESSAGE_PREFIX = b"Lightning Signed Message:"
def sha256d(message):
"""
- Compute the sha245d (double sha256) of a given by message.
+ Computes the double sha256 (sha256d) of a given message.
Args:
message(:obj:`bytes`): the message to be used as input to the hash function.
@@ -25,12 +25,30 @@ def sha256d(message):
return sha256(sha256(message).digest()).digest() return sha256(sha256(message).digest()).digest()
def hash_160(message):
""" Calculates the RIPEMD-160 hash of a given message.
Args:
message (:obj:`str`): the hex-encoded message to be hashed.
Returns:
:obj:`str`: the ripemd160 hash of the given message.
"""
# Calculate the RIPEMD-160 hash of the given data.
md = new("ripemd160")
md.update(unhexlify(message))
h160 = md.hexdigest()
return h160
# NOTCOVERED
def sigrec_encode(rsig_rid): def sigrec_encode(rsig_rid):
""" """
- Encodes a pk-recoverable signature to be used in LN. ```rsig_rid`` can be obtained trough
+ Encodes a pk-recoverable signature to be used in LN. ``rsig_rid`` can be obtained through
``PrivateKey.sign_recoverable``. The required format has the recovery id as the last byte, and for signing LN
- messages we need it as the first.
+ messages we need it as the first. From: https://twitter.com/rusty_twit/status/1182102005914800128
From: https://twitter.com/rusty_twit/status/1182102005914800128
Args: Args:
rsig_rid(:obj:`bytes`): the signature to be encoded. rsig_rid(:obj:`bytes`): the signature to be encoded.
@@ -45,6 +63,7 @@ def sigrec_encode(rsig_rid):
return sigrec return sigrec
# NOTCOVERED
def sigrec_decode(sigrec): def sigrec_decode(sigrec):
""" """
Decodes a pk-recoverable signature in the format used by LN to be input to ``PublicKey.from_signature_and_message``. Decodes a pk-recoverable signature in the format used by LN to be input to ``PublicKey.from_signature_and_message``.
@@ -54,12 +73,18 @@ def sigrec_decode(sigrec):
Returns: Returns:
:obj:`bytes`: the decoded signature. :obj:`bytes`: the decoded signature.
Raises:
:obj:`ValueError`: if the SigRec is not properly encoded (first byte is not 31 + recovery id)
""" """
rid, rsig = int_to_bytes(sigrec[0] - 31), sigrec[1:] int_rid, rsig = sigrec[0] - 31, sigrec[1:]
rsig_rid = rsig + rid if int_rid < 0:
raise ValueError("Wrong SigRec")
else:
rid = int_to_bytes(int_rid)
return rsig_rid return rsig + rid
# FIXME: Common has no log file, so it needs to log in the same log as the caller. This is a temporary fix.
@@ -68,7 +93,7 @@ logger = None
class Cryptographer: class Cryptographer:
""" """
The :class:`Cryptographer` is the class in charge of all the cryptography in the tower. The :class:`Cryptographer` is in charge of all the cryptography in the tower.
""" """
@staticmethod @staticmethod
@@ -78,21 +103,21 @@ class Cryptographer:
formatted. formatted.
Args: Args:
data(:mod:`str`): the data to be encrypted. data(:obj:`str`): the data to be encrypted.
secret(:mod:`str`): the secret used to derive the encryption key. secret(:obj:`str`): the secret used to derive the encryption key.
Returns: Returns:
:obj:`bool`: Whether or not the ``key`` and ``data`` are properly formatted. :obj:`bool`: Whether or not the ``key`` and ``data`` are properly formatted.
Raises: Raises:
ValueError: if either the ``key`` or ``data`` is not properly formatted. :obj:`ValueError`: if either the ``key`` or ``data`` is not properly formatted.
""" """
if len(data) % 2: if len(data) % 2:
error = "Incorrect (Odd-length) value" error = "Incorrect (Odd-length) value"
raise ValueError(error) raise ValueError(error)
if not check_sha256_hex_format(secret): if not is_256b_hex_str(secret):
error = "Secret must be a 32-byte hex value (64 hex chars)" error = "Secret must be a 32-byte hex value (64 hex chars)"
raise ValueError(error) raise ValueError(error)
@@ -101,16 +126,19 @@ class Cryptographer:
@staticmethod @staticmethod
def encrypt(blob, secret): def encrypt(blob, secret):
""" """
Encrypts a given :mod:`Blob <common.cli.blob.Blob>` data using ``CHACHA20POLY1305``. Encrypts a given :obj:`Blob <common.cli.blob.Blob>` data using ``CHACHA20POLY1305``.
``SHA256(secret)`` is used as ``key``, and ``0 (12-byte)`` as ``iv``. ``SHA256(secret)`` is used as ``key``, and ``0 (12-byte)`` as ``iv``.
Args: Args:
blob (:mod:`Blob <common.cli.blob.Blob>`): a ``Blob`` object containing a raw penalty transaction. blob (:obj:`Blob <common.cli.blob.Blob>`): a ``Blob`` object containing a raw penalty transaction.
- secret (:mod:`str`): a value to used to derive the encryption key. Should be the dispute txid.
+ secret (:obj:`str`): a value used to derive the encryption key. Should be the dispute txid.
Returns: Returns:
:obj:`str`: The encrypted data (hex encoded). :obj:`str`: The encrypted data (hex encoded).
Raises:
:obj:`ValueError`: if either the ``secret`` or ``blob`` is not properly formatted.
""" """
Cryptographer.check_data_key_format(blob.data, secret) Cryptographer.check_data_key_format(blob.data, secret)
@@ -136,17 +164,20 @@ class Cryptographer:
# ToDo: #20-test-tx-decrypting-edge-cases # ToDo: #20-test-tx-decrypting-edge-cases
def decrypt(encrypted_blob, secret): def decrypt(encrypted_blob, secret):
""" """
Decrypts a given :mod:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` using ``CHACHA20POLY1305``. Decrypts a given :obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` using ``CHACHA20POLY1305``.
``SHA256(secret)`` is used as ``key``, and ``0 (12-byte)`` as ``iv``. ``SHA256(secret)`` is used as ``key``, and ``0 (12-byte)`` as ``iv``.
Args: Args:
encrypted_blob(:mod:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>`): an ``EncryptedBlob`` potentially encrypted_blob(:obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>`): an ``EncryptedBlob``
containing a penalty transaction. potentially containing a penalty transaction.
- secret (:mod:`str`): a value to used to derive the decryption key. Should be the dispute txid.
+ secret (:obj:`str`): a value used to derive the decryption key. Should be the dispute txid.
Returns: Returns:
:obj:`str`: The decrypted data (hex encoded). :obj:`str`: The decrypted data (hex encoded).
Raises:
:obj:`ValueError`: if either the ``secret`` or ``encrypted_blob`` is not properly formatted.
""" """
Cryptographer.check_data_key_format(encrypted_blob.data, secret) Cryptographer.check_data_key_format(encrypted_blob.data, secret)
@@ -198,7 +229,7 @@ class Cryptographer:
return key return key
except FileNotFoundError: except FileNotFoundError:
logger.error("Key file not found. Please check your settings") logger.error("Key file not found at {}. Please check your settings".format(file_path))
return None return None
except IOError as e: except IOError as e:
@@ -208,17 +239,14 @@ class Cryptographer:
@staticmethod @staticmethod
def load_private_key_der(sk_der): def load_private_key_der(sk_der):
""" """
Creates a :mod:`PrivateKey` object from a given ``DER`` encoded private key. Creates a :obj:`PrivateKey` from a given ``DER`` encoded private key.
Args: Args:
sk_der(:mod:`str`): a private key encoded in ``DER`` format. sk_der(:obj:`str`): a private key encoded in ``DER`` format.
Returns: Returns:
- :mod:`PrivateKey`: A ``PrivateKey`` object.
+ :obj:`PrivateKey` or :obj:`None`: A ``PrivateKey`` object if the private key can be loaded. ``None``
otherwise.
Raises:
ValueError: if the provided ``pk_der`` data cannot be deserialized (wrong size or format).
TypeError: if the provided ``pk_der`` data is not a string.
""" """
try: try:
sk = PrivateKey.from_der(sk_der) sk = PrivateKey.from_der(sk_der)
@@ -235,14 +263,14 @@ class Cryptographer:
@staticmethod @staticmethod
def sign(message, sk): def sign(message, sk):
""" """
Signs a given data using a given secret key using ECDSA. Signs a given data using a given secret key using ECDSA over secp256k1.
Args: Args:
message(:obj:`bytes`): the data to be signed. message(:obj:`bytes`): the data to be signed.
sk(:obj:`PrivateKey`): the ECDSA secret key used to signed the data. sk(:obj:`PrivateKey`): the ECDSA secret key used to signed the data.
Returns: Returns:
- :obj:`str`: The zbase32 signature of the given message.
+ :obj:`str` or :obj:`None`: The zbase32 signature of the given message if it can be signed. ``None`` otherwise.
""" """
if not isinstance(message, bytes): if not isinstance(message, bytes):
@@ -253,9 +281,14 @@ class Cryptographer:
logger.error("The value passed as sk is not a private key (EllipticCurvePrivateKey)") logger.error("The value passed as sk is not a private key (EllipticCurvePrivateKey)")
return None return None
rsig_rid = sk.sign_recoverable(LN_MESSAGE_PREFIX + message, hasher=sha256d) try:
sigrec = sigrec_encode(rsig_rid) rsig_rid = sk.sign_recoverable(LN_MESSAGE_PREFIX + message, hasher=sha256d)
zb32_sig = pyzbase32.encode_bytes(sigrec).decode() sigrec = sigrec_encode(rsig_rid)
zb32_sig = pyzbase32.encode_bytes(sigrec).decode()
except ValueError:
logger.error("Couldn't sign the message")
return None
return zb32_sig return zb32_sig
@@ -265,11 +298,11 @@ class Cryptographer:
Recovers an ECDSA public key from a given message and zbase32 signature. Recovers an ECDSA public key from a given message and zbase32 signature.
Args: Args:
message(:obj:`bytes`): the data to be signed. message(:obj:`bytes`): original message from where the signature was generated.
zb32_sig(:obj:`str`): the zbase32 signature of the message. zb32_sig(:obj:`str`): the zbase32 signature of the message.
Returns: Returns:
:obj:`PublicKey`: The recovered public key. :obj:`PublicKey` or :obj:`None`: The recovered public key if it can be recovered. `None` otherwise.
""" """
if not isinstance(message, bytes): if not isinstance(message, bytes):
@@ -281,9 +314,9 @@ class Cryptographer:
return None return None
sigrec = pyzbase32.decode_bytes(zb32_sig) sigrec = pyzbase32.decode_bytes(zb32_sig)
rsig_recid = sigrec_decode(sigrec)
try: try:
rsig_recid = sigrec_decode(sigrec)
pk = PublicKey.from_signature_and_message(rsig_recid, LN_MESSAGE_PREFIX + message, hasher=sha256d) pk = PublicKey.from_signature_and_message(rsig_recid, LN_MESSAGE_PREFIX + message, hasher=sha256d)
return pk return pk
@@ -295,9 +328,9 @@ class Cryptographer:
except Exception as e: except Exception as e:
if "failed to recover ECDSA public key" in str(e): if "failed to recover ECDSA public key" in str(e):
logger.error("Cannot recover public key from signature".format(type(rsig_recid))) logger.error("Cannot recover public key from signature")
else: else:
logger.error("Unknown exception", error=e) logger.error("Unknown exception", error=str(e))
return None return None
@@ -315,3 +348,28 @@ class Cryptographer:
""" """
return pk.point() == rpk.point() return pk.point() == rpk.point()
@staticmethod
def get_compressed_pk(pk):
"""
Computes a compressed, hex-encoded, public key given a ``PublicKey``.
Args:
pk(:obj:`PublicKey`): a given public key.
Returns:
:obj:`str` or :obj:`None`: A compressed, hex-encoded, public key (33-byte long) if it can be compressed.
``None`` otherwise.
"""
if not isinstance(pk, PublicKey):
logger.error("The received data is not a PublicKey object")
return None
try:
compressed_pk = pk.format(compressed=True)
return hexlify(compressed_pk).decode("utf-8")
except TypeError as e:
logger.error("PublicKey has invalid initializer", error=str(e))
return None
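Putting the pieces above together, the sign / recover / verify round trip can be exercised as follows (a minimal sketch: the logger wiring mirrors what the cli does earlier in this diff, the log prefix is arbitrary, and the key is ephemeral):

```python
from coincurve import PrivateKey

import common.cryptographer
from common.logger import Logger
from common.cryptographer import Cryptographer

# common has no log file of its own, so the caller injects a logger (see the FIXME above)
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix="demo")

sk = PrivateKey()          # ephemeral key, for illustration only
message = b"test message"

zb32_sig = Cryptographer.sign(message, sk)           # zbase32-encoded recoverable signature
rpk = Cryptographer.recover_pk(message, zb32_sig)    # PublicKey recovered from the signature

assert Cryptographer.verify_rpk(sk.public_key, rpk)  # point-wise comparison of the two keys
print(Cryptographer.get_compressed_pk(rpk))          # 33-byte compressed pk, hex-encoded
```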


@@ -15,9 +15,10 @@ class _StructuredMessage:
class Logger: class Logger:
""" """
The :class:`Logger` is the class in charge of logging events into the log file. The :class:`Logger` is in charge of logging events into the log file.
Args: Args:
log_name_prefix (:obj:`str`): the prefix of the logger where the data will be stored in (server, client, ...).
actor (:obj:`str`): the system actor that is logging the event (e.g. ``Watcher``, ``Cryptographer``, ...). actor (:obj:`str`): the system actor that is logging the event (e.g. ``Watcher``, ``Cryptographer``, ...).
""" """
@@ -52,7 +53,7 @@ class Logger:
Args: Args:
msg (:obj:`str`): the message to be logged. msg (:obj:`str`): the message to be logged.
kwargs: a ``key:value`` collection parameters to be added to the output. kwargs (:obj:`dict`): a ``key:value`` collection parameters to be added to the output.
""" """
self.f_logger.info(self._create_file_message(msg, **kwargs)) self.f_logger.info(self._create_file_message(msg, **kwargs))
@@ -64,7 +65,7 @@ class Logger:
Args: Args:
msg (:obj:`str`): the message to be logged. msg (:obj:`str`): the message to be logged.
kwargs: a ``key:value`` collection parameters to be added to the output. kwargs (:obj:`dict`): a ``key:value`` collection parameters to be added to the output.
""" """
self.f_logger.debug(self._create_file_message(msg, **kwargs)) self.f_logger.debug(self._create_file_message(msg, **kwargs))
@@ -76,7 +77,7 @@ class Logger:
Args: Args:
msg (:obj:`str`): the message to be logged. msg (:obj:`str`): the message to be logged.
kwargs: a ``key:value`` collection parameters to be added to the output. kwargs (:obj:`dict`): a ``key:value`` collection parameters to be added to the output.
""" """
self.f_logger.error(self._create_file_message(msg, **kwargs)) self.f_logger.error(self._create_file_message(msg, **kwargs))
@@ -88,7 +89,7 @@ class Logger:
Args: Args:
msg (:obj:`str`): the message to be logged. msg (:obj:`str`): the message to be logged.
kwargs: a ``key:value`` collection parameters to be added to the output. kwargs (:obj:`dict`): a ``key:value`` collection parameters to be added to the output.
""" """
self.f_logger.warning(self._create_file_message(msg, **kwargs)) self.f_logger.warning(self._create_file_message(msg, **kwargs))
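As a usage note for the ``key:value`` kwargs documented above, a minimal sketch (the log file name, prefix and actor are arbitrary):

```python
from common.logger import Logger
from common.tools import setup_logging

setup_logging("demo.log", "demo")  # configures the "demo_file_log" and "demo_console_log" loggers
logger = Logger(actor="Watcher", log_name_prefix="demo")
logger.info("The server returned an error", status_code=404, reason="Not Found")
```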


@@ -1,11 +1,24 @@
import re import re
import os
import logging import logging
from pathlib import Path from pathlib import Path
from common.constants import LOCATOR_LEN_HEX from common.constants import LOCATOR_LEN_HEX
def check_sha256_hex_format(value): def is_compressed_pk(value):
"""
Checks if a given value is a 33-byte hex-encoded string starting with 02 or 03.
Args:
value(:obj:`str`): the value to be checked.
Returns:
:obj:`bool`: Whether or not the value matches the format.
"""
return isinstance(value, str) and re.match(r"^0[2-3][0-9A-Fa-f]{64}$", value) is not None
def is_256b_hex_str(value):
""" """
Checks if a given value is a 32-byte hex encoded string. Checks if a given value is a 32-byte hex encoded string.
@@ -18,7 +31,7 @@ def check_sha256_hex_format(value):
return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{64}$", value) is not None return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{64}$", value) is not None
def check_locator_format(value): def is_locator(value):
""" """
Checks if a given value is a 16-byte hex encoded string. Checks if a given value is a 16-byte hex encoded string.
@@ -48,7 +61,7 @@ def setup_data_folder(data_folder):
Creates a data folder for either the client or the server side if the folder does not exist.
Args: Args:
data_folder (:obj:`str`): the path of the folder data_folder (:obj:`str`): the path of the folder.
""" """
Path(data_folder).mkdir(parents=True, exist_ok=True) Path(data_folder).mkdir(parents=True, exist_ok=True)
@@ -56,9 +69,12 @@ def setup_data_folder(data_folder):
def setup_logging(log_file_path, log_name_prefix): def setup_logging(log_file_path, log_name_prefix):
""" """
- Setups a couple of loggers (console and file) given a prefix and a file path. The log names are:
- prefix | _file_log and prefix | _console_log
+ Sets up a couple of loggers (console and file) given a prefix and a file path.
+ The log names are:
prefix | _file_log
prefix | _console_log
Args: Args:
log_file_path (:obj:`str`): the path of the file to output the file log. log_file_path (:obj:`str`): the path of the file to output the file log.
@@ -67,10 +83,10 @@ def setup_logging(log_file_path, log_name_prefix):
if not isinstance(log_file_path, str): if not isinstance(log_file_path, str):
print(log_file_path) print(log_file_path)
raise ValueError("Wrong log file path.") raise ValueError("Wrong log file path")
if not isinstance(log_name_prefix, str): if not isinstance(log_name_prefix, str):
raise ValueError("Wrong log file name.") raise ValueError("Wrong log file name")
# Create the file logger # Create the file logger
f_logger = logging.getLogger("{}_file_log".format(log_name_prefix)) f_logger = logging.getLogger("{}_file_log".format(log_name_prefix))
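A quick illustration of the renamed helpers (the values are made up but match the documented formats):

```python
from common.tools import is_compressed_pk, is_256b_hex_str, is_locator

txid = "aa" * 32                           # 32 bytes, 64 hex characters
assert is_256b_hex_str(txid)
assert is_locator(txid[:32])               # locators are 16 bytes, 32 hex characters
assert is_compressed_pk("02" + "11" * 32)  # 33 bytes, starting with 02 or 03
```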


@@ -1,4 +1,6 @@
pytest
black
flake8
responses
bitcoind_mock===0.0.4


@@ -1,5 +1,4 @@
import os import os
from teos.utils.auth_proxy import AuthServiceProxy
HOST = "0.0.0.0" HOST = "0.0.0.0"
PORT = 9814 PORT = 9814
@@ -10,17 +9,19 @@ LOG_PREFIX = "teos"
# Default conf fields
DEFAULT_CONF = {
"BTC_RPC_USER": {"value": "user", "type": str},
- "BTC_RPC_PASSWD": {"value": "passwd", "type": str},
+ "BTC_RPC_PASSWORD": {"value": "passwd", "type": str},
"BTC_RPC_CONNECT": {"value": "127.0.0.1", "type": str},
"BTC_RPC_PORT": {"value": 8332, "type": int},
"BTC_NETWORK": {"value": "mainnet", "type": str},
"FEED_PROTOCOL": {"value": "tcp", "type": str},
"FEED_CONNECT": {"value": "127.0.0.1", "type": str},
"FEED_PORT": {"value": 28332, "type": int},
- "MAX_APPOINTMENTS": {"value": 100, "type": int},
+ "MAX_APPOINTMENTS": {"value": 1000000, "type": int},
"DEFAULT_SLOTS": {"value": 100, "type": int},
"EXPIRY_DELTA": {"value": 6, "type": int},
"MIN_TO_SELF_DELAY": {"value": 20, "type": int},
"LOG_FILE": {"value": "teos.log", "type": str, "path": True},
"TEOS_SECRET_KEY": {"value": "teos_sk.der", "type": str, "path": True},
- "DB_PATH": {"value": "appointments", "type": str, "path": True},
+ "APPOINTMENTS_DB_PATH": {"value": "appointments", "type": str, "path": True},
"USERS_DB_PATH": {"value": "users", "type": str, "path": True},
}


@@ -1,13 +1,22 @@
import os import os
import json
import logging import logging
from math import ceil
from flask import Flask, request, abort, jsonify from flask import Flask, request, abort, jsonify
import teos.errors as errors
from teos import HOST, PORT, LOG_PREFIX from teos import HOST, PORT, LOG_PREFIX
from common.logger import Logger from teos.inspector import InspectionFailed
from common.appointment import Appointment from teos.gatekeeper import NotEnoughSlots, IdentificationFailure
from common.constants import HTTP_OK, HTTP_BAD_REQUEST, HTTP_SERVICE_UNAVAILABLE, LOCATOR_LEN_HEX from common.logger import Logger
from common.cryptographer import hash_160
from common.constants import (
HTTP_OK,
HTTP_BAD_REQUEST,
HTTP_SERVICE_UNAVAILABLE,
HTTP_NOT_FOUND,
ENCRYPTED_BLOB_MAX_SIZE_HEX,
)
# ToDo: #5-add-async-to-api # ToDo: #5-add-async-to-api
@@ -15,90 +24,220 @@ app = Flask(__name__)
logger = Logger(actor="API", log_name_prefix=LOG_PREFIX) logger = Logger(actor="API", log_name_prefix=LOG_PREFIX)
# NOTCOVERED: not sure how to monkey patch this one. May be related to #77
def get_remote_addr():
"""
Gets the remote client ip address. The HTTP_X_REAL_IP field is tried first in case the server is behind a reverse
proxy.
Returns:
:obj:`str`: the IP address of the client.
"""
# Getting the real IP if the server is behind a reverse proxy
remote_addr = request.environ.get("HTTP_X_REAL_IP")
if not remote_addr:
remote_addr = request.environ.get("REMOTE_ADDR")
return remote_addr
# NOTCOVERED: not sure how to monkey patch this one. May be related to #77
def get_request_data_json(request):
"""
Gets the content of a json POST request and makes sure it decodes to a dictionary.
Args:
request (:obj:`Request`): the request sent by the user.
Returns:
:obj:`dict`: the dictionary parsed from the json request.
Raises:
:obj:`TypeError`: if the request is not json encoded or it does not decode to a dictionary.
"""
if request.is_json:
request_data = request.get_json()
if isinstance(request_data, dict):
return request_data
else:
raise TypeError("Invalid request content")
else:
raise TypeError("Request is not json encoded")
class API: class API:
""" """
The :class:`API` is in charge of the interface between the user and the tower. It handles and server user requests. The :class:`API` is in charge of the interface between the user and the tower. It handles and serves user requests.
Args: Args:
inspector (:obj:`Inspector <teos.inspector.Inspector>`): an ``Inspector`` instance to check the correctness of inspector (:obj:`Inspector <teos.inspector.Inspector>`): an ``Inspector`` instance to check the correctness of
the received data. the received appointment data.
watcher (:obj:`Watcher <teos.watcher.Watcher>`): a ``Watcher`` instance to pass the requests to. watcher (:obj:`Watcher <teos.watcher.Watcher>`): a ``Watcher`` instance to pass the requests to.
gatekeeper (:obj:`Gatekeeper <teos.gatekeeper.Gatekeeper>`): a ``Gatekeeper`` instance in charge of controlling
user access.
""" """
def __init__(self, inspector, watcher): def __init__(self, inspector, watcher, gatekeeper):
self.inspector = inspector self.inspector = inspector
self.watcher = watcher self.watcher = watcher
self.gatekeeper = gatekeeper
self.app = app
# Adds all the routes to the functions listed above.
routes = {
"/register": (self.register, ["POST"]),
"/add_appointment": (self.add_appointment, ["POST"]),
"/get_appointment": (self.get_appointment, ["POST"]),
"/get_all_appointments": (self.get_all_appointments, ["GET"]),
}
for url, params in routes.items():
app.add_url_rule(url, view_func=params[0], methods=params[1])
def register(self):
"""
Registers a user by creating a subscription.
Registration is pretty straightforward for now, since it does not require payments.
The number of slots cannot be requested by the user yet either; this is linked to the previous point.
Users register by sending a public key to the proper endpoint. This is exploitable at the moment, but will be
addressed when payments are introduced.
Returns:
:obj:`tuple`: A tuple containing the response (:obj:`str`) and response code (:obj:`int`). For accepted
requests, the ``rcode`` is always 200 and the response contains a json with the public key and number of
slots in the subscription. For rejected requests, the ``rcode`` is 400 and the response contains an
application error and an error message. Error messages can be found at :mod:`Errors <teos.errors>`.
"""
remote_addr = get_remote_addr()
logger.info("Received register request", from_addr="{}".format(remote_addr))
# Check that data type and content are correct. Abort otherwise.
try:
request_data = get_request_data_json(request)
except TypeError as e:
logger.info("Received invalid register request", from_addr="{}".format(remote_addr))
return abort(HTTP_BAD_REQUEST, e)
client_pk = request_data.get("public_key")
if client_pk:
try:
rcode = HTTP_OK
available_slots = self.gatekeeper.add_update_user(client_pk)
response = {"public_key": client_pk, "available_slots": available_slots}
except ValueError as e:
rcode = HTTP_BAD_REQUEST
error = "Error {}: {}".format(errors.REGISTRATION_MISSING_FIELD, str(e))
response = {"error": error}
else:
rcode = HTTP_BAD_REQUEST
error = "Error {}: public_key not found in register message".format(errors.REGISTRATION_WRONG_FIELD_FORMAT)
response = {"error": error}
logger.info("Sending response and disconnecting", from_addr="{}".format(remote_addr), response=response)
return jsonify(response), rcode
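A hedged client-side sketch of the registration flow above: the endpoint path and JSON field names come from the code, the host and port are the documented defaults, and the key value is purely illustrative.

```python
import requests

# Illustrative only: the tower is assumed to listen on the default HOST/PORT and
# `user_pk` stands in for the client's hex-encoded public key.
user_pk = "02" + "ab" * 32

r = requests.post("http://localhost:9814/register", json={"public_key": user_pk})
print(r.status_code, r.json())
# 200 -> {"public_key": ..., "available_slots": ...}; 400 -> {"error": ...}
```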
def add_appointment(self): def add_appointment(self):
""" """
Main endpoint of the Watchtower. Main endpoint of the Watchtower.
The client sends requests (appointments) to this endpoint to request a job to the Watchtower. Requests must be The client sends requests (appointments) to this endpoint to request a job to the Watchtower. Requests must be
json encoded and contain an ``appointment`` field and optionally a ``signature`` and ``public_key`` fields. json encoded and contain ``appointment`` and ``signature`` fields.
Returns: Returns:
:obj:`tuple`: A tuple containing the response (``json``) and response code (``int``). For accepted :obj:`tuple`: A tuple containing the response (:obj:`str`) and response code (:obj:`int`). For accepted
appointments, the ``rcode`` is always 0 and the response contains the signed receipt. For rejected appointments, the ``rcode`` is always 200 and the response contains the receipt signature (json). For
appointments, the ``rcode`` is a negative value and the response contains the error message. Error messages rejected appointments, the ``rcode`` is a 404 and the value contains an application error, and an error
can be found at :mod:`Errors <teos.errors>`. message. Error messages can be found at :mod:`Errors <teos.errors>`.
""" """
# Getting the real IP if the server is behind a reverse proxy # Getting the real IP if the server is behind a reverse proxy
remote_addr = request.environ.get("HTTP_X_REAL_IP") remote_addr = get_remote_addr()
if not remote_addr:
remote_addr = request.environ.get("REMOTE_ADDR")
logger.info("Received add_appointment request", from_addr="{}".format(remote_addr)) logger.info("Received add_appointment request", from_addr="{}".format(remote_addr))
# FIXME: Logging every request so we can get better understanding of bugs in the alpha # Check that data type and content are correct. Abort otherwise.
logger.debug("Request details", data="{}".format(request.data)) try:
request_data = get_request_data_json(request)
if request.is_json: except TypeError as e:
# Check content type once if properly defined return abort(HTTP_BAD_REQUEST, e)
request_data = json.loads(request.get_json())
appointment = self.inspector.inspect(
request_data.get("appointment"), request_data.get("signature"), request_data.get("public_key")
)
error = None # We kind of have the chicken and egg problem here. Data must be verified and the signature must be checked:
response = None # - If we verify the data first, we may find that the signature is wrong and have wasted some time.
# - If we check the signature first, we may need to verify some of the information anyway, or risk building
# appointments with potentially wrong data, which may be exploitable.
#
# The first approach seems safer since it only implies a bunch of pretty quick checks.
if type(appointment) == Appointment: try:
appointment_added, signature = self.watcher.add_appointment(appointment) appointment = self.inspector.inspect(request_data.get("appointment"))
user_pk = self.gatekeeper.identify_user(appointment.serialize(), request_data.get("signature"))
if appointment_added: # Check if the appointment is an update. Updates will return a summary.
rcode = HTTP_OK appointment_uuid = hash_160("{}{}".format(appointment.locator, user_pk))
response = {"locator": appointment.locator, "signature": signature} appointment_summary = self.watcher.get_appointment_summary(appointment_uuid)
else: if appointment_summary:
rcode = HTTP_SERVICE_UNAVAILABLE used_slots = ceil(appointment_summary.get("size") / ENCRYPTED_BLOB_MAX_SIZE_HEX)
error = "appointment rejected" required_slots = ceil(len(appointment.encrypted_blob.data) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
slot_diff = required_slots - used_slots
elif type(appointment) == tuple: # For updates we only reserve the slot difference provided the new one is bigger.
rcode = HTTP_BAD_REQUEST required_slots = slot_diff if slot_diff > 0 else 0
error = "appointment rejected. Error {}: {}".format(appointment[0], appointment[1])
else: else:
# We should never end up here, since inspect only returns appointments or tuples. Just in case. # For regular appointments 1 slot is reserved per ENCRYPTED_BLOB_MAX_SIZE_HEX block.
rcode = HTTP_BAD_REQUEST slot_diff = 0
error = "appointment rejected. Request does not match the standard" required_slots = ceil(len(appointment.encrypted_blob.data) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
else: # Slots are reserved before adding the appointments to prevent race conditions.
# DISCUSS: It may be worth using signals here to avoid race conditions anyway.
self.gatekeeper.fill_slots(user_pk, required_slots)
appointment_added, signature = self.watcher.add_appointment(appointment, user_pk)
if appointment_added:
# If the appointment is added and the update is smaller than the original, the difference is given back.
if slot_diff < 0:
self.gatekeeper.free_slots(user_pk, abs(slot_diff))
rcode = HTTP_OK
response = {
"locator": appointment.locator,
"signature": signature,
"available_slots": self.gatekeeper.registered_users[user_pk].get("available_slots"),
}
else:
# If the appointment is not added the reserved slots are given back
self.gatekeeper.free_slots(user_pk, required_slots)
rcode = HTTP_SERVICE_UNAVAILABLE
response = {"error": "appointment rejected"}
except InspectionFailed as e:
rcode = HTTP_BAD_REQUEST rcode = HTTP_BAD_REQUEST
error = "appointment rejected. Request is not json encoded" error = "appointment rejected. Error {}: {}".format(e.erno, e.reason)
response = None response = {"error": error}
logger.info( except (IdentificationFailure, NotEnoughSlots):
"Sending response and disconnecting", from_addr="{}".format(remote_addr), response=response, error=error rcode = HTTP_BAD_REQUEST
) error = "appointment rejected. Error {}: {}".format(
errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS,
"Invalid signature or user does not have enough slots available",
)
response = {"error": error}
if error is None: logger.info("Sending response and disconnecting", from_addr="{}".format(remote_addr), response=response)
return jsonify(response), rcode return jsonify(response), rcode
else:
return jsonify({"error": error}), rcode
# FIXME: THE NEXT TWO API ENDPOINTS ARE FOR TESTING AND SHOULD BE REMOVED / PROPERLY MANAGED BEFORE PRODUCTION!
# ToDo: #17-add-api-keys
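To make the slot arithmetic in `add_appointment` concrete, here is a small worked example. The `ENCRYPTED_BLOB_MAX_SIZE_HEX` value below is made up for illustration; the real constant lives in `common.constants`.

```python
from math import ceil

ENCRYPTED_BLOB_MAX_SIZE_HEX = 4096   # illustrative value, not the real constant

new_blob_len = 10000                 # length of the incoming encrypted_blob (hex chars)
required_slots = ceil(new_blob_len / ENCRYPTED_BLOB_MAX_SIZE_HEX)   # 3

# If this were an update of an appointment that previously used 2 slots, only the
# difference would be reserved up front; a smaller update reserves 0 slots and the
# surplus is freed once the appointment is accepted.
used_slots = 2
slot_diff = required_slots - used_slots          # 1
reserved = slot_diff if slot_diff > 0 else 0     # 1
```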
def get_appointment(self): def get_appointment(self):
""" """
Gives information about a given appointment state in the Watchtower. Gives information about a given appointment state in the Watchtower.
@@ -106,7 +245,9 @@ class API:
The information is requested by ``locator``. The information is requested by ``locator``.
Returns: Returns:
:obj:`dict`: A json formatted dictionary containing information about the requested appointment. :obj:`str`: A json formatted dictionary containing information about the requested appointment.
Returns not found if the user does not have the requested appointment or the locator is invalid.
A ``status`` flag is added to the data provided by either the :obj:`Watcher <teos.watcher.Watcher>` or the A ``status`` flag is added to the data provided by either the :obj:`Watcher <teos.watcher.Watcher>` or the
:obj:`Responder <teos.responder.Responder>` that signals the status of the appointment. :obj:`Responder <teos.responder.Responder>` that signals the status of the appointment.
@@ -117,44 +258,54 @@ class API:
""" """
# Getting the real IP if the server is behind a reverse proxy # Getting the real IP if the server is behind a reverse proxy
remote_addr = request.environ.get("HTTP_X_REAL_IP") remote_addr = get_remote_addr()
if not remote_addr:
remote_addr = request.environ.get("REMOTE_ADDR")
locator = request.args.get("locator") # Check that data type and content are correct. Abort otherwise.
response = [] try:
request_data = get_request_data_json(request)
logger.info("Received get_appointment request", from_addr="{}".format(remote_addr), locator=locator) except TypeError as e:
logger.info("Received invalid get_appointment request", from_addr="{}".format(remote_addr))
return abort(HTTP_BAD_REQUEST, e)
# ToDo: #15-add-system-monitor locator = request_data.get("locator")
if not isinstance(locator, str) or len(locator) != LOCATOR_LEN_HEX:
response.append({"locator": locator, "status": "not_found"})
return jsonify(response)
locator_map = self.watcher.db_manager.load_locator_map(locator) try:
triggered_appointments = self.watcher.db_manager.load_all_triggered_flags() self.inspector.check_locator(locator)
logger.info("Received get_appointment request", from_addr="{}".format(remote_addr), locator=locator)
if locator_map is not None: message = "get appointment {}".format(locator).encode()
for uuid in locator_map: signature = request_data.get("signature")
if uuid not in triggered_appointments: user_pk = self.gatekeeper.identify_user(message, signature)
appointment_data = self.watcher.db_manager.load_watcher_appointment(uuid)
if appointment_data is not None: triggered_appointments = self.watcher.db_manager.load_all_triggered_flags()
appointment_data["status"] = "being_watched" uuid = hash_160("{}{}".format(locator, user_pk))
response.append(appointment_data)
tracker_data = self.watcher.db_manager.load_responder_tracker(uuid) # If the appointment has been triggered, it should be in the locator (default else just in case).
if uuid in triggered_appointments:
appointment_data = self.watcher.db_manager.load_responder_tracker(uuid)
if appointment_data:
rcode = HTTP_OK
response = {"locator": locator, "status": "dispute_responded", "appointment": appointment_data}
else:
rcode = HTTP_NOT_FOUND
response = {"locator": locator, "status": "not_found"}
if tracker_data is not None: # Otherwise it should be either in the watcher, or not in the system.
tracker_data["status"] = "dispute_responded" else:
response.append(tracker_data) appointment_data = self.watcher.db_manager.load_watcher_appointment(uuid)
if appointment_data:
rcode = HTTP_OK
response = {"locator": locator, "status": "being_watched", "appointment": appointment_data}
else:
rcode = HTTP_NOT_FOUND
response = {"locator": locator, "status": "not_found"}
else: except (InspectionFailed, IdentificationFailure):
response.append({"locator": locator, "status": "not_found"}) rcode = HTTP_NOT_FOUND
response = {"locator": locator, "status": "not_found"}
response = jsonify(response) return jsonify(response), rcode
return response
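Request-shape sketch for the endpoint above: the signed message format ("get appointment " + locator) matches the code, but the signature below is a placeholder, so a real tower would answer 404.

```python
import requests

locator = "00" * 16   # 16-byte hex-encoded locator, illustrative value
payload = {
    "locator": locator,
    # Placeholder: a real client signs "get appointment " + locator with its key.
    "signature": "<signature over the get appointment message>",
}

r = requests.post("http://localhost:9814/get_appointment", json=payload)
print(r.status_code, r.json())   # 200 with appointment data, or 404 {"status": "not_found"}
```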
def get_all_appointments(self): def get_all_appointments(self):
""" """
@@ -163,10 +314,8 @@ class API:
This endpoint should only be accessible by the administrator. Requests are only allowed from localhost. This endpoint should only be accessible by the administrator. Requests are only allowed from localhost.
Returns: Returns:
:obj:`str`: A json formatted dictionary containing all the appointments held by the ``Watcher``
(``watcher_appointments``) and by the ``Responder`` (``responder_trackers``).
""" """
# ToDo: #15-add-system-monitor # ToDo: #15-add-system-monitor
@@ -185,19 +334,10 @@ class API:
def start(self): def start(self):
""" """
This function starts the Flask server used to run the API. Adds all the routes to the functions listed above. This function starts the Flask server used to run the API.
""" """
routes = { # Setting Flask log to ERROR only so it does not mess with our logging. Also disabling flask initial messages
"/": (self.add_appointment, ["POST"]),
"/get_appointment": (self.get_appointment, ["GET"]),
"/get_all_appointments": (self.get_all_appointments, ["GET"]),
}
for url, params in routes.items():
app.add_url_rule(url, view_func=params[0], methods=params[1])
# Setting Flask log to ERROR only so it does not mess with out logging. Also disabling flask initial messages
logging.getLogger("werkzeug").setLevel(logging.ERROR) logging.getLogger("werkzeug").setLevel(logging.ERROR)
os.environ["WERKZEUG_RUN_MAIN"] = "true" os.environ["WERKZEUG_RUN_MAIN"] = "true"

508
teos/appointments_dbm.py Normal file
View File

@@ -0,0 +1,508 @@
import json
import plyvel
from teos.db_manager import DBManager
from teos import LOG_PREFIX
from common.logger import Logger
logger = Logger(actor="AppointmentsDBM", log_name_prefix=LOG_PREFIX)
WATCHER_PREFIX = "w"
WATCHER_LAST_BLOCK_KEY = "bw"
RESPONDER_PREFIX = "r"
RESPONDER_LAST_BLOCK_KEY = "br"
LOCATOR_MAP_PREFIX = "m"
TRIGGERED_APPOINTMENTS_PREFIX = "ta"
class AppointmentsDBM(DBManager):
"""
The :class:`AppointmentsDBM` is in charge of interacting with the appointments database (``LevelDB``).
Keys and values are stored as bytes in the database but processed as strings by the manager.
The database is split in six prefixes:
- ``WATCHER_PREFIX``, defined as ``b'w``, is used to store :obj:`Watcher <teos.watcher.Watcher>` appointments.
- ``RESPONDER_PREFIX``, defined as ``b'r``, is used to store :obj:`Responder <teos.responder.Responder>` trackers.
- ``WATCHER_LAST_BLOCK_KEY``, defined as ``b'bw``, is used to store the last block hash known by the :obj:`Watcher <teos.watcher.Watcher>`.
- ``RESPONDER_LAST_BLOCK_KEY``, defined as ``b'br``, is used to store the last block hash known by the :obj:`Responder <teos.responder.Responder>`.
- ``LOCATOR_MAP_PREFIX``, defined as ``b'm``, is used to store the ``locator:uuid`` maps.
- ``TRIGGERED_APPOINTMENTS_PREFIX``, defined as ``b'ta``, is used to store triggered appointments (appointments that have been handed to the :obj:`Responder <teos.responder.Responder>`).
Args:
db_path (:obj:`str`): the path (relative or absolute) to the system folder containing the database. A fresh
database will be created if the specified path does not contain one.
Raises:
:obj:`ValueError`: If the provided ``db_path`` is not a string.
:obj:`plyvel.Error`: If the db is currently unavailable (being used by another process).
"""
def __init__(self, db_path):
if not isinstance(db_path, str):
raise ValueError("db_path must be a valid path/name")
try:
super().__init__(db_path)
except plyvel.Error as e:
if "LOCK: Resource temporarily unavailable" in str(e):
logger.info("The db is already being used by another process (LOCK)")
raise e
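The prefixes above translate into simple key namespacing. A short illustrative sketch of how the raw LevelDB keys end up looking (the uuid value is made up):

```python
# Illustrative only: how the prefixes compose the raw database keys.
uuid = "1a2b3c4d" * 4

watcher_key = (WATCHER_PREFIX + uuid).encode("utf-8")                   # b"w1a2b..."
tracker_key = (RESPONDER_PREFIX + uuid).encode("utf-8")                 # b"r1a2b..."
triggered_key = (TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8")  # b"ta1a2b..."
```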
def load_appointments_db(self, prefix):
"""
Loads all data from the appointments database given a prefix. Two prefixes are defined: ``WATCHER_PREFIX`` and
``RESPONDER_PREFIX``.
Args:
prefix (:obj:`str`): the prefix of the data to load.
Returns:
:obj:`dict`: A dictionary containing the requested data (appointments or trackers) indexed by ``uuid``.
Returns an empty dictionary if no data is found.
"""
data = {}
for k, v in self.db.iterator(prefix=prefix.encode("utf-8")):
# Get uuid and appointment_data from the db
uuid = k[len(prefix) :].decode("utf-8")
data[uuid] = json.loads(v)
return data
def get_last_known_block(self, key):
"""
Loads the last known block given a key.
Args:
key (:obj:`str`): the identifier of the db to look into (either ``WATCHER_LAST_BLOCK_KEY`` or
``RESPONDER_LAST_BLOCK_KEY``).
Returns:
:obj:`str` or :obj:`None`: A 32-byte hex-encoded str representing the last known block hash.
Returns ``None`` if the entry is not found.
"""
last_block = self.db.get(key.encode("utf-8"))
if last_block:
last_block = last_block.decode("utf-8")
return last_block
def load_watcher_appointment(self, uuid):
"""
Loads an appointment from the database using ``WATCHER_PREFIX`` as prefix to the given ``uuid``.
Args:
uuid (:obj:`str`): the appointment's unique identifier.
Returns:
:obj:`dict`: A dictionary containing the appointment data if the ``key`` is found.
Returns ``None`` otherwise.
"""
try:
data = self.load_entry(uuid, prefix=WATCHER_PREFIX)
data = json.loads(data)
except (TypeError, json.decoder.JSONDecodeError):
data = None
return data
def load_responder_tracker(self, uuid):
"""
Loads a tracker from the database using ``RESPONDER_PREFIX`` as a prefix to the given ``uuid``.
Args:
uuid (:obj:`str`): the tracker's unique identifier.
Returns:
:obj:`dict`: A dictionary containing the tracker data if the ``key`` is found.
Returns ``None`` otherwise.
"""
try:
data = self.load_entry(uuid, prefix=RESPONDER_PREFIX)
data = json.loads(data)
except (TypeError, json.decoder.JSONDecodeError):
data = None
return data
def load_watcher_appointments(self, include_triggered=False):
"""
Loads all the appointments from the database (all entries with the ``WATCHER_PREFIX`` prefix).
Args:
include_triggered (:obj:`bool`): whether to include the appointments flagged as triggered or not. ``False``
by default.
Returns:
:obj:`dict`: A dictionary with all the appointments stored in the database. An empty dictionary if there
are none.
"""
appointments = self.load_appointments_db(prefix=WATCHER_PREFIX)
triggered_appointments = self.load_all_triggered_flags()
if not include_triggered:
not_triggered = list(set(appointments.keys()).difference(triggered_appointments))
appointments = {uuid: appointments[uuid] for uuid in not_triggered}
return appointments
def load_responder_trackers(self):
"""
Loads all the trackers from the database (all entries with the ``RESPONDER_PREFIX`` prefix).
Returns:
:obj:`dict`: A dictionary with all the trackers stored in the database. An empty dictionary if there are
none.
"""
return self.load_appointments_db(prefix=RESPONDER_PREFIX)
def store_watcher_appointment(self, uuid, appointment):
"""
Stores an appointment in the database using the ``WATCHER_PREFIX`` prefix.
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
appointment (:obj:`dict`): an appointment encoded as dictionary.
Returns:
:obj:`bool`: True if the appointment was stored in the db. False otherwise.
"""
try:
self.create_entry(uuid, json.dumps(appointment), prefix=WATCHER_PREFIX)
logger.info("Adding appointment to Watchers's db", uuid=uuid)
return True
except json.JSONDecodeError:
logger.info("Could't add appointment to db. Wrong appointment format.", uuid=uuid, appoinent=appointment)
return False
except TypeError:
logger.info("Could't add appointment to db.", uuid=uuid, appoinent=appointment)
return False
def store_responder_tracker(self, uuid, tracker):
"""
Stores a tracker in the database using the ``RESPONDER_PREFIX`` prefix.
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
tracker (:obj:`dict`): a tracker encoded as dictionary.
Returns:
:obj:`bool`: True if the tracker was stored in the db. False otherwise.
"""
try:
self.create_entry(uuid, json.dumps(tracker), prefix=RESPONDER_PREFIX)
logger.info("Adding tracker to Responder's db", uuid=uuid)
return True
except json.JSONDecodeError:
logger.info("Could't add tracker to db. Wrong tracker format.", uuid=uuid, tracker=tracker)
return False
except TypeError:
logger.info("Could't add tracker to db.", uuid=uuid, tracker=tracker)
return False
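A hedged round-trip sketch using the store/load pairs above; the db path and appointment contents are made up:

```python
# Illustrative usage only.
dbm = AppointmentsDBM("example_appointments_db")

uuid = "deadbeef" * 4
appointment = {"locator": "00" * 16, "end_time": 500000, "encrypted_blob": "ab" * 100}

dbm.store_watcher_appointment(uuid, appointment)        # -> True
assert dbm.load_watcher_appointment(uuid) == appointment
```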
def load_locator_map(self, locator):
"""
Loads the ``locator:uuid`` map of a given ``locator`` from the database.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string representing the appointment locator.
Returns:
:obj:`dict` or :obj:`None`: The requested ``locator:uuid`` map if found.
Returns ``None`` otherwise.
"""
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
locator_map = self.db.get(key)
if locator_map is not None:
locator_map = json.loads(locator_map.decode("utf-8"))
else:
logger.info("Locator not found in the db", locator=locator)
return locator_map
def create_append_locator_map(self, locator, uuid):
"""
Creates (or appends to if already exists) a ``locator:uuid`` map.
If the map already exists, the new ``uuid`` is appended to the existing ones (if it is not already there).
Args:
locator (:obj:`str`): a 16-byte hex-encoded string used as the key of the map.
uuid (:obj:`str`): a 16-byte hex-encoded unique id to create (or add to) the map.
"""
locator_map = self.load_locator_map(locator)
if locator_map is not None:
if uuid not in locator_map:
locator_map.append(uuid)
logger.info("Updating locator map", locator=locator, uuid=uuid)
else:
logger.info("UUID already in the map", locator=locator, uuid=uuid)
else:
locator_map = [uuid]
logger.info("Creating new locator map", locator=locator, uuid=uuid)
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
self.db.put(key, json.dumps(locator_map).encode("utf-8"))
def update_locator_map(self, locator, locator_map):
"""
Updates a ``locator:uuid`` map in the database by deleting one of its uuids. It will only work as long as
the given ``locator_map`` is a subset of the current one and is not empty.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string used as the key of the map.
locator_map (:obj:`list`): a list of uuids to replace the current one on the db.
"""
current_locator_map = self.load_locator_map(locator)
if set(locator_map).issubset(current_locator_map) and len(locator_map) != 0:
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
self.db.put(key, json.dumps(locator_map).encode("utf-8"))
else:
logger.error("Trying to update a locator_map with completely different, or empty, data")
def delete_locator_map(self, locator):
"""
Deletes a ``locator:uuid`` map.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string identifying the map to delete.
Returns:
:obj:`bool`: True if the locator map was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(locator, prefix=LOCATOR_MAP_PREFIX)
logger.info("Deleting locator map from db", locator=locator)
return True
except TypeError:
logger.info("Couldn't delete locator map from db, locator has wrong type", locator=locator)
return False
def delete_watcher_appointment(self, uuid):
"""
Deletes an appointment from the database.
Args:
uuid (:obj:`str`): a 16-byte hex-encoded string identifying the appointment to be deleted.
Returns:
:obj:`bool`: True if the appointment was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(uuid, prefix=WATCHER_PREFIX)
logger.info("Deleting appointment from Watcher's db", uuid=uuid)
return True
except TypeError:
logger.info("Couldn't delete appointment from db, uuid has wrong type", uuid=uuid)
return False
def batch_delete_watcher_appointments(self, uuids):
"""
Deletes a list of appointments from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the appointments to be deleted.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((WATCHER_PREFIX + uuid).encode("utf-8"))
logger.info("Deleting appointment from Watcher's db", uuid=uuid)
def delete_responder_tracker(self, uuid):
"""
Deletes a tracker from the database.
Args:
uuid (:obj:`str`): a 16-byte hex-encoded string identifying the tracker to be deleted.
Returns:
:obj:`bool`: True if the tracker was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(uuid, prefix=RESPONDER_PREFIX)
logger.info("Deleting tracker from Responder's db", uuid=uuid)
return True
except TypeError:
logger.info("Couldn't delete tracker from db, uuid has wrong type", uuid=uuid)
return False
def batch_delete_responder_trackers(self, uuids):
"""
Deletes a list of trackers from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the trackers to be deleted.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((RESPONDER_PREFIX + uuid).encode("utf-8"))
logger.info("Deleting appointment from Responder's db", uuid=uuid)
def load_last_block_hash_watcher(self):
"""
Loads the last known block hash of the :obj:`Watcher <teos.watcher.Watcher>` from the database.
Returns:
:obj:`str` or :obj:`None`: A 32-byte hex-encoded string representing the last known block hash if found.
Returns ``None`` otherwise.
"""
return self.get_last_known_block(WATCHER_LAST_BLOCK_KEY)
def load_last_block_hash_responder(self):
"""
Loads the last known block hash of the :obj:`Responder <teos.responder.Responder>` from the database.
Returns:
:obj:`str` or :obj:`None`: A 32-byte hex-encoded string representing the last known block hash if found.
Returns ``None`` otherwise.
"""
return self.get_last_known_block(RESPONDER_LAST_BLOCK_KEY)
def store_last_block_hash_watcher(self, block_hash):
"""
Stores a block hash as the last known block of the :obj:`Watcher <teos.watcher.Watcher>`.
Args:
block_hash (:obj:`str`): the block hash to be stored (32-byte hex-encoded)
Returns:
:obj:`bool`: True if the block hash was stored in the db. False otherwise.
"""
try:
self.create_entry(WATCHER_LAST_BLOCK_KEY, block_hash)
return True
except (TypeError, json.JSONDecodeError):
return False
def store_last_block_hash_responder(self, block_hash):
"""
Stores a block hash as the last known block of the :obj:`Responder <teos.responder.Responder>`.
Args:
block_hash (:obj:`str`): the block hash to be stored (32-byte hex-encoded)
Returns:
:obj:`bool`: True if the block hash was stored in the db. False otherwise.
"""
try:
self.create_entry(RESPONDER_LAST_BLOCK_KEY, block_hash)
return True
except (TypeError, json.JSONDecodeError):
return False
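Continuing the same hypothetical `dbm` instance, the last-known-block helpers simply persist and recover a single hash per component:

```python
block_hash = "00" * 32   # 32-byte hex-encoded block hash, illustrative value

dbm.store_last_block_hash_watcher(block_hash)            # -> True
assert dbm.load_last_block_hash_watcher() == block_hash
dbm.load_last_block_hash_responder()                     # -> None until the Responder stores one
```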
def create_triggered_appointment_flag(self, uuid):
"""
Creates a flag that signals that an appointment has been triggered.
Args:
uuid (:obj:`str`): the identifier of the flag to be created.
"""
self.db.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), "".encode("utf-8"))
logger.info("Flagging appointment as triggered", uuid=uuid)
def batch_create_triggered_appointment_flag(self, uuids):
"""
Creates a triggered flag for every appointment in the given list.
Args:
uuids (:obj:`list`): a list of identifiers for the appointments to flag.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), b"")
logger.info("Flagging appointment as triggered", uuid=uuid)
def load_all_triggered_flags(self):
"""
Loads all the appointment triggered flags from the database.
Returns:
:obj:`list`: a list of all the uuids of the triggered appointments.
"""
return [
k.decode()[len(TRIGGERED_APPOINTMENTS_PREFIX) :]
for k, v in self.db.iterator(prefix=TRIGGERED_APPOINTMENTS_PREFIX.encode("utf-8"))
]
def delete_triggered_appointment_flag(self, uuid):
"""
Deletes a flag that signals that an appointment has been triggered.
Args:
uuid (:obj:`str`): the identifier of the flag to be removed.
Returns:
:obj:`bool`: True if the flag was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(uuid, prefix=TRIGGERED_APPOINTMENTS_PREFIX)
logger.info("Removing triggered flag from appointment appointment", uuid=uuid)
return True
except TypeError:
logger.info("Couldn't delete triggered flag from db, uuid has wrong type", uuid=uuid)
return False
def batch_delete_triggered_appointment_flag(self, uuids):
"""
Deletes a list of flags signaling that some appointments have been triggered.
Args:
uuids (:obj:`list`): the identifiers of the flags to be removed.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"))
logger.info("Removing triggered flag from appointment appointment", uuid=uuid)

View File

@@ -26,7 +26,11 @@ class Builder:
locator_uuid_map = {} locator_uuid_map = {}
for uuid, data in appointments_data.items(): for uuid, data in appointments_data.items():
appointments[uuid] = {"locator": data.get("locator"), "end_time": data.get("end_time")} appointments[uuid] = {
"locator": data.get("locator"),
"end_time": data.get("end_time"),
"size": len(data.get("encrypted_blob")),
}
if data.get("locator") in locator_uuid_map: if data.get("locator") in locator_uuid_map:
locator_uuid_map[data.get("locator")].append(uuid) locator_uuid_map[data.get("locator")].append(uuid)
@@ -94,8 +98,9 @@ class Builder:
@staticmethod @staticmethod
def update_states(watcher, missed_blocks_watcher, missed_blocks_responder): def update_states(watcher, missed_blocks_watcher, missed_blocks_responder):
""" """
Updates the states of both the :mod:`Watcher <teos.watcher.Watcher>` and the :mod:`Responder <teos.responder.Responder>`. Updates the states of both the :mod:`Watcher <teos.watcher.Watcher>` and the
If both have pending blocks to process they need to be updates at the same time, block by block. :mod:`Responder <teos.responder.Responder>`. If both have pending blocks to process they need to be updated at
the same time, block by block.
If only one instance has to be updated, ``populate_block_queue`` should be used. If only one instance has to be updated, ``populate_block_queue`` should be used.

View File

@@ -1,7 +1,7 @@
from teos import LOG_PREFIX from teos import LOG_PREFIX
from teos.rpc_errors import *
from common.logger import Logger from common.logger import Logger
from teos.tools import bitcoin_cli from teos.tools import bitcoin_cli
import teos.rpc_errors as rpc_errors
from teos.utils.auth_proxy import JSONRPCException from teos.utils.auth_proxy import JSONRPCException
from teos.errors import UNKNOWN_JSON_RPC_EXCEPTION, RPC_TX_REORGED_AFTER_BROADCAST from teos.errors import UNKNOWN_JSON_RPC_EXCEPTION, RPC_TX_REORGED_AFTER_BROADCAST
@@ -36,12 +36,12 @@ class Receipt:
class Carrier: class Carrier:
""" """
The :class:`Carrier` is the class in charge of interacting with ``bitcoind`` to send/get transactions. It uses The :class:`Carrier` is in charge of interacting with ``bitcoind`` to send/get transactions. It uses :obj:`Receipt`
:obj:`Receipt` objects to report about the sending outcome. objects to report about the sending outcome.
Args: Args:
btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind
(rpc user, rpc passwd, host and port) (rpc user, rpc password, host and port)
Attributes: Attributes:
issued_receipts (:obj:`dict`): a dictionary of issued receipts to prevent resending the same transaction over issued_receipts (:obj:`dict`): a dictionary of issued receipts to prevent resending the same transaction over
@@ -81,17 +81,17 @@ class Carrier:
except JSONRPCException as e: except JSONRPCException as e:
errno = e.error.get("code") errno = e.error.get("code")
# Since we're pushing a raw transaction to the network we can face several rejections # Since we're pushing a raw transaction to the network we can face several rejections
if errno == RPC_VERIFY_REJECTED: if errno == rpc_errors.RPC_VERIFY_REJECTED:
# DISCUSS: 37-transaction-rejection # DISCUSS: 37-transaction-rejection
receipt = Receipt(delivered=False, reason=RPC_VERIFY_REJECTED) receipt = Receipt(delivered=False, reason=rpc_errors.RPC_VERIFY_REJECTED)
logger.error("Transaction couldn't be broadcast", error=e.error) logger.error("Transaction couldn't be broadcast", error=e.error)
elif errno == RPC_VERIFY_ERROR: elif errno == rpc_errors.RPC_VERIFY_ERROR:
# DISCUSS: 37-transaction-rejection # DISCUSS: 37-transaction-rejection
receipt = Receipt(delivered=False, reason=RPC_VERIFY_ERROR) receipt = Receipt(delivered=False, reason=rpc_errors.RPC_VERIFY_ERROR)
logger.error("Transaction couldn't be broadcast", error=e.error) logger.error("Transaction couldn't be broadcast", error=e.error)
elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: elif errno == rpc_errors.RPC_VERIFY_ALREADY_IN_CHAIN:
logger.info("Transaction is already in the blockchain. Getting confirmation count", txid=txid) logger.info("Transaction is already in the blockchain. Getting confirmation count", txid=txid)
# If the transaction is already in the chain, we get the number of confirmations and watch the tracker # If the transaction is already in the chain, we get the number of confirmations and watch the tracker
@@ -100,7 +100,9 @@ class Carrier:
if tx_info is not None: if tx_info is not None:
confirmations = int(tx_info.get("confirmations")) confirmations = int(tx_info.get("confirmations"))
receipt = Receipt(delivered=True, confirmations=confirmations, reason=RPC_VERIFY_ALREADY_IN_CHAIN) receipt = Receipt(
delivered=True, confirmations=confirmations, reason=rpc_errors.RPC_VERIFY_ALREADY_IN_CHAIN
)
else: else:
# There's a really unlikely edge case where a transaction can be reorged between receiving the # There's a really unlikely edge case where a transaction can be reorged between receiving the
@@ -108,12 +110,12 @@ class Carrier:
# mempool, which again is really unlikely. # mempool, which again is really unlikely.
receipt = Receipt(delivered=False, reason=RPC_TX_REORGED_AFTER_BROADCAST) receipt = Receipt(delivered=False, reason=RPC_TX_REORGED_AFTER_BROADCAST)
elif errno == RPC_DESERIALIZATION_ERROR: elif errno == rpc_errors.RPC_DESERIALIZATION_ERROR:
# Adding this here just for completeness. We should never end up here. The Carrier only sends txs # Adding this here just for completeness. We should never end up here. The Carrier only sends txs
# handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly # handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly
# deserialized # deserialized
logger.info("Transaction cannot be deserialized".format(txid)) logger.info("Transaction cannot be deserialized".format(txid))
receipt = Receipt(delivered=False, reason=RPC_DESERIALIZATION_ERROR) receipt = Receipt(delivered=False, reason=rpc_errors.RPC_DESERIALIZATION_ERROR)
else: else:
# If something else happens (unlikely but possible) log it so we can treat it in future releases # If something else happens (unlikely but possible) log it so we can treat it in future releases
@@ -133,23 +135,22 @@ class Carrier:
Returns: Returns:
:obj:`dict` or :obj:`None`: A dictionary with the transaction data if the transaction can be found on the :obj:`dict` or :obj:`None`: A dictionary with the transaction data if the transaction can be found on the
chain. chain. ``None`` otherwise.
Returns ``None`` otherwise.
""" """
try: try:
tx_info = bitcoin_cli(self.btc_connect_params).getrawtransaction(txid, 1) tx_info = bitcoin_cli(self.btc_connect_params).getrawtransaction(txid, 1)
return tx_info
except JSONRPCException as e: except JSONRPCException as e:
tx_info = None
# While it's quite unlikely, the transaction that was already in the blockchain could have been # While it's quite unlikely, the transaction that was already in the blockchain could have been
# reorged while we were querying bitcoind to get the confirmation count. In such a case we just # reorged while we were querying bitcoind to get the confirmation count. In that case we just restart
# restart the tracker # the tracker
if e.error.get("code") == RPC_INVALID_ADDRESS_OR_KEY: if e.error.get("code") == rpc_errors.RPC_INVALID_ADDRESS_OR_KEY:
logger.info("Transaction not found in mempool nor blockchain", txid=txid) logger.info("Transaction not found in mempool nor blockchain", txid=txid)
else: else:
# If something else happens (unlikely but possible) log it so we can treat it in future releases # If something else happens (unlikely but possible) log it so we can treat it in future releases
logger.error("JSONRPCException", method="Carrier.get_transaction", error=e.error) logger.error("JSONRPCException", method="Carrier.get_transaction", error=e.error)
return tx_info return None
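A hedged usage sketch of the method above; nothing is assumed beyond `get_transaction` returning a dict with the transaction data (including `confirmations`) or `None`:

```python
def describe_tx(carrier, txid):
    """Sketch only: branches on what Carrier.get_transaction returns (dict or None)."""
    tx_info = carrier.get_transaction(txid)
    if tx_info is None:
        return "transaction not found (not in mempool nor blockchain)"
    return "confirmations: {}".format(tx_info.get("confirmations", 0))
```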

View File

@@ -10,8 +10,8 @@ logger = Logger(actor="ChainMonitor", log_name_prefix=LOG_PREFIX)
class ChainMonitor: class ChainMonitor:
""" """
The :class:`ChainMonitor` is the class in charge of monitoring the blockchain (via ``bitcoind``) to detect new The :class:`ChainMonitor` is in charge of monitoring the blockchain (via ``bitcoind``) to detect new blocks on top
blocks on top of the best chain. If a new best block is spotted, the chain monitor will notify the of the best chain. If a new best block is spotted, the chain monitor will notify the
:obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder <teos.responder.Responder>` using ``Queues``. :obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder <teos.responder.Responder>` using ``Queues``.
The :class:`ChainMonitor` monitors the chain using two methods: ``zmq`` and ``polling``. Blocks are only notified The :class:`ChainMonitor` monitors the chain using two methods: ``zmq`` and ``polling``. Blocks are only notified
@@ -34,7 +34,6 @@ class ChainMonitor:
watcher_queue (:obj:`Queue`): a queue to send new best tips to the :obj:`Watcher <teos.watcher.Watcher>`. watcher_queue (:obj:`Queue`): a queue to send new best tips to the :obj:`Watcher <teos.watcher.Watcher>`.
responder_queue (:obj:`Queue`): a queue to send new best tips to the responder_queue (:obj:`Queue`): a queue to send new best tips to the
:obj:`Responder <teos.responder.Responder>`. :obj:`Responder <teos.responder.Responder>`.
polling_delta (:obj:`int`): time between polls (in seconds). polling_delta (:obj:`int`): time between polls (in seconds).
max_block_window_size (:obj:`int`): max size of last_tips. max_block_window_size (:obj:`int`): max size of last_tips.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a blockProcessor instance. block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a blockProcessor instance.
@@ -75,7 +74,6 @@ class ChainMonitor:
Args: Args:
block_hash (:obj:`str`): the new block hash to be sent to the subscribers. block_hash (:obj:`str`): the new block hash to be sent to the subscribers.
block_hash (:obj:`str`): the new block hash to be sent to the subscribers.
""" """
self.watcher_queue.put(block_hash) self.watcher_queue.put(block_hash)
@@ -90,7 +88,7 @@ class ChainMonitor:
block_hash (:obj:`block_hash`): the new best tip. block_hash (:obj:`block_hash`): the new best tip.
Returns: Returns:
(:obj:`bool`): ``True`` if the state was successfully updated, ``False`` otherwise. :obj:`bool`: True if the state was successfully updated, False otherwise.
""" """
if block_hash != self.best_tip and block_hash not in self.last_tips: if block_hash != self.best_tip and block_hash not in self.last_tips:

View File

@@ -7,7 +7,7 @@ logger = Logger(actor="Cleaner", log_name_prefix=LOG_PREFIX)
class Cleaner: class Cleaner:
""" """
The :class:`Cleaner` is the class in charge of removing expired/completed data from the tower. The :class:`Cleaner` is in charge of removing expired/completed data from the tower.
Mutable objects (like dicts) are passed-by-reference in Python, so no return is needed for the Cleaner. Mutable objects (like dicts) are passed-by-reference in Python, so no return is needed for the Cleaner.
""" """
@@ -15,15 +15,16 @@ class Cleaner:
@staticmethod @staticmethod
def delete_appointment_from_memory(uuid, appointments, locator_uuid_map): def delete_appointment_from_memory(uuid, appointments, locator_uuid_map):
""" """
Deletes an appointment from memory (``appointments`` and ``locator_uuid_map`` dictionaries). If the given
appointment does not share its locator with any other, the map will be completely removed; otherwise, the uuid
will be removed from the map.
Args: Args:
uuid (:obj:`str`): the identifier of the appointment to be deleted. uuid (:obj:`str`): the identifier of the appointment to be deleted.
appointments (:obj:`dict`): the appointments dictionary from where the appointment should be removed. appointments (:obj:`dict`): the appointments dictionary from where the appointment should be removed.
locator_uuid_map (:obj:`dict`): the locator:uuid map from where the appointment should also be removed. locator_uuid_map (:obj:`dict`): the locator:uuid map from where the appointment should also be removed.
""" """
locator = appointments[uuid].get("locator") locator = appointments[uuid].get("locator")
# Delete the appointment # Delete the appointment
@@ -43,8 +44,8 @@ class Cleaner:
Args: Args:
uuid (:obj:`str`): the identifier of the appointment to be deleted. uuid (:obj:`str`): the identifier of the appointment to be deleted.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
db_manager.delete_watcher_appointment(uuid) db_manager.delete_watcher_appointment(uuid)
@@ -61,8 +62,8 @@ class Cleaner:
Args: Args:
uuids (:obj:`list`): a list of identifiers to be removed from the map. uuids (:obj:`list`): a list of identifiers to be removed from the map.
locator (:obj:`str`): the identifier of the map to be either updated or deleted. locator (:obj:`str`): the identifier of the map to be either updated or deleted.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
locator_map = db_manager.load_locator_map(locator) locator_map = db_manager.load_locator_map(locator)
@@ -95,8 +96,8 @@ class Cleaner:
appointments. appointments.
locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>` locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>`
appointments. appointments.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
locator_maps_to_update = {} locator_maps_to_update = {}
@@ -123,8 +124,9 @@ class Cleaner:
""" """
Deletes a completed appointment from memory (:obj:`Watcher <teos.watcher.Watcher>`) and disk. Deletes a completed appointment from memory (:obj:`Watcher <teos.watcher.Watcher>`) and disk.
Currently, an appointment is only completed if it cannot make it to the (:obj:`Responder <teos.responder.Responder>`), Currently, an appointment is only completed if it cannot make it to the
otherwise, it will be flagged as triggered and removed once the tracker is completed. (:obj:`Responder <teos.responder.Responder>`), otherwise, it will be flagged as triggered and removed once the
tracker is completed.
Args: Args:
completed_appointments (:obj:`list`): a list of appointments to be deleted. completed_appointments (:obj:`list`): a list of appointments to be deleted.
@@ -132,9 +134,10 @@ class Cleaner:
appointments. appointments.
locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>` locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>`
appointments. appointments.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
locator_maps_to_update = {} locator_maps_to_update = {}
for uuid in completed_appointments: for uuid in completed_appointments:
@@ -160,7 +163,7 @@ class Cleaner:
@staticmethod @staticmethod
def flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager): def flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager):
""" """
Deletes a list of triggered appointments from memory (:obj:`Watcher <teos.watcher.Watcher>`) and flags them as Deletes a list of triggered appointments from memory (:obj:`Watcher <teos.watcher.Watcher>`) and flags them as
triggered on disk. triggered on disk.
Args: Args:
@@ -169,8 +172,8 @@ class Cleaner:
appointments. appointments.
locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>` locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher <teos.watcher.Watcher>`
appointments. appointments.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
for uuid in triggered_appointments: for uuid in triggered_appointments:
@@ -190,8 +193,8 @@ class Cleaner:
<teos.responder.Responder>` trackers. <teos.responder.Responder>` trackers.
completed_trackers (:obj:`dict`): a dict of completed trackers to be deleted (uuid:confirmations). completed_trackers (:obj:`dict`): a dict of completed trackers to be deleted (uuid:confirmations).
height (:obj:`int`): the block height at which the trackers were completed. height (:obj:`int`): the block height at which the trackers were completed.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
database. to interact with the database.
""" """
locator_maps_to_update = {} locator_maps_to_update = {}

View File

@@ -1,34 +1,11 @@
import json
import plyvel import plyvel
from teos import LOG_PREFIX
from common.logger import Logger
logger = Logger(actor="DBManager", log_name_prefix=LOG_PREFIX)
WATCHER_PREFIX = "w"
WATCHER_LAST_BLOCK_KEY = "bw"
RESPONDER_PREFIX = "r"
RESPONDER_LAST_BLOCK_KEY = "br"
LOCATOR_MAP_PREFIX = "m"
TRIGGERED_APPOINTMENTS_PREFIX = "ta"
class DBManager: class DBManager:
""" """
The :class:`DBManager` is the class in charge of interacting with the appointments database (``LevelDB``). The :class:`DBManager` is in charge of interacting with a database (``LevelDB``).
Keys and values are stored as bytes in the database but processed as strings by the manager. Keys and values are stored as bytes in the database but processed as strings by the manager.
The database is split in six prefixes:
- ``WATCHER_PREFIX``, defined as ``b'w``, is used to store :obj:`Watcher <teos.watcher.Watcher>` appointments.
- ``RESPONDER_PREFIX``, defines as ``b'r``, is used to store :obj:`Responder <teos.responder.Responder>` trackers.
- ``WATCHER_LAST_BLOCK_KEY``, defined as ``b'bw``, is used to store the last block hash known by the :obj:`Watcher <teos.watcher.Watcher>`.
- ``RESPONDER_LAST_BLOCK_KEY``, defined as ``b'br``, is used to store the last block hash known by the :obj:`Responder <teos.responder.Responder>`.
- ``LOCATOR_MAP_PREFIX``, defined as ``b'm``, is used to store the ``locator:uuid`` maps.
- ``TRIGGERED_APPOINTMENTS_PREFIX``, defined as ``b'ta``, is used to stored triggered appointments (appointments that have been handed to the :obj:`Responder <teos.responder.Responder>`.)
Args: Args:
db_path (:obj:`str`): the path (relative or absolute) to the system folder containing the database. A fresh db_path (:obj:`str`): the path (relative or absolute) to the system folder containing the database. A fresh
database will be created if the specified path does not contain one. database will be created if the specified path does not contain one.
@@ -42,57 +19,7 @@ class DBManager:
if not isinstance(db_path, str): if not isinstance(db_path, str):
raise ValueError("db_path must be a valid path/name") raise ValueError("db_path must be a valid path/name")
try: self.db = plyvel.DB(db_path, create_if_missing=True)
self.db = plyvel.DB(db_path)
except plyvel.Error as e:
if "create_if_missing is false" in str(e):
logger.info("No db found. Creating a fresh one")
self.db = plyvel.DB(db_path, create_if_missing=True)
elif "LOCK: Resource temporarily unavailable" in str(e):
logger.info("The db is already being used by another process (LOCK)")
raise e
def load_appointments_db(self, prefix):
"""
Loads all data from the appointments database given a prefix. Two prefixes are defined: ``WATCHER_PREFIX`` and
``RESPONDER_PREFIX``.
Args:
prefix (:obj:`str`): the prefix of the data to load.
Returns:
:obj:`dict`: A dictionary containing the requested data (appointments or trackers) indexed by ``uuid``.
Returns an empty dictionary if no data is found.
"""
data = {}
for k, v in self.db.iterator(prefix=prefix.encode("utf-8")):
# Get uuid and appointment_data from the db
uuid = k[len(prefix) :].decode("utf-8")
data[uuid] = json.loads(v)
return data
def get_last_known_block(self, key):
"""
Loads the last known block given a key (either ``WATCHER_LAST_BLOCK_KEY`` or ``RESPONDER_LAST_BLOCK_KEY``).
Returns:
:obj:`str` or :obj:`None`: A 16-byte hex-encoded str representing the last known block hash.
Returns ``None`` if the entry is not found.
"""
last_block = self.db.get(key.encode("utf-8"))
if last_block:
last_block = last_block.decode("utf-8")
return last_block
def create_entry(self, key, value, prefix=None): def create_entry(self, key, value, prefix=None):
""" """
@@ -102,8 +29,20 @@ class DBManager:
key (:obj:`str`): the key of the new entry, used to identify it. key (:obj:`str`): the key of the new entry, used to identify it.
value (:obj:`str`): the data stored under the given ``key``. value (:obj:`str`): the data stored under the given ``key``.
prefix (:obj:`str`): an optional prefix added to the ``key``. prefix (:obj:`str`): an optional prefix added to the ``key``.
Raises:
(:obj:`TypeError`) if key, value or prefix are not strings.
""" """
if not isinstance(key, str):
raise TypeError("Key must be str")
if not isinstance(value, str):
raise TypeError("Value must be str")
if not isinstance(prefix, str) and prefix is not None:
raise TypeError("Prefix (if set) must be str")
if isinstance(prefix, str): if isinstance(prefix, str):
key = prefix + key key = prefix + key
@@ -112,348 +51,55 @@ class DBManager:
self.db.put(key, value) self.db.put(key, value)
def load_entry(self, key): def load_entry(self, key, prefix=None):
""" """
Loads an entry from the database given a ``key``. Loads an entry from the database given a ``key`` (and optionally a ``prefix``).
Args: Args:
key (:obj:`str`): the key that identifies the entry to be loaded. key (:obj:`str`): the key that identifies the entry to be loaded.
prefix (:obj:`str`): an optional prefix added to the ``key``.
Returns: Returns:
:obj:`dict` or :obj:`None`: A dictionary containing the requested data (an appointment or a tracker). :obj:`bytes` or :obj:`None`: A byte-array containing the requested data.
Returns ``None`` if the entry is not found. Returns ``None`` if the entry is not found.
Raises:
(:obj:`TypeError`) if key or prefix are not strings.
""" """
data = self.db.get(key.encode("utf-8")) if not isinstance(key, str):
data = json.loads(data) if data is not None else data raise TypeError("Key must be str")
return data
if not isinstance(prefix, str) and prefix is not None:
raise TypeError("Prefix (if set) must be str")
if isinstance(prefix, str):
key = prefix + key
return self.db.get(key.encode("utf-8"))
def delete_entry(self, key, prefix=None): def delete_entry(self, key, prefix=None):
""" """
Deletes an entry from the database given a ``key`` (and optionally a ``prefix``) Deletes an entry from the database given a ``key`` (and optionally a ``prefix``).
Args: Args:
key (:obj:`str`): the key that identifies the data to be deleted. key (:obj:`str`): the key that identifies the data to be deleted.
prefix (:obj:`str`): an optional prefix to be prepended to the ``key``. prefix (:obj:`str`): an optional prefix to be prepended to the ``key``.
Raises:
(:obj:`TypeError`) if key or prefix are not strings.
""" """
if not isinstance(key, str):
raise TypeError("Key must be str")
if not isinstance(prefix, str) and prefix is not None:
raise TypeError("Prefix (if set) must be str")
if isinstance(prefix, str): if isinstance(prefix, str):
key = prefix + key key = prefix + key
key = key.encode("utf-8") key = key.encode("utf-8")
self.db.delete(key) self.db.delete(key)
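A quick generic usage sketch of the slimmed-down base class (the path is made up); note that `load_entry` now returns raw bytes rather than decoded json:

```python
dbm = DBManager("example_generic_db")   # illustrative path

dbm.create_entry("1234", "some value", prefix="x")
dbm.load_entry("1234", prefix="x")      # -> b"some value"

dbm.delete_entry("1234", prefix="x")
dbm.load_entry("1234", prefix="x")      # -> None
```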
def load_watcher_appointment(self, key):
"""
Loads an appointment from the database using ``WATCHER_PREFIX`` as prefix to the given ``key``.
Returns:
:obj:`dict`: A dictionary containing the appointment data if they ``key`` is found.
Returns ``None`` otherwise.
"""
return self.load_entry(WATCHER_PREFIX + key)
def load_responder_tracker(self, key):
"""
Loads a tracker from the database using ``RESPONDER_PREFIX`` as a prefix to the given ``key``.
Returns:
:obj:`dict`: A dictionary containing the tracker data if the ``key`` is found.
Returns ``None`` otherwise.
"""
return self.load_entry(RESPONDER_PREFIX + key)
def load_watcher_appointments(self, include_triggered=False):
"""
Loads all the appointments from the database (all entries with the ``WATCHER_PREFIX`` prefix).
Args:
include_triggered (:obj:`bool`): Whether to include the appointments flagged as triggered or not. ``False``
by default.
Returns:
:obj:`dict`: A dictionary with all the appointments stored in the database. An empty dictionary if there
are none.
"""
appointments = self.load_appointments_db(prefix=WATCHER_PREFIX)
triggered_appointments = self.load_all_triggered_flags()
if not include_triggered:
not_triggered = list(set(appointments.keys()).difference(triggered_appointments))
appointments = {uuid: appointments[uuid] for uuid in not_triggered}
return appointments
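For reference, the triggered-appointment filter above boils down to a set difference between all stored appointment uuids and the flagged ones. A small self-contained sketch with hypothetical in-memory data:

```python
# Hypothetical in-memory data illustrating the include_triggered filter.
appointments = {"uuid1": {"locator": "aa" * 16}, "uuid2": {"locator": "bb" * 16}}
triggered_appointments = ["uuid2"]

not_triggered = list(set(appointments.keys()).difference(triggered_appointments))
filtered = {uuid: appointments[uuid] for uuid in not_triggered}

assert filtered == {"uuid1": {"locator": "aa" * 16}}
```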
def load_responder_trackers(self):
"""
Loads all the trackers from the database (all entries with the ``RESPONDER_PREFIX`` prefix).
Returns:
:obj:`dict`: A dictionary with all the trackers stored in the database. An empty dictionary if there are
none.
"""
return self.load_appointments_db(prefix=RESPONDER_PREFIX)
def store_watcher_appointment(self, uuid, appointment):
"""
Stores an appointment in the database using the ``WATCHER_PREFIX`` prefix.
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
appointment (:obj:`str`): the json-encoded appointment to be stored as data.
"""
self.create_entry(uuid, appointment, prefix=WATCHER_PREFIX)
logger.info("Adding appointment to Watchers's db", uuid=uuid)
def store_responder_tracker(self, uuid, tracker):
"""
Stores a tracker in the database using the ``RESPONDER_PREFIX`` prefix.
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
tracker (:obj:`str`): the json-encoded tracker to be stored as data.
"""
self.create_entry(uuid, tracker, prefix=RESPONDER_PREFIX)
logger.info("Adding appointment to Responder's db", uuid=uuid)
def load_locator_map(self, locator):
"""
Loads the ``locator:uuid`` map of a given ``locator`` from the database.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string representing the appointment locator.
Returns:
:obj:`dict` or :obj:`None`: The requested ``locator:uuid`` map if found.
Returns ``None`` otherwise.
"""
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
locator_map = self.db.get(key)
if locator_map is not None:
locator_map = json.loads(locator_map.decode("utf-8"))
else:
logger.info("Locator not found in the db", locator=locator)
return locator_map
def create_append_locator_map(self, locator, uuid):
"""
Creates (or appends to if already exists) a ``locator:uuid`` map.
If the map already exists, the new ``uuid`` is appended to the existing ones (if it is not already there).
Args:
locator (:obj:`str`): a 16-byte hex-encoded string used as the key of the map.
uuid (:obj:`str`): a 16-byte hex-encoded unique id to create (or add to) the map.
"""
locator_map = self.load_locator_map(locator)
if locator_map is not None:
if uuid not in locator_map:
locator_map.append(uuid)
logger.info("Updating locator map", locator=locator, uuid=uuid)
else:
logger.info("UUID already in the map", locator=locator, uuid=uuid)
else:
locator_map = [uuid]
logger.info("Creating new locator map", locator=locator, uuid=uuid)
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
self.db.put(key, json.dumps(locator_map).encode("utf-8"))
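The create-or-append logic for locator maps can be illustrated in isolation. The sketch below mirrors the method above but stores into a plain dict (a hypothetical stand-in for the LevelDB handle):

```python
import json

# Plain dict as a hypothetical stand-in for the LevelDB handle.
db = {}
LOCATOR_MAP_PREFIX = "m"


def create_append_locator_map(locator, uuid):
    key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
    raw = db.get(key)
    locator_map = json.loads(raw) if raw is not None else None

    if locator_map is not None:
        # Append only if the uuid is not already in the map
        if uuid not in locator_map:
            locator_map.append(uuid)
    else:
        locator_map = [uuid]

    db[key] = json.dumps(locator_map).encode("utf-8")


locator = "aa" * 16
create_append_locator_map(locator, "uuid1")
create_append_locator_map(locator, "uuid2")
create_append_locator_map(locator, "uuid1")  # duplicate, ignored
assert json.loads(db[(LOCATOR_MAP_PREFIX + locator).encode("utf-8")]) == ["uuid1", "uuid2"]
```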
def update_locator_map(self, locator, locator_map):
"""
Updates a ``locator:uuid`` map in the database by deleting one of its uuids. It will only work as long as
the given ``locator_map`` is a subset of the current one and it's not empty.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string used as the key of the map.
locator_map (:obj:`list`): a list of uuids to replace the current one on the db.
"""
current_locator_map = self.load_locator_map(locator)
if set(locator_map).issubset(current_locator_map) and len(locator_map) != 0:
key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
self.db.put(key, json.dumps(locator_map).encode("utf-8"))
else:
logger.error("Trying to update a locator_map with completely different, or empty, data")
def delete_locator_map(self, locator):
"""
Deletes a ``locator:uuid`` map.
Args:
locator (:obj:`str`): a 16-byte hex-encoded string identifying the map to delete.
"""
self.delete_entry(locator, prefix=LOCATOR_MAP_PREFIX)
logger.info("Deleting locator map from db", uuid=locator)
def delete_watcher_appointment(self, uuid):
"""
Deletes an appointment from the database.
Args:
uuid (:obj:`str`): a 16-byte hex-encoded string identifying the appointment to be deleted.
"""
self.delete_entry(uuid, prefix=WATCHER_PREFIX)
logger.info("Deleting appointment from Watcher's db", uuid=uuid)
def batch_delete_watcher_appointments(self, uuids):
"""
Deletes a batch of appointments from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the appointments to be deleted.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((WATCHER_PREFIX + uuid).encode("utf-8"))
logger.info("Deleting appointment from Watcher's db", uuid=uuid)
def delete_responder_tracker(self, uuid):
"""
Deletes a tracker from the database.
Args:
uuid (:obj:`str`): a 16-byte hex-encoded string identifying the tracker to be deleted.
"""
self.delete_entry(uuid, prefix=RESPONDER_PREFIX)
logger.info("Deleting appointment from Responder's db", uuid=uuid)
def batch_delete_responder_trackers(self, uuids):
"""
Deletes a batch of trackers from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the trackers to be deleted.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((RESPONDER_PREFIX + uuid).encode("utf-8"))
logger.info("Deleting appointment from Responder's db", uuid=uuid)
def load_last_block_hash_watcher(self):
"""
Loads the last known block hash of the :obj:`Watcher <teos.watcher.Watcher>` from the database.
Returns:
:obj:`str` or :obj:`None`: A 32-byte hex-encoded string representing the last known block hash if found.
Returns ``None`` otherwise.
"""
return self.get_last_known_block(WATCHER_LAST_BLOCK_KEY)
def load_last_block_hash_responder(self):
"""
Loads the last known block hash of the :obj:`Responder <teos.responder.Responder>` from the database.
Returns:
:obj:`str` or :obj:`None`: A 32-byte hex-encoded string representing the last known block hash if found.
Returns ``None`` otherwise.
"""
return self.get_last_known_block(RESPONDER_LAST_BLOCK_KEY)
def store_last_block_hash_watcher(self, block_hash):
"""
Stores a block hash as the last known block of the :obj:`Watcher <teos.watcher.Watcher>`.
Args:
block_hash (:obj:`str`): the block hash to be stored (32-byte hex-encoded)
"""
self.create_entry(WATCHER_LAST_BLOCK_KEY, block_hash)
def store_last_block_hash_responder(self, block_hash):
"""
Stores a block hash as the last known block of the :obj:`Responder <teos.responder.Responder>`.
Args:
block_hash (:obj:`str`): the block hash to be stored (32-byte hex-encoded)
"""
self.create_entry(RESPONDER_LAST_BLOCK_KEY, block_hash)
def create_triggered_appointment_flag(self, uuid):
"""
Creates a flag that signals that an appointment has been triggered.
Args:
uuid (:obj:`str`): the identifier of the flag to be created.
"""
self.db.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), "".encode("utf-8"))
logger.info("Flagging appointment as triggered", uuid=uuid)
def batch_create_triggered_appointment_flag(self, uuids):
"""
Creates a flag that signals that an appointment has been triggered for every appointment in the given list.
Args:
uuids (:obj:`list`): a list of identifier for the appointments to flag.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), b"")
logger.info("Flagging appointment as triggered", uuid=uuid)
def load_all_triggered_flags(self):
"""
Loads all the appointment triggered flags from the database.
Returns:
:obj:`list`: a list of all the uuids of the triggered appointments.
"""
return [
k.decode()[len(TRIGGERED_APPOINTMENTS_PREFIX) :]
for k, v in self.db.iterator(prefix=TRIGGERED_APPOINTMENTS_PREFIX.encode("utf-8"))
]
def delete_triggered_appointment_flag(self, uuid):
"""
Deletes a flag that signals that an appointment has been triggered.
Args:
uuid (:obj:`str`): the identifier of the flag to be removed.
"""
self.delete_entry(uuid, prefix=TRIGGERED_APPOINTMENTS_PREFIX)
logger.info("Removing triggered flag from appointment appointment", uuid=uuid)
def batch_delete_triggered_appointment_flag(self, uuids):
"""
Deletes a list of flags signaling that some appointments have been triggered.
Args:
uuids (:obj:`list`): the identifiers of the flags to be removed.
"""
with self.db.write_batch() as b:
for uuid in uuids:
b.delete((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"))
logger.info("Removing triggered flag from appointment appointment", uuid=uuid)

View File

@@ -1,4 +1,4 @@
# Appointment errors # Appointment errors [-1, -64]
APPOINTMENT_EMPTY_FIELD = -1 APPOINTMENT_EMPTY_FIELD = -1
APPOINTMENT_WRONG_FIELD_TYPE = -2 APPOINTMENT_WRONG_FIELD_TYPE = -2
APPOINTMENT_WRONG_FIELD_SIZE = -3 APPOINTMENT_WRONG_FIELD_SIZE = -3
@@ -6,7 +6,11 @@ APPOINTMENT_WRONG_FIELD_FORMAT = -4
APPOINTMENT_FIELD_TOO_SMALL = -5 APPOINTMENT_FIELD_TOO_SMALL = -5
APPOINTMENT_FIELD_TOO_BIG = -6 APPOINTMENT_FIELD_TOO_BIG = -6
APPOINTMENT_WRONG_FIELD = -7 APPOINTMENT_WRONG_FIELD = -7
APPOINTMENT_INVALID_SIGNATURE = -8 APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS = -8
# Registration errors [-65, -128]
REGISTRATION_MISSING_FIELD = -65
REGISTRATION_WRONG_FIELD_FORMAT = -66
# Custom RPC errors # Custom RPC errors
RPC_TX_REORGED_AFTER_BROADCAST = -98 RPC_TX_REORGED_AFTER_BROADCAST = -98
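With the error codes now split into ranges (appointment errors in -1 to -64, registration errors in -65 to -128), a caller could classify a return code by range. A hypothetical helper, not part of the codebase:

```python
# Hypothetical helper (not part of the codebase) mapping a return code to the
# ranges introduced above.
def error_category(rcode):
    if -64 <= rcode <= -1:
        return "appointment"
    if -128 <= rcode <= -65:
        return "registration"
    return "other"


assert error_category(-8) == "appointment"    # APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS
assert error_category(-66) == "registration"  # REGISTRATION_WRONG_FIELD_FORMAT
```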

117
teos/gatekeeper.py Normal file
View File

@@ -0,0 +1,117 @@
from common.tools import is_compressed_pk
from common.cryptographer import Cryptographer
class NotEnoughSlots(ValueError):
"""Raised when trying to subtract more slots than a user has available"""
def __init__(self, user_pk, requested_slots):
self.user_pk = user_pk
self.requested_slots = requested_slots
class IdentificationFailure(Exception):
"""
Raised when a user cannot be identified. Either the user public key cannot be recovered or the user is
not found within the registered ones.
"""
pass
class Gatekeeper:
"""
The :class:`Gatekeeper` is in charge of managing the access to the tower. Only registered users are allowed to
perform actions.
Attributes:
registered_users (:obj:`dict`): a map of user_pk:appointment_slots.
"""
def __init__(self, user_db, default_slots):
self.default_slots = default_slots
self.user_db = user_db
self.registered_users = user_db.load_all_users()
def add_update_user(self, user_pk):
"""
Adds a new user or updates the subscription of an existing one, by adding additional slots.
Args:
user_pk (:obj:`str`): the public key that identifies the user (33-byte hex str).
Returns:
:obj:`int`: the number of available slots in the user subscription.
"""
if not is_compressed_pk(user_pk):
raise ValueError("Provided public key does not match expected format (33-byte hex string)")
if user_pk not in self.registered_users:
self.registered_users[user_pk] = {"available_slots": self.default_slots}
else:
self.registered_users[user_pk]["available_slots"] += self.default_slots
self.user_db.store_user(user_pk, self.registered_users[user_pk])
return self.registered_users[user_pk]["available_slots"]
def identify_user(self, message, signature):
"""
Checks if a request comes from a registered user by ec-recovering their public key from a signed message.
Args:
message (:obj:`bytes`): byte representation of the original message from where the signature was generated.
signature (:obj:`str`): the user's signature (hex-encoded).
Returns:
:obj:`str`: a compressed key recovered from the signature and matching a registered user.
Raises:
:obj:`IdentificationFailure`: if the user cannot be identified.
"""
if isinstance(message, bytes) and isinstance(signature, str):
rpk = Cryptographer.recover_pk(message, signature)
compressed_pk = Cryptographer.get_compressed_pk(rpk)
if compressed_pk in self.registered_users:
return compressed_pk
else:
raise IdentificationFailure("User not found.")
else:
raise IdentificationFailure("Wrong message or signature.")
def fill_slots(self, user_pk, n):
"""
Fills a given number of slots of the user subscription.
Args:
user_pk (:obj:`str`): the public key that identifies the user (33-byte hex str).
n (:obj:`int`): the number of slots to fill.
Raises:
:obj:`NotEnoughSlots`: if the user subscription does not have enough slots.
"""
# DISCUSS: we may want to return a different exception if the user does not exist
if user_pk in self.registered_users and n <= self.registered_users.get(user_pk).get("available_slots"):
self.registered_users[user_pk]["available_slots"] -= n
self.user_db.store_user(user_pk, self.registered_users[user_pk])
else:
raise NotEnoughSlots(user_pk, n)
def free_slots(self, user_pk, n):
"""
Frees some slots of a user subscription.
Args:
user_pk (:obj:`str`): the public key that identifies the user (33-byte hex str).
n (:obj:`int`): the number of slots to free.
"""
# DISCUSS: if the user does not exist we may want to log or return an exception.
if user_pk in self.registered_users:
self.registered_users[user_pk]["available_slots"] += n
self.user_db.store_user(user_pk, self.registered_users[user_pk])
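Putting the Gatekeeper together: the sketch below registers a user twice (slots accumulate), then consumes slots until ``NotEnoughSlots`` is raised. It assumes the module path shown in this diff, stubs the users database so no LevelDB instance is needed, and uses a made-up 33-byte compressed public key.

```python
from teos.gatekeeper import Gatekeeper, NotEnoughSlots


class DummyUsersDB:
    """In-memory stand-in for UsersDBM so the example needs no LevelDB."""

    def load_all_users(self):
        return {}

    def store_user(self, user_pk, user_data):
        pass


user_pk = "02" + "aa" * 32  # made-up 33-byte compressed public key (hex)
gatekeeper = Gatekeeper(DummyUsersDB(), default_slots=100)

gatekeeper.add_update_user(user_pk)            # 100 slots
slots = gatekeeper.add_update_user(user_pk)    # 200 slots, subscriptions accumulate
gatekeeper.fill_slots(user_pk, 150)            # 50 slots left

try:
    gatekeeper.fill_slots(user_pk, 100)        # more than available
except NotEnoughSlots as e:
    print("Not enough slots for", e.user_pk, "requested", e.requested_slots)
```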

View File

@@ -3,7 +3,8 @@ def show_usage():
"USAGE: " "USAGE: "
"\n\tpython teosd.py [global options]" "\n\tpython teosd.py [global options]"
"\n\nGLOBAL OPTIONS:" "\n\nGLOBAL OPTIONS:"
"\n\t--btcnetwork \t\tNetwork bitcoind is connected to. Either mainnet, testnet or regtest. Defaults to 'mainnet' (modifiable in conf file)." "\n\t--btcnetwork \t\tNetwork bitcoind is connected to. Either mainnet, testnet or regtest. Defaults to "
"'mainnet' (modifiable in conf file)."
"\n\t--btcrpcuser \t\tbitcoind rpcuser. Defaults to 'user' (modifiable in conf file)." "\n\t--btcrpcuser \t\tbitcoind rpcuser. Defaults to 'user' (modifiable in conf file)."
"\n\t--btcrpcpassword \tbitcoind rpcpassword. Defaults to 'passwd' (modifiable in conf file)." "\n\t--btcrpcpassword \tbitcoind rpcpassword. Defaults to 'passwd' (modifiable in conf file)."
"\n\t--btcrpcconnect \tbitcoind rpcconnect. Defaults to 'localhost' (modifiable in conf file)." "\n\t--btcrpcconnect \tbitcoind rpcconnect. Defaults to 'localhost' (modifiable in conf file)."

View File

@@ -1,13 +1,12 @@
import re import re
from binascii import unhexlify
import common.cryptographer import common.cryptographer
from common.logger import Logger
from common.tools import is_locator
from common.constants import LOCATOR_LEN_HEX from common.constants import LOCATOR_LEN_HEX
from common.cryptographer import Cryptographer, PublicKey from common.appointment import Appointment
from teos import errors, LOG_PREFIX from teos import errors, LOG_PREFIX
from common.logger import Logger
from common.appointment import Appointment
logger = Logger(actor="Inspector", log_name_prefix=LOG_PREFIX) logger = Logger(actor="Inspector", log_name_prefix=LOG_PREFIX)
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX) common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
@@ -19,7 +18,14 @@ common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_
BLOCKS_IN_A_MONTH = 4320 # 4320 = roughly a month in blocks BLOCKS_IN_A_MONTH = 4320 # 4320 = roughly a month in blocks
ENCRYPTED_BLOB_MAX_SIZE_HEX = 2 * 2048
class InspectionFailed(Exception):
"""Raise this the inspector finds a problem with any of the appointment fields"""
def __init__(self, erno, reason):
self.erno = erno
self.reason = reason
class Inspector: class Inspector:
@@ -36,97 +42,65 @@ class Inspector:
self.block_processor = block_processor self.block_processor = block_processor
self.min_to_self_delay = min_to_self_delay self.min_to_self_delay = min_to_self_delay
def inspect(self, appointment_data, signature, public_key): def inspect(self, appointment_data):
""" """
Inspects whether the data provided by the user is correct. Inspects whether the data provided by the user is correct.
Args: Args:
appointment_data (:obj:`dict`): a dictionary containing the appointment data. appointment_data (:obj:`dict`): a dictionary containing the appointment data.
signature (:obj:`str`): the appointment signature provided by the user (hex encoded).
public_key (:obj:`str`): the user's public key (hex encoded).
Returns: Returns:
:obj:`Appointment <teos.appointment.Appointment>` or :obj:`tuple`: An appointment initialized with the :obj:`Appointment <teos.appointment.Appointment>`: An appointment initialized with the provided data.
provided data if it is correct.
Returns a tuple ``(return code, message)`` describing the error otherwise. Raises:
:obj:`InspectionFailed`: if any of the fields is wrong.
Errors are defined in :mod:`Errors <teos.errors>`.
""" """
if appointment_data is None:
raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty appointment received")
elif not isinstance(appointment_data, dict):
raise InspectionFailed(errors.APPOINTMENT_WRONG_FIELD, "wrong appointment format")
block_height = self.block_processor.get_block_count() block_height = self.block_processor.get_block_count()
if block_height is None:
raise InspectionFailed(errors.UNKNOWN_JSON_RPC_EXCEPTION, "unexpected error occurred")
if block_height is not None: self.check_locator(appointment_data.get("locator"))
rcode, message = self.check_locator(appointment_data.get("locator")) self.check_start_time(appointment_data.get("start_time"), block_height)
self.check_end_time(appointment_data.get("end_time"), appointment_data.get("start_time"), block_height)
self.check_to_self_delay(appointment_data.get("to_self_delay"))
self.check_blob(appointment_data.get("encrypted_blob"))
if rcode == 0: return Appointment.from_dict(appointment_data)
rcode, message = self.check_start_time(appointment_data.get("start_time"), block_height)
if rcode == 0:
rcode, message = self.check_end_time(
appointment_data.get("end_time"), appointment_data.get("start_time"), block_height
)
if rcode == 0:
rcode, message = self.check_to_self_delay(appointment_data.get("to_self_delay"))
if rcode == 0:
rcode, message = self.check_blob(appointment_data.get("encrypted_blob"))
if rcode == 0:
rcode, message = self.check_appointment_signature(appointment_data, signature, public_key)
if rcode == 0:
r = Appointment.from_dict(appointment_data)
else:
r = (rcode, message)
else:
# In case of an unknown exception, assign a special rcode and reason.
r = (errors.UNKNOWN_JSON_RPC_EXCEPTION, "Unexpected error occurred")
return r
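Since ``inspect()`` now raises ``InspectionFailed`` instead of returning ``(rcode, message)`` tuples, callers such as the API wrap it in a try/except. A rough sketch, assuming the names shown in this diff, a stubbed block processor, and appointment fields following the format described earlier in this document:

```python
from teos.inspector import Inspector, InspectionFailed


class DummyBlockProcessor:
    """Stub returning a fixed chain height instead of querying bitcoind."""

    def get_block_count(self):
        return 1000


inspector = Inspector(DummyBlockProcessor(), 20)  # min_to_self_delay = 20

appointment_data = {
    "locator": "aa" * 16,        # 16-byte hex locator
    "start_time": 1001,          # within [current_height+1, current_height+6]
    "end_time": 1200,            # within the next ~month of blocks
    "to_self_delay": 20,
    "encrypted_blob": "ff" * 64,
}

try:
    appointment = inspector.inspect(appointment_data)
    print("Accepted appointment with locator", appointment.locator)
except InspectionFailed as e:
    print("Rejected: rcode={}, reason={}".format(e.erno, e.reason))
```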
@staticmethod @staticmethod
def check_locator(locator): def check_locator(locator):
""" """
Checks if the provided ``locator`` is correct. Checks if the provided ``locator`` is correct.
Locators must be 16-byte hex encoded strings. Locators must be 16-byte hex-encoded strings.
Args: Args:
locator (:obj:`str`): the locator to be checked. locator (:obj:`str`): the locator to be checked.
Returns: Raises:
:obj:`tuple`: A tuple (return code, message) as follows: :obj:`InspectionFailed`: if any of the fields is wrong.
- ``(0, None)`` if the ``locator`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``,
``APPOINTMENT_WRONG_FIELD_SIZE``, and ``APPOINTMENT_WRONG_FIELD_FORMAT``.
""" """
message = None
rcode = 0
if locator is None: if locator is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty locator received")
message = "empty locator received"
elif type(locator) != str: elif type(locator) != str:
rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE raise InspectionFailed(
message = "wrong locator data type ({})".format(type(locator)) errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong locator data type ({})".format(type(locator))
)
elif len(locator) != LOCATOR_LEN_HEX: elif len(locator) != LOCATOR_LEN_HEX:
rcode = errors.APPOINTMENT_WRONG_FIELD_SIZE raise InspectionFailed(errors.APPOINTMENT_WRONG_FIELD_SIZE, "wrong locator size ({})".format(len(locator)))
message = "wrong locator size ({})".format(len(locator))
# TODO: #12-check-txid-regexp
elif re.search(r"^[0-9A-Fa-f]+$", locator) is None: elif not is_locator(locator):
rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT raise InspectionFailed(errors.APPOINTMENT_WRONG_FIELD_FORMAT, "wrong locator format ({})".format(locator))
message = "wrong locator format ({})".format(locator)
if message is not None:
logger.error(message)
return rcode, message
@staticmethod @staticmethod
def check_start_time(start_time, block_height): def check_start_time(start_time, block_height):
@@ -139,50 +113,32 @@ class Inspector:
start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches. start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches.
block_height (:obj:`int`): the chain height. block_height (:obj:`int`): the chain height.
Returns: Raises:
:obj:`tuple`: A tuple (return code, message) as follows: :obj:`InspectionFailed`: if any of the fields is wrong.
- ``(0, None)`` if the ``start_time`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``, and
``APPOINTMENT_FIELD_TOO_SMALL``.
""" """
message = None
rcode = 0
# TODO: What's too close to the current height is not properly defined. Right now any appointment that is in the
# future will be accepted (even if it's only one block away).
t = type(start_time)
if start_time is None: if start_time is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty start_time received")
message = "empty start_time received"
elif t != int: elif type(start_time) != int:
rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE raise InspectionFailed(
message = "wrong start_time data type ({})".format(t) errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong start_time data type ({})".format(type(start_time))
)
elif start_time <= block_height: elif start_time < block_height:
rcode = errors.APPOINTMENT_FIELD_TOO_SMALL raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "start_time is in the past")
if start_time < block_height:
message = "start_time is in the past" elif start_time == block_height:
else: raise InspectionFailed(
message = ( errors.APPOINTMENT_FIELD_TOO_SMALL,
"start_time is too close to current height. " "start_time is too close to current height. Accepted times are: [current_height+1, current_height+6]",
"Accepted times are: [current_height+1, current_height+6]" )
)
elif start_time > block_height + 6: elif start_time > block_height + 6:
rcode = errors.APPOINTMENT_FIELD_TOO_BIG raise InspectionFailed(
message = "start_time is too far in the future. Accepted start times are up to 6 blocks in the future" errors.APPOINTMENT_FIELD_TOO_BIG,
"start_time is too far in the future. Accepted start times are up to 6 blocks in the future",
if message is not None: )
logger.error(message)
return rcode, message
@staticmethod @staticmethod
def check_end_time(end_time, start_time, block_height): def check_end_time(end_time, start_time, block_height):
@@ -196,54 +152,36 @@ class Inspector:
start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches. start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches.
block_height (:obj:`int`): the chain height. block_height (:obj:`int`): the chain height.
Returns: Raises:
:obj:`tuple`: A tuple (return code, message) as follows: :obj:`InspectionFailed`: if any of the fields is wrong.
- ``(0, None)`` if the ``end_time`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``, and
``APPOINTMENT_FIELD_TOO_SMALL``.
""" """
message = None
rcode = 0
# TODO: What's too close to the current height is not properly defined. Right now any appointment that ends in # TODO: What's too close to the current height is not properly defined. Right now any appointment that ends in
# the future will be accepted (even if it's only one block away). # the future will be accepted (even if it's only one block away).
t = type(end_time)
if end_time is None: if end_time is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty end_time received")
message = "empty end_time received"
elif t != int: elif type(end_time) != int:
rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE raise InspectionFailed(
message = "wrong end_time data type ({})".format(t) errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong end_time data type ({})".format(type(end_time))
)
elif end_time > block_height + BLOCKS_IN_A_MONTH: # 4320 = roughly a month in blocks elif end_time > block_height + BLOCKS_IN_A_MONTH: # 4320 = roughly a month in blocks
rcode = errors.APPOINTMENT_FIELD_TOO_BIG raise InspectionFailed(
message = "end_time should be within the next month (<= current_height + 4320)" errors.APPOINTMENT_FIELD_TOO_BIG, "end_time should be within the next month (<= current_height + 4320)"
)
elif start_time > end_time:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is smaller than start_time")
elif start_time >= end_time: elif start_time == end_time:
rcode = errors.APPOINTMENT_FIELD_TOO_SMALL raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is equal to start_time")
if start_time > end_time:
message = "end_time is smaller than start_time"
else:
message = "end_time is equal to start_time"
elif block_height >= end_time: elif block_height > end_time:
rcode = errors.APPOINTMENT_FIELD_TOO_SMALL raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is in the past")
if block_height > end_time:
message = "end_time is in the past"
else:
message = "end_time is too close to current height"
if message is not None: elif block_height == end_time:
logger.error(message) raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is too close to current height")
return rcode, message
def check_to_self_delay(self, to_self_delay): def check_to_self_delay(self, to_self_delay):
""" """
@@ -252,49 +190,35 @@ class Inspector:
To self delays must be greater or equal to ``MIN_TO_SELF_DELAY``. To self delays must be greater or equal to ``MIN_TO_SELF_DELAY``.
Args: Args:
to_self_delay (:obj:`int`): The ``to_self_delay`` encoded in the ``csv`` of the ``htlc`` that this to_self_delay (:obj:`int`): The ``to_self_delay`` encoded in the ``csv`` of the ``to_remote`` output of the
appointment is covering. commitment transaction this appointment is covering.
Returns: Raises:
:obj:`tuple`: A tuple (return code, message) as follows: :obj:`InspectionFailed`: if any of the fields is wrong.
- ``(0, None)`` if the ``to_self_delay`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``, and
``APPOINTMENT_FIELD_TOO_SMALL``.
""" """
message = None
rcode = 0
t = type(to_self_delay)
if to_self_delay is None: if to_self_delay is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty to_self_delay received")
message = "empty to_self_delay received"
elif t != int: elif type(to_self_delay) != int:
rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE raise InspectionFailed(
message = "wrong to_self_delay data type ({})".format(t) errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong to_self_delay data type ({})".format(type(to_self_delay))
)
elif to_self_delay > pow(2, 32): elif to_self_delay > pow(2, 32):
rcode = errors.APPOINTMENT_FIELD_TOO_BIG raise InspectionFailed(
message = "to_self_delay must fit the transaction nLockTime field ({} > {})".format( errors.APPOINTMENT_FIELD_TOO_BIG,
to_self_delay, pow(2, 32) "to_self_delay must fit the transaction nLockTime field ({} > {})".format(to_self_delay, pow(2, 32)),
) )
elif to_self_delay < self.min_to_self_delay: elif to_self_delay < self.min_to_self_delay:
rcode = errors.APPOINTMENT_FIELD_TOO_SMALL raise InspectionFailed(
message = "to_self_delay too small. The to_self_delay should be at least {} (current: {})".format( errors.APPOINTMENT_FIELD_TOO_SMALL,
self.min_to_self_delay, to_self_delay "to_self_delay too small. The to_self_delay should be at least {} (current: {})".format(
self.min_to_self_delay, to_self_delay
),
) )
if message is not None:
logger.error(message)
return rcode, message
# ToDo: #6-define-checks-encrypted-blob # ToDo: #6-define-checks-encrypted-blob
@staticmethod @staticmethod
def check_blob(encrypted_blob): def check_blob(encrypted_blob):
@@ -302,88 +226,21 @@ class Inspector:
Checks if the provided ``encrypted_blob`` may be correct. Checks if the provided ``encrypted_blob`` may be correct.
Args: Args:
encrypted_blob (:obj:`str`): the encrypted blob to be checked (hex encoded). encrypted_blob (:obj:`str`): the encrypted blob to be checked (hex-encoded).
Returns: Raises:
:obj:`tuple`: A tuple (return code, message) as follows: :obj:`InspectionFailed`: if any of the fields is wrong.
- ``(0, None)`` if the ``encrypted_blob`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``, and
``APPOINTMENT_WRONG_FIELD_FORMAT``.
""" """
message = None
rcode = 0
t = type(encrypted_blob)
if encrypted_blob is None: if encrypted_blob is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty encrypted_blob received")
message = "empty encrypted_blob received"
elif t != str: elif type(encrypted_blob) != str:
rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE raise InspectionFailed(
message = "wrong encrypted_blob data type ({})".format(t) errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong encrypted_blob data type ({})".format(type(encrypted_blob))
)
elif len(encrypted_blob) > ENCRYPTED_BLOB_MAX_SIZE_HEX:
rcode = errors.APPOINTMENT_FIELD_TOO_BIG
message = "encrypted_blob has to be 2Kib at most (current {})".format(len(encrypted_blob) // 2)
elif re.search(r"^[0-9A-Fa-f]+$", encrypted_blob) is None: elif re.search(r"^[0-9A-Fa-f]+$", encrypted_blob) is None:
rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT raise InspectionFailed(
message = "wrong encrypted_blob format ({})".format(encrypted_blob) errors.APPOINTMENT_WRONG_FIELD_FORMAT, "wrong encrypted_blob format ({})".format(encrypted_blob)
)
if message is not None:
logger.error(message)
return rcode, message
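Note that the 2 KiB cap on ``encrypted_blob`` is dropped here; only the emptiness, type, and hex-format checks remain. A standalone illustration of the format check:

```python
import re


def looks_like_hex(blob):
    # Mirrors the remaining format check: non-empty string of hex characters
    return isinstance(blob, str) and re.search(r"^[0-9A-Fa-f]+$", blob) is not None


assert looks_like_hex("deadbeef")
assert not looks_like_hex("not-hex!")
```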
@staticmethod
# Verifies that the appointment signature is a valid signature with public key
def check_appointment_signature(appointment_data, signature, pk):
"""
Checks if the provided user signature is correct.
Args:
appointment_data (:obj:`dict`): the appointment that was signed by the user.
signature (:obj:`str`): the user's signature (hex encoded).
pk (:obj:`str`): the user's public key (hex encoded).
Returns:
:obj:`tuple`: A tuple (return code, message) as follows:
- ``(0, None)`` if the ``signature`` is correct.
- ``!= (0, None)`` otherwise.
The possible return errors are: ``APPOINTMENT_EMPTY_FIELD``, ``APPOINTMENT_WRONG_FIELD_TYPE``, and
``APPOINTMENT_WRONG_FIELD_FORMAT``.
"""
message = None
rcode = 0
if signature is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD
message = "empty signature received"
elif pk is None:
rcode = errors.APPOINTMENT_EMPTY_FIELD
message = "empty public key received"
elif re.match(r"^[0-9A-Fa-f]{66}$", pk) is None:
rcode = errors.APPOINTMENT_WRONG_FIELD
message = "public key must be a hex encoded 33-byte long value"
else:
appointment = Appointment.from_dict(appointment_data)
rpk = Cryptographer.recover_pk(appointment.serialize(), signature)
pk = PublicKey(unhexlify(pk))
valid_sig = Cryptographer.verify_rpk(pk, rpk)
if not valid_sig:
rcode = errors.APPOINTMENT_INVALID_SIGNATURE
message = "invalid signature"
return rcode, message

View File

@@ -1,4 +1,3 @@
import json
from queue import Queue from queue import Queue
from threading import Thread from threading import Thread
@@ -14,7 +13,7 @@ logger = Logger(actor="Responder", log_name_prefix=LOG_PREFIX)
class TransactionTracker: class TransactionTracker:
""" """
A :class:`TransactionTracker` is used to monitor a ``penalty_tx``. Once the dispute is seen by the A :class:`TransactionTracker` is used to monitor a ``penalty_tx``. Once the dispute is seen by the
:obj:`Watcher <teos.watcher.Watcher>` the penalty transaction is decrypted and the relevant appointment data is :obj:`Watcher <teos.watcher.Watcher>` the penalty transaction is decrypted and the relevant appointment data is
passed along to the :obj:`Responder`. passed along to the :obj:`Responder`.
@@ -54,7 +53,7 @@ class TransactionTracker:
:obj:`TransactionTracker`: A ``TransactionTracker`` instantiated with the provided data. :obj:`TransactionTracker`: A ``TransactionTracker`` instantiated with the provided data.
Raises: Raises:
ValueError: if any of the required fields is missing. :obj:`ValueError`: if any of the required fields is missing.
""" """
locator = tx_tracker_data.get("locator") locator = tx_tracker_data.get("locator")
@@ -73,7 +72,7 @@ class TransactionTracker:
def to_dict(self): def to_dict(self):
""" """
Exports a :obj:`TransactionTracker` as a dictionary. Encodes a :obj:`TransactionTracker` as a dictionary.
Returns: Returns:
:obj:`dict`: A dictionary containing the :obj:`TransactionTracker` data. :obj:`dict`: A dictionary containing the :obj:`TransactionTracker` data.
@@ -89,26 +88,19 @@ class TransactionTracker:
return tx_tracker return tx_tracker
def to_json(self):
"""
Exports a :obj:`TransactionTracker` as a json-encoded dictionary.
Returns:
:obj:`str`: A json-encoded dictionary containing the :obj:`TransactionTracker` data.
"""
return json.dumps(self.to_dict())
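With ``to_json()`` gone, trackers are handed to the database manager as dictionaries and JSON-encoded there (see ``store_responder_tracker(uuid, tracker.to_dict())`` further down). A minimal sketch of that serialization move, using a hypothetical store function over a plain dict:

```python
import json


# Hypothetical store function: the caller passes a dict, the db layer encodes it.
def store_responder_tracker(db, uuid, tracker_dict):
    db[("r" + uuid).encode("utf-8")] = json.dumps(tracker_dict).encode("utf-8")


db = {}
tracker = {"locator": "aa" * 16, "penalty_txid": "bb" * 32, "appointment_end": 1200}
store_responder_tracker(db, "uuid1", tracker)
assert json.loads(db[b"ruuid1"]) == tracker
```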
class Responder: class Responder:
""" """
The :class:`Responder` is the class in charge of ensuring that channel breaches are dealt with. It does so handling The :class:`Responder` is in charge of ensuring that channel breaches are dealt with. It does so handling
the decrypted ``penalty_txs`` handed by the :obj:`Watcher <teos.watcher.Watcher>` and ensuring that they make it to the decrypted ``penalty_txs`` handed by the :obj:`Watcher <teos.watcher.Watcher>` and ensuring that they make it to
the blockchain. the blockchain.
Args: Args:
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): a ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): an ``AppointmentsDBM`` instance
database. to interact with the database.
carrier (:obj:`Carrier <teos.carrier.Carrier>`): a ``Carrier`` instance to send transactions to bitcoind.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get data from bitcoind.
Attributes: Attributes:
trackers (:obj:`dict`): A dictionary containing the minimum information about the :obj:`TransactionTracker` trackers (:obj:`dict`): A dictionary containing the minimum information about the :obj:`TransactionTracker`
@@ -121,13 +113,12 @@ class Responder:
has missed. Used to trigger rebroadcast if needed. has missed. Used to trigger rebroadcast if needed.
block_queue (:obj:`Queue`): A queue used by the :obj:`Responder` to receive block hashes from ``bitcoind``. It block_queue (:obj:`Queue`): A queue used by the :obj:`Responder` to receive block hashes from ``bitcoind``. It
is populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`. is populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
db_manager (:obj:`DBManager <teos.db_manager.DBManager>`): A ``DBManager`` instance to interact with the db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): an ``AppointmentsDBM`` instance
database. to interact with the database.
carrier (:obj:`Carrier <teos.carrier.Carrier>`): a ``Carrier`` instance to send transactions to bitcoind. carrier (:obj:`Carrier <teos.carrier.Carrier>`): a ``Carrier`` instance to send transactions to bitcoind.
block_processor (:obj:`DBManager <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to get block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
data from bitcoind. get data from bitcoind.
last_known_block (:obj:`str`): the last block known by the ``Responder``. last_known_block (:obj:`str`): the last block known by the ``Responder``.
""" """
def __init__(self, db_manager, carrier, block_processor): def __init__(self, db_manager, carrier, block_processor):
@@ -142,6 +133,7 @@ class Responder:
self.last_known_block = db_manager.load_last_block_hash_responder() self.last_known_block = db_manager.load_last_block_hash_responder()
def awake(self): def awake(self):
"""Starts a new thread to monitor the blockchain to make sure triggered appointments get enough depth"""
responder_thread = Thread(target=self.do_watch, daemon=True) responder_thread = Thread(target=self.do_watch, daemon=True)
responder_thread.start() responder_thread.start()
@@ -151,7 +143,7 @@ class Responder:
""" """
Whether the :obj:`Responder` is on sync with ``bitcoind`` or not. Used when recovering from a crash. Whether the :obj:`Responder` is on sync with ``bitcoind`` or not. Used when recovering from a crash.
The Watchtower can be instantiated with fresh or with backed up data. In the latter, some triggers may have been The Watchtower can be instantiated with fresh or with backed up data. In the latter, some triggers may have been
missed. In order to go back on sync both the :obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder` missed. In order to go back on sync both the :obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder`
need to perform the state transitions until they catch up. need to perform the state transitions until they catch up.
@@ -216,9 +208,8 @@ class Responder:
""" """
Creates a :obj:`TransactionTracker` after successfully broadcasting a ``penalty_tx``. Creates a :obj:`TransactionTracker` after successfully broadcasting a ``penalty_tx``.
A reduction of :obj:`TransactionTracker` is stored in ``trackers`` and ``tx_tracker_map`` and the A summary of :obj:`TransactionTracker` is stored in ``trackers`` and ``tx_tracker_map`` and the ``penalty_txid``
``penalty_txid`` added to ``unconfirmed_txs`` if ``confirmations=0``. Finally, all the data is stored in the added to ``unconfirmed_txs`` if ``confirmations=0``. Finally, all the data is stored in the database.
database.
Args: Args:
uuid (:obj:`str`): a unique identifier for the appointment. uuid (:obj:`str`): a unique identifier for the appointment.
@@ -251,7 +242,7 @@ class Responder:
if penalty_txid not in self.unconfirmed_txs and confirmations == 0: if penalty_txid not in self.unconfirmed_txs and confirmations == 0:
self.unconfirmed_txs.append(penalty_txid) self.unconfirmed_txs.append(penalty_txid)
self.db_manager.store_responder_tracker(uuid, tracker.to_json()) self.db_manager.store_responder_tracker(uuid, tracker.to_dict())
logger.info( logger.info(
"New tracker added", dispute_txid=dispute_txid, penalty_txid=penalty_txid, appointment_end=appointment_end "New tracker added", dispute_txid=dispute_txid, penalty_txid=penalty_txid, appointment_end=appointment_end
@@ -259,7 +250,7 @@ class Responder:
def do_watch(self): def do_watch(self):
""" """
Monitors the blockchain whilst there are pending trackers. Monitors the blockchain for reorgs and appointment ends.
This is the main method of the :obj:`Responder` and triggers tracker cleaning, rebroadcasting, reorg managing, This is the main method of the :obj:`Responder` and triggers tracker cleaning, rebroadcasting, reorg managing,
etc. etc.
@@ -303,7 +294,7 @@ class Responder:
# Clear the receipts issued in this block # Clear the receipts issued in this block
self.carrier.issued_receipts = {} self.carrier.issued_receipts = {}
if len(self.trackers) is 0: if len(self.trackers) == 0:
logger.info("No more pending trackers") logger.info("No more pending trackers")
# Register the last processed block for the responder # Register the last processed block for the responder
@@ -395,9 +386,9 @@ class Responder:
def rebroadcast(self, txs_to_rebroadcast): def rebroadcast(self, txs_to_rebroadcast):
""" """
Rebroadcasts a ``penalty_tx`` that has missed too many confirmations. In the current approach this would loop Rebroadcasts a ``penalty_tx`` that has missed too many confirmations. In the current approach this would loop
forever si the transaction keeps not getting it. forever if the transaction keeps not getting it.
Potentially the fees could be bumped here if the transaction has some tower dedicated outputs (or allows it Potentially, the fees could be bumped here if the transaction has some tower dedicated outputs (or allows it
through ``ANYONECANPAY`` or something similar). through ``ANYONECANPAY`` or something similar).
Args: Args:

View File

@@ -3,16 +3,16 @@
# General application defined errors # General application defined errors
RPC_MISC_ERROR = -1 # std::exception thrown in command handling RPC_MISC_ERROR = -1 # std::exception thrown in command handling
RPC_TYPE_ERROR = -3 # Unexpected type was passed as parameter RPC_TYPE_ERROR = -3 # Unexpected type was passed as parameter
RPC_INVALID_ADDRESS_OR_KEY = -5 # Invalid address or key RPC_INVALID_ADDRESS_OR_KEY = -5 # Invalid address or key
RPC_OUT_OF_MEMORY = -7 # Ran out of memory during operation RPC_OUT_OF_MEMORY = -7 # Ran out of memory during operation
RPC_INVALID_PARAMETER = -8 # Invalid missing or duplicate parameter RPC_INVALID_PARAMETER = -8 # Invalid missing or duplicate parameter
RPC_DATABASE_ERROR = -20 # Database error RPC_DATABASE_ERROR = -20 # Database error
RPC_DESERIALIZATION_ERROR = -22 # Error parsing or validating structure in raw format RPC_DESERIALIZATION_ERROR = -22 # Error parsing or validating structure in raw format
RPC_VERIFY_ERROR = -25 # General error during transaction or block submission RPC_VERIFY_ERROR = -25 # General error during transaction or block submission
RPC_VERIFY_REJECTED = -26 # Transaction or block was rejected by network rules RPC_VERIFY_REJECTED = -26 # Transaction or block was rejected by network rules
RPC_VERIFY_ALREADY_IN_CHAIN = -27 # Transaction already in chain RPC_VERIFY_ALREADY_IN_CHAIN = -27 # Transaction already in chain
RPC_IN_WARMUP = -28 # Client still warming up RPC_IN_WARMUP = -28 # Client still warming up
RPC_METHOD_DEPRECATED = -32 # RPC method is deprecated RPC_METHOD_DEPRECATED = -32 # RPC method is deprecated
# Aliases for backward compatibility # Aliases for backward compatibility
RPC_TRANSACTION_ERROR = RPC_VERIFY_ERROR RPC_TRANSACTION_ERROR = RPC_VERIFY_ERROR
@@ -20,25 +20,23 @@ RPC_TRANSACTION_REJECTED = RPC_VERIFY_REJECTED
RPC_TRANSACTION_ALREADY_IN_CHAIN = RPC_VERIFY_ALREADY_IN_CHAIN RPC_TRANSACTION_ALREADY_IN_CHAIN = RPC_VERIFY_ALREADY_IN_CHAIN
# P2P client errors # P2P client errors
RPC_CLIENT_NOT_CONNECTED = -9 # Bitcoin is not connected RPC_CLIENT_NOT_CONNECTED = -9 # Bitcoin is not connected
RPC_CLIENT_IN_INITIAL_DOWNLOAD = -10 # Still downloading initial blocks RPC_CLIENT_IN_INITIAL_DOWNLOAD = -10 # Still downloading initial blocks
RPC_CLIENT_NODE_ALREADY_ADDED = -23 # Node is already added RPC_CLIENT_NODE_ALREADY_ADDED = -23 # Node is already added
RPC_CLIENT_NODE_NOT_ADDED = -24 # Node has not been added before RPC_CLIENT_NODE_NOT_ADDED = -24 # Node has not been added before
RPC_CLIENT_NODE_NOT_CONNECTED = -29 # Node to disconnect not found in connected nodes RPC_CLIENT_NODE_NOT_CONNECTED = -29 # Node to disconnect not found in connected nodes
RPC_CLIENT_INVALID_IP_OR_SUBNET = -30 # Invalid IP/Subnet RPC_CLIENT_INVALID_IP_OR_SUBNET = -30 # Invalid IP/Subnet
RPC_CLIENT_P2P_DISABLED = -31 # No valid connection manager instance found RPC_CLIENT_P2P_DISABLED = -31 # No valid connection manager instance found
# Wallet errors # Wallet errors
RPC_WALLET_ERROR = -4 # Unspecified problem with wallet (key not found etc.) RPC_WALLET_ERROR = -4 # Unspecified problem with wallet (key not found etc.)
RPC_WALLET_INSUFFICIENT_FUNDS = -6 # Not enough funds in wallet or account RPC_WALLET_INSUFFICIENT_FUNDS = -6 # Not enough funds in wallet or account
RPC_WALLET_INVALID_LABEL_NAME = -11 # Invalid label name RPC_WALLET_INVALID_LABEL_NAME = -11 # Invalid label name
RPC_WALLET_KEYPOOL_RAN_OUT = -12 # Keypool ran out call keypoolrefill first RPC_WALLET_KEYPOOL_RAN_OUT = -12 # Keypool ran out call keypoolrefill first
RPC_WALLET_UNLOCK_NEEDED = -13 # Enter the wallet passphrase with walletpassphrase first RPC_WALLET_UNLOCK_NEEDED = -13 # Enter the wallet passphrase with walletpassphrase first
RPC_WALLET_PASSPHRASE_INCORRECT = -14 # The wallet passphrase entered was incorrect RPC_WALLET_PASSPHRASE_INCORRECT = -14 # The wallet passphrase entered was incorrect
RPC_WALLET_WRONG_ENC_STATE = ( RPC_WALLET_WRONG_ENC_STATE = -15 # Command given in wrong wallet encryption state (encrypting an encrypted wallet etc.)
-15 RPC_WALLET_ENCRYPTION_FAILED = -16 # Failed to encrypt the wallet
) # Command given in wrong wallet encryption state (encrypting an encrypted wallet etc.) RPC_WALLET_ALREADY_UNLOCKED = -17 # Wallet is already unlocked
RPC_WALLET_ENCRYPTION_FAILED = -16 # Failed to encrypt the wallet RPC_WALLET_NOT_FOUND = -18 # Invalid wallet specified
RPC_WALLET_ALREADY_UNLOCKED = -17 # Wallet is already unlocked RPC_WALLET_NOT_SPECIFIED = -19 # No wallet specified (error when there are multiple wallets loaded)
RPC_WALLET_NOT_FOUND = -18 # Invalid wallet specified
RPC_WALLET_NOT_SPECIFIED = -19 # No wallet specified (error when there are multiple wallets loaded)

View File

@@ -1,6 +1,6 @@
[bitcoind] [bitcoind]
btc_rpc_user = user btc_rpc_user = user
btc_rpc_passwd = passwd btc_rpc_password = passwd
btc_rpc_connect = localhost btc_rpc_connect = localhost
btc_rpc_port = 8332 btc_rpc_port = 8332
btc_network = mainnet btc_network = mainnet
@@ -11,7 +11,8 @@ feed_connect = 127.0.0.1
feed_port = 28332 feed_port = 28332
[teos] [teos]
max_appointments = 100 subscription_slots = 100
max_appointments = 1000000
expiry_delta = 6 expiry_delta = 6
min_to_self_delay = 20 min_to_self_delay = 20

View File

@@ -14,11 +14,13 @@ from teos.help import show_usage
from teos.watcher import Watcher from teos.watcher import Watcher
from teos.builder import Builder from teos.builder import Builder
from teos.carrier import Carrier from teos.carrier import Carrier
from teos.users_dbm import UsersDBM
from teos.inspector import Inspector from teos.inspector import Inspector
from teos.responder import Responder from teos.responder import Responder
from teos.db_manager import DBManager from teos.gatekeeper import Gatekeeper
from teos.chain_monitor import ChainMonitor from teos.chain_monitor import ChainMonitor
from teos.block_processor import BlockProcessor from teos.block_processor import BlockProcessor
from teos.appointments_dbm import AppointmentsDBM
from teos.tools import can_connect_to_bitcoind, in_correct_network from teos.tools import can_connect_to_bitcoind, in_correct_network
from teos import LOG_PREFIX, DATA_DIR, DEFAULT_CONF, CONF_FILE_NAME from teos import LOG_PREFIX, DATA_DIR, DEFAULT_CONF, CONF_FILE_NAME
@@ -43,13 +45,14 @@ def main(command_line_conf):
signal(SIGQUIT, handle_signals) signal(SIGQUIT, handle_signals)
# Loads config and sets up the data folder and log file # Loads config and sets up the data folder and log file
config_loader = ConfigLoader(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf) data_dir = command_line_conf.get("DATA_DIR") if "DATA_DIR" in command_line_conf else DATA_DIR
config_loader = ConfigLoader(data_dir, CONF_FILE_NAME, DEFAULT_CONF, command_line_conf)
config = config_loader.build_config() config = config_loader.build_config()
setup_data_folder(DATA_DIR) setup_data_folder(data_dir)
setup_logging(config.get("LOG_FILE"), LOG_PREFIX) setup_logging(config.get("LOG_FILE"), LOG_PREFIX)
logger.info("Starting TEOS") logger.info("Starting TEOS")
db_manager = DBManager(config.get("DB_PATH")) db_manager = AppointmentsDBM(config.get("APPOINTMENTS_DB_PATH"))
bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC")} bitcoind_connect_params = {k: v for k, v in config.items() if k.startswith("BTC")}
bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("FEED")} bitcoind_feed_params = {k: v for k, v in config.items() if k.startswith("FEED")}
@@ -150,7 +153,8 @@ def main(command_line_conf):
# Fire the API and the ChainMonitor # Fire the API and the ChainMonitor
# FIXME: 92-block-data-during-bootstrap-db # FIXME: 92-block-data-during-bootstrap-db
chain_monitor.monitor_chain() chain_monitor.monitor_chain()
API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher).start() gatekeeper = Gatekeeper(UsersDBM(config.get("USERS_DB_PATH")), config.get("DEFAULT_SLOTS"))
API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher, gatekeeper).start()
except Exception as e: except Exception as e:
logger.error("An error occurred: {}. Shutting down".format(e)) logger.error("An error occurred: {}. Shutting down".format(e))
exit(1) exit(1)
@@ -171,7 +175,7 @@ if __name__ == "__main__":
if opt in ["--btcrpcuser"]: if opt in ["--btcrpcuser"]:
command_line_conf["BTC_RPC_USER"] = arg command_line_conf["BTC_RPC_USER"] = arg
if opt in ["--btcrpcpassword"]: if opt in ["--btcrpcpassword"]:
command_line_conf["BTC_RPC_PASSWD"] = arg command_line_conf["BTC_RPC_PASSWORD"] = arg
if opt in ["--btcrpcconnect"]: if opt in ["--btcrpcconnect"]:
command_line_conf["BTC_RPC_CONNECT"] = arg command_line_conf["BTC_RPC_CONNECT"] = arg
if opt in ["--btcrpcport"]: if opt in ["--btcrpcport"]:
@@ -180,7 +184,7 @@ if __name__ == "__main__":
except ValueError: except ValueError:
exit("btcrpcport must be an integer") exit("btcrpcport must be an integer")
if opt in ["--datadir"]: if opt in ["--datadir"]:
DATA_DIR = os.path.expanduser(arg) command_line_conf["DATA_DIR"] = os.path.expanduser(arg)
if opt in ["-h", "--help"]: if opt in ["-h", "--help"]:
exit(show_usage()) exit(show_usage())

View File

@@ -15,7 +15,7 @@ def bitcoin_cli(btc_connect_params):
Args: Args:
btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind
(rpc user, rpc passwd, host and port) (rpc user, rpc password, host and port)
Returns: Returns:
:obj:`AuthServiceProxy <teos.utils.auth_proxy.AuthServiceProxy>`: An authenticated service proxy to ``bitcoind`` :obj:`AuthServiceProxy <teos.utils.auth_proxy.AuthServiceProxy>`: An authenticated service proxy to ``bitcoind``
@@ -26,7 +26,7 @@ def bitcoin_cli(btc_connect_params):
"http://%s:%s@%s:%d" "http://%s:%s@%s:%d"
% ( % (
btc_connect_params.get("BTC_RPC_USER"), btc_connect_params.get("BTC_RPC_USER"),
btc_connect_params.get("BTC_RPC_PASSWD"), btc_connect_params.get("BTC_RPC_PASSWORD"),
btc_connect_params.get("BTC_RPC_CONNECT"), btc_connect_params.get("BTC_RPC_CONNECT"),
btc_connect_params.get("BTC_RPC_PORT"), btc_connect_params.get("BTC_RPC_PORT"),
) )
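The rename also means the connection-parameter dictionary is keyed by ``BTC_RPC_PASSWORD``. A small sketch of the URL that ``bitcoin_cli`` builds from it; credentials and host are placeholders:

```python
btc_connect_params = {
    "BTC_RPC_USER": "user",           # placeholder credentials
    "BTC_RPC_PASSWORD": "passwd",     # renamed key (was BTC_RPC_PASSWD)
    "BTC_RPC_CONNECT": "localhost",
    "BTC_RPC_PORT": 18443,            # regtest port
}

url = "http://%s:%s@%s:%d" % (
    btc_connect_params.get("BTC_RPC_USER"),
    btc_connect_params.get("BTC_RPC_PASSWORD"),
    btc_connect_params.get("BTC_RPC_CONNECT"),
    btc_connect_params.get("BTC_RPC_PORT"),
)
print(url)  # http://user:passwd@localhost:18443
```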
@@ -40,7 +40,7 @@ def can_connect_to_bitcoind(btc_connect_params):
Args: Args:
btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind
(rpc user, rpc passwd, host and port) (rpc user, rpc password, host and port)
Returns: Returns:
:obj:`bool`: ``True`` if the connection can be established. ``False`` otherwise. :obj:`bool`: ``True`` if the connection can be established. ``False`` otherwise.
""" """
@@ -62,7 +62,7 @@ def in_correct_network(btc_connect_params, network):
Args: Args:
btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind
(rpc user, rpc passwd, host and port) (rpc user, rpc password, host and port)
network (:obj:`str`): the network the tower is connected to. network (:obj:`str`): the network the tower is connected to.
Returns: Returns:

128
teos/users_dbm.py Normal file
View File

@@ -0,0 +1,128 @@
import json
import plyvel
from teos import LOG_PREFIX
from teos.db_manager import DBManager
from common.logger import Logger
from common.tools import is_compressed_pk
logger = Logger(actor="UsersDBM", log_name_prefix=LOG_PREFIX)
class UsersDBM(DBManager):
"""
The :class:`UsersDBM` is in charge of interacting with the users database (``LevelDB``).
Keys and values are stored as bytes in the database but processed as strings by the manager.
Args:
db_path (:obj:`str`): the path (relative or absolute) to the system folder containing the database. A fresh
database will be created if the specified path does not contain one.
Raises:
:obj:`ValueError`: If the provided ``db_path`` is not a string.
:obj:`plyvel.Error`: If the db is currently unavailable (being used by another process).
"""
def __init__(self, db_path):
if not isinstance(db_path, str):
raise ValueError("db_path must be a valid path/name")
try:
super().__init__(db_path)
except plyvel.Error as e:
if "LOCK: Resource temporarily unavailable" in str(e):
logger.info("The db is already being used by another process (LOCK)")
raise e
def store_user(self, user_pk, user_data):
"""
Stores a user record to the database. ``user_pk`` is used as identifier.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
user_data (:obj:`dict`): the user associated data, as a dictionary.
Returns:
:obj:`bool`: True if the user was stored in the database, False otherwise.
"""
if is_compressed_pk(user_pk):
try:
self.create_entry(user_pk, json.dumps(user_data))
logger.info("Adding user to Gatekeeper's db", user_pk=user_pk)
return True
except json.JSONDecodeError:
logger.info("Could't add user to db. Wrong user data format", user_pk=user_pk, user_data=user_data)
return False
except TypeError:
logger.info("Could't add user to db", user_pk=user_pk, user_data=user_data)
return False
else:
logger.info("Could't add user to db. Wrong pk format", user_pk=user_pk, user_data=user_data)
return False
def load_user(self, user_pk):
"""
Loads a user record from the database using the ``user_pk`` as identifier.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
Returns:
:obj:`dict`: A dictionary containing the user data if the ``key`` is found.
Returns ``None`` otherwise.
"""
try:
data = self.load_entry(user_pk)
data = json.loads(data)
except (TypeError, json.decoder.JSONDecodeError):
data = None
return data
def delete_user(self, user_pk):
"""
Deletes a user record from the database.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
Returns:
:obj:`bool`: True if the user was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(user_pk)
logger.info("Deleting user from Gatekeeper's db", uuid=user_pk)
return True
except TypeError:
logger.info("Cant delete user from db, user key has wrong type", uuid=user_pk)
return False
def load_all_users(self):
"""
Loads all user records from the database.
Returns:
:obj:`dict`: A dictionary containing all users indexed by ``user_pk``.
Returns an empty dictionary if no data is found.
"""
data = {}
for k, v in self.db.iterator():
# Get the user_pk and the associated user data from the db
user_pk = k.decode("utf-8")
data[user_pk] = json.loads(v)
return data
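A quick usage sketch for the new ``UsersDBM``, assuming the teos package and plyvel are importable; the database path and public key are made up:

```python
from teos.users_dbm import UsersDBM

users_db = UsersDBM("/tmp/example_users_db")  # hypothetical path

user_pk = "02" + "aa" * 32  # made-up 33-byte compressed public key (hex)
users_db.store_user(user_pk, {"available_slots": 100})

assert users_db.load_user(user_pk) == {"available_slots": 100}
assert user_pk in users_db.load_all_users()
```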

View File

@@ -1,4 +1,3 @@
from uuid import uuid4
from queue import Queue from queue import Queue
from threading import Thread from threading import Thread
@@ -6,7 +5,7 @@ import common.cryptographer
from common.logger import Logger from common.logger import Logger
from common.tools import compute_locator from common.tools import compute_locator
from common.appointment import Appointment from common.appointment import Appointment
from common.cryptographer import Cryptographer from common.cryptographer import Cryptographer, hash_160
from teos import LOG_PREFIX from teos import LOG_PREFIX
from teos.cleaner import Cleaner from teos.cleaner import Cleaner
@@ -17,13 +16,12 @@ common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_
class Watcher: class Watcher:
""" """
The :class:`Watcher` is the class in charge to watch for channel breaches for the appointments accepted by the The :class:`Watcher` is in charge of watching for channel breaches for the appointments accepted by the tower.
tower.
The :class:`Watcher` keeps track of the accepted appointments in ``appointments`` and, for every new received block, The :class:`Watcher` keeps track of the accepted appointments in ``appointments`` and, for every new received block,
checks if any breach has happened by comparing the txids with the appointment locators. If a breach is seen, the checks if any breach has happened by comparing the txids with the appointment locators. If a breach is seen, the
:obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` of the corresponding appointment is decrypted and the data :obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` of the corresponding appointment is decrypted and the
is passed to the :obj:`Responder <teos.responder.Responder>`. data is passed to the :obj:`Responder <teos.responder.Responder>`.
If an appointment reaches its end with no breach, the data is simply deleted. If an appointment reaches its end with no breach, the data is simply deleted.
@@ -31,28 +29,30 @@ class Watcher:
:obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`. :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
Args: Args:
db_manager (:obj:`DBManager <teos.db_manager>`): a ``DBManager`` instance to interact with the database. db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): an ``AppointmentsDBM`` instance
to interact with the database.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get block from bitcoind. get block from bitcoind.
responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance. responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance.
sk_der (:obj:`bytes`): a DER encoded private key used to sign appointment receipts (signaling acceptance). sk_der (:obj:`bytes`): a DER encoded private key used to sign appointment receipts (signaling acceptance).
max_appointments (:obj:`int`): the maximum ammount of appointments accepted by the ``Watcher`` at the same time. max_appointments (:obj:`int`): the maximum amount of appointments accepted by the ``Watcher`` at the same time.
expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around. expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around.
Attributes: Attributes:
appointments (:obj:`dict`): a dictionary containing a simplification of the appointments (:obj:`Appointment appointments (:obj:`dict`): a dictionary containing a summary of the appointments (:obj:`Appointment
<teos.appointment.Appointment>` instances) accepted by the tower (``locator`` and ``end_time``). <teos.appointment.Appointment>` instances) accepted by the tower (``locator``, ``end_time``, and ``size``).
It's populated through ``add_appointment``. It's populated through ``add_appointment``.
locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map used to allow the :obj:`Watcher` to deal with several locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map used to allow the :obj:`Watcher` to deal with several
appointments with the same ``locator``. appointments with the same ``locator``.
block_queue (:obj:`Queue`): A queue used by the :obj:`Watcher` to receive block hashes from ``bitcoind``. It is block_queue (:obj:`Queue`): A queue used by the :obj:`Watcher` to receive block hashes from ``bitcoind``. It is
populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`. populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
db_manager (:obj:`DBManager <teos.db_manager>`): A db manager instance to interact with the database. db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): an ``AppointmentsDBM`` instance
to interact with the database.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get block from bitcoind. get block from bitcoind.
responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance. responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance.
signing_key (:mod:`PrivateKey`): a private key used to sign accepted appointments. signing_key (:mod:`PrivateKey`): a private key used to sign accepted appointments.
max_appointments (:obj:`int`): the maximum ammount of appointments accepted by the ``Watcher`` at the same time. max_appointments (:obj:`int`): the maximum amount of appointments accepted by the ``Watcher`` at the same time.
expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around. expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around.
Raises: Raises:
@@ -72,17 +72,33 @@ class Watcher:
self.signing_key = Cryptographer.load_private_key_der(sk_der) self.signing_key = Cryptographer.load_private_key_der(sk_der)
def awake(self): def awake(self):
"""Starts a new thread to monitor the blockchain for channel breaches"""
watcher_thread = Thread(target=self.do_watch, daemon=True) watcher_thread = Thread(target=self.do_watch, daemon=True)
watcher_thread.start() watcher_thread.start()
return watcher_thread return watcher_thread
def add_appointment(self, appointment): def get_appointment_summary(self, uuid):
"""
Returns the summary of an appointment. The summary consists of the data kept in memory:
{locator, end_time, and size}
Args:
uuid (:obj:`str`): a 16-byte hex string identifying the appointment.
Returns:
:obj:`dict` or :obj:`None`: a dictionary with the appointment summary, or ``None`` if the appointment is not
found.
"""
return self.appointments.get(uuid)
def add_appointment(self, appointment, user_pk):
""" """
Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached. Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached.
``add_appointment`` is the entry point of the Watcher. Upon receiving a new appointment it will start monitoring ``add_appointment`` is the entry point of the ``Watcher``. Upon receiving a new appointment it will start
the blockchain (``do_watch``) until ``appointments`` is empty. monitoring the blockchain (``do_watch``) until ``appointments`` is empty.
Once a breach is seen on the blockchain, the :obj:`Watcher` will decrypt the corresponding Once a breach is seen on the blockchain, the :obj:`Watcher` will decrypt the corresponding
:obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` and pass the information to the :obj:`EncryptedBlob <common.encrypted_blob.EncryptedBlob>` and pass the information to the
@@ -96,6 +112,7 @@ class Watcher:
Args: Args:
appointment (:obj:`Appointment <teos.appointment.Appointment>`): the appointment to be added to the appointment (:obj:`Appointment <teos.appointment.Appointment>`): the appointment to be added to the
:obj:`Watcher`. :obj:`Watcher`.
user_pk(:obj:`str`): the public key that identifies the user who sent the appointment (33-bytes hex str).
Returns: Returns:
:obj:`tuple`: A tuple signaling if the appointment has been added or not (based on ``max_appointments``). :obj:`tuple`: A tuple signaling if the appointment has been added or not (based on ``max_appointments``).
@@ -103,21 +120,29 @@ class Watcher:
- ``(True, signature)`` if the appointment has been accepted. - ``(True, signature)`` if the appointment has been accepted.
- ``(False, None)`` otherwise. - ``(False, None)`` otherwise.
""" """
if len(self.appointments) < self.max_appointments: if len(self.appointments) < self.max_appointments:
uuid = uuid4().hex # The uuids are generated as the RIPEMD160(locator||user_pubkey), that way the tower does not need to know
self.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time} # anything about the user from this point on (no need to store user_pk in the database).
# If an appointment is requested by the user the uuid can be recomputed and queried straightaway (no maps).
uuid = hash_160("{}{}".format(appointment.locator, user_pk))
self.appointments[uuid] = {
"locator": appointment.locator,
"end_time": appointment.end_time,
"size": len(appointment.encrypted_blob.data),
}
if appointment.locator in self.locator_uuid_map: if appointment.locator in self.locator_uuid_map:
self.locator_uuid_map[appointment.locator].append(uuid) # If the uuid is already in the map it means this is an update.
if uuid not in self.locator_uuid_map[appointment.locator]:
self.locator_uuid_map[appointment.locator].append(uuid)
else: else:
self.locator_uuid_map[appointment.locator] = [uuid] self.locator_uuid_map[appointment.locator] = [uuid]
self.db_manager.store_watcher_appointment(uuid, appointment.to_json()) self.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
self.db_manager.create_append_locator_map(appointment.locator, uuid) self.db_manager.create_append_locator_map(appointment.locator, uuid)
appointment_added = True appointment_added = True
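The comment above is the key design point of this hunk: the uuid is a deterministic digest of `locator||user_pk`, so both tower and client can recompute it on demand and no per-user index needs to be persisted. A minimal illustration reusing `hash_160` from `common.cryptographer` (values are placeholders; whether `hash_160` is a plain RIPEMD160 or a SHA256-then-RIPEMD160 is an implementation detail of that module):

```python
from common.cryptographer import hash_160

locator = "ab" * 16          # 16-byte locator, hex encoded (placeholder)
user_pk = "02" + "cd" * 32   # 33-byte compressed public key, hex encoded (placeholder)

# Same derivation as add_appointment above: deterministic, so the uuid can be
# recomputed from (locator, user_pk) whenever the appointment is queried.
uuid = hash_160("{}{}".format(locator, user_pk))
assert uuid == hash_160("{}{}".format(locator, user_pk))
```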
@@ -135,7 +160,7 @@ class Watcher:
def do_watch(self): def do_watch(self):
""" """
Monitors the blockchain whilst there are pending appointments. Monitors the blockchain for channel breaches.
This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the
:obj:`Responder <teos.responder.Responder>` upon detecting a breach. :obj:`Responder <teos.responder.Responder>` upon detecting a breach.
@@ -198,7 +223,7 @@ class Watcher:
appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager
) )
if len(self.appointments) is 0: if len(self.appointments) == 0:
logger.info("No more pending appointments") logger.info("No more pending appointments")
# Register the last processed block for the watcher # Register the last processed block for the watcher

View File

@@ -2,6 +2,7 @@ import os
import json import json
import shutil import shutil
import responses import responses
from binascii import hexlify
from coincurve import PrivateKey from coincurve import PrivateKey
from requests.exceptions import ConnectionError from requests.exceptions import ConnectionError
@@ -20,17 +21,18 @@ common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=teos
config = get_config() config = get_config()
# dummy keys for the tests # dummy keys for the tests
dummy_sk = PrivateKey() dummy_cli_sk = PrivateKey.from_int(1)
dummy_pk = dummy_sk.public_key dummy_cli_compressed_pk = dummy_cli_sk.public_key.format(compressed=True)
another_sk = PrivateKey() dummy_teos_sk = PrivateKey.from_int(2)
dummy_teos_pk = dummy_teos_sk.public_key
another_sk = PrivateKey.from_int(3)
teos_url = "http://{}:{}".format(config.get("TEOS_SERVER"), config.get("TEOS_PORT"))
add_appointment_endpoint = "{}/add_appointment".format(teos_url)
register_endpoint = "{}/register".format(teos_url)
get_appointment_endpoint = "{}/get_appointment".format(teos_url)
# Replace the key in the module with a key we control for the tests dummy_appointment_data = {
teos_cli.teos_public_key = dummy_pk
# Replace endpoint with dummy one
teos_endpoint = "http://{}:{}/".format(config.get("TEOS_SERVER"), config.get("TEOS_PORT"))
dummy_appointment_request = {
"tx": get_random_value_hex(192), "tx": get_random_value_hex(192),
"tx_id": get_random_value_hex(32), "tx_id": get_random_value_hex(32),
"start_time": 1500, "start_time": 1500,
@@ -39,29 +41,104 @@ dummy_appointment_request = {
} }
# This is the format the appointment turns into once it hits "add_appointment" # This is the format the appointment turns into once it hits "add_appointment"
dummy_appointment_full = { dummy_appointment_dict = {
"locator": compute_locator(dummy_appointment_request.get("tx_id")), "locator": compute_locator(dummy_appointment_data.get("tx_id")),
"start_time": dummy_appointment_request.get("start_time"), "start_time": dummy_appointment_data.get("start_time"),
"end_time": dummy_appointment_request.get("end_time"), "end_time": dummy_appointment_data.get("end_time"),
"to_self_delay": dummy_appointment_request.get("to_self_delay"), "to_self_delay": dummy_appointment_data.get("to_self_delay"),
"encrypted_blob": Cryptographer.encrypt( "encrypted_blob": Cryptographer.encrypt(
Blob(dummy_appointment_request.get("tx")), dummy_appointment_request.get("tx_id") Blob(dummy_appointment_data.get("tx")), dummy_appointment_data.get("tx_id")
), ),
} }
dummy_appointment = Appointment.from_dict(dummy_appointment_full) dummy_appointment = Appointment.from_dict(dummy_appointment_dict)
def load_dummy_keys(*args): def get_signature(message, sk):
return dummy_pk, dummy_sk, dummy_pk.format(compressed=True) return Cryptographer.sign(message, sk)
def get_dummy_signature(*args): # TODO: 90-add-more-add-appointment-tests
return Cryptographer.sign(dummy_appointment.serialize(), dummy_sk) @responses.activate
def test_register():
# Simulate a register response
compressed_pk_hex = hexlify(dummy_cli_compressed_pk).decode("utf-8")
response = {"public_key": compressed_pk_hex, "available_slots": 100}
responses.add(responses.POST, register_endpoint, json=response, status=200)
result = teos_cli.register(compressed_pk_hex, teos_url)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == register_endpoint
assert result.get("public_key") == compressed_pk_hex and result.get("available_slots") == response.get(
"available_slots"
)
def get_bad_signature(*args): @responses.activate
return Cryptographer.sign(dummy_appointment.serialize(), another_sk) def test_add_appointment():
# Simulate a request to add_appointment for dummy_appointment, make sure that the right endpoint is requested
# and the return value is True
response = {
"locator": dummy_appointment.locator,
"signature": get_signature(dummy_appointment.serialize(), dummy_teos_sk),
"available_slots": 100,
}
responses.add(responses.POST, add_appointment_endpoint, json=response, status=200)
result = teos_cli.add_appointment(
dummy_appointment_data, dummy_cli_sk, dummy_teos_pk, teos_url, config.get("APPOINTMENTS_FOLDER_NAME")
)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == add_appointment_endpoint
assert result
@responses.activate
def test_add_appointment_with_invalid_signature(monkeypatch):
# Simulate a request to add_appointment for dummy_appointment, but sign with a different key,
# make sure that the right endpoint is requested, but the return value is False
response = {
"locator": dummy_appointment.to_dict()["locator"],
"signature": get_signature(dummy_appointment.serialize(), another_sk), # Sign with a bad key
"available_slots": 100,
}
responses.add(responses.POST, add_appointment_endpoint, json=response, status=200)
result = teos_cli.add_appointment(
dummy_appointment_data, dummy_cli_sk, dummy_teos_pk, teos_url, config.get("APPOINTMENTS_FOLDER_NAME")
)
assert result is False
shutil.rmtree(config.get("APPOINTMENTS_FOLDER_NAME"))
@responses.activate
def test_get_appointment():
# Response of get_appointment endpoint is an appointment with status added to it.
response = {
"locator": dummy_appointment_dict.get("locator"),
"status": "being_watch",
"appointment": dummy_appointment_dict,
}
responses.add(responses.POST, get_appointment_endpoint, json=response, status=200)
result = teos_cli.get_appointment(dummy_appointment_dict.get("locator"), dummy_cli_sk, dummy_teos_pk, teos_url)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == get_appointment_endpoint
assert result.get("locator") == response.get("locator")
@responses.activate
def test_get_appointment_err():
locator = get_random_value_hex(16)
# Test that get_appointment handles a connection error appropriately.
responses.add(responses.POST, get_appointment_endpoint, body=ConnectionError())
assert not teos_cli.get_appointment(locator, dummy_cli_sk, dummy_teos_pk, teos_url)
def test_load_keys(): def test_load_keys():
@@ -70,9 +147,9 @@ def test_load_keys():
public_key_file_path = "pk_test_file" public_key_file_path = "pk_test_file"
empty_file_path = "empty_file" empty_file_path = "empty_file"
with open(private_key_file_path, "wb") as f: with open(private_key_file_path, "wb") as f:
f.write(dummy_sk.to_der()) f.write(dummy_cli_sk.to_der())
with open(public_key_file_path, "wb") as f: with open(public_key_file_path, "wb") as f:
f.write(dummy_pk.format(compressed=True)) f.write(dummy_cli_compressed_pk)
with open(empty_file_path, "wb") as f: with open(empty_file_path, "wb") as f:
pass pass
@@ -99,41 +176,44 @@ def test_load_keys():
os.remove(empty_file_path) os.remove(empty_file_path)
# TODO: 90-add-more-add-appointment-tests # WIP: HERE
@responses.activate @responses.activate
def test_add_appointment(monkeypatch): def test_post_request():
# Simulate a request to add_appointment for dummy_appointment, make sure that the right endpoint is requested
# and the return value is True
monkeypatch.setattr(teos_cli, "load_keys", load_dummy_keys)
response = {"locator": dummy_appointment.locator, "signature": get_dummy_signature()}
responses.add(responses.POST, teos_endpoint, json=response, status=200)
result = teos_cli.add_appointment([json.dumps(dummy_appointment_request)], teos_endpoint, config)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == teos_endpoint
assert result
@responses.activate
def test_add_appointment_with_invalid_signature(monkeypatch):
# Simulate a request to add_appointment for dummy_appointment, but sign with a different key,
# make sure that the right endpoint is requested, but the return value is False
# Make sure the test uses the bad dummy signature
monkeypatch.setattr(teos_cli, "load_keys", load_dummy_keys)
response = { response = {
"locator": dummy_appointment.to_dict()["locator"], "locator": dummy_appointment.to_dict()["locator"],
"signature": get_bad_signature(), # Sign with a bad key "signature": get_signature(dummy_appointment.serialize(), dummy_teos_sk),
} }
responses.add(responses.POST, teos_endpoint, json=response, status=200) responses.add(responses.POST, add_appointment_endpoint, json=response, status=200)
result = teos_cli.add_appointment([json.dumps(dummy_appointment_request)], teos_endpoint, config) response = teos_cli.post_request(json.dumps(dummy_appointment_data), add_appointment_endpoint)
shutil.rmtree(config.get("APPOINTMENTS_FOLDER_NAME")) assert len(responses.calls) == 1
assert responses.calls[0].request.url == add_appointment_endpoint
assert response
assert result is False
@responses.activate
def test_process_post_response():
# Let's first create a response
response = {
"locator": dummy_appointment.to_dict()["locator"],
"signature": get_signature(dummy_appointment.serialize(), dummy_teos_sk),
}
# A 200 OK with a correct json response should return the json of the response
responses.add(responses.POST, add_appointment_endpoint, json=response, status=200)
r = teos_cli.post_request(json.dumps(dummy_appointment_data), add_appointment_endpoint)
assert teos_cli.process_post_response(r) == r.json()
# If we modify the response code for a rejection (lets say 404) we should get None
responses.replace(responses.POST, add_appointment_endpoint, json=response, status=404)
r = teos_cli.post_request(json.dumps(dummy_appointment_data), add_appointment_endpoint)
assert teos_cli.process_post_response(r) is None
# The same should happen if the response is not in json
responses.replace(responses.POST, add_appointment_endpoint, status=404)
r = teos_cli.post_request(json.dumps(dummy_appointment_data), add_appointment_endpoint)
assert teos_cli.process_post_response(r) is None
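For reference, a response handler consistent with the three assertions above could be as simple as the sketch below; the name `process_post_response` comes from the tests, but the body is inferred from them rather than copied from the CLI source:

```python
def process_post_response(response):
    # Anything that does not carry a JSON body is rejected
    try:
        response_json = response.json()
    except ValueError:
        return None

    # Non-200 responses (e.g. the 404 used above) are rejected as well
    if response.status_code != 200:
        return None

    return response_json
```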
def test_parse_add_appointment_args(): def test_parse_add_appointment_args():
@@ -147,7 +227,7 @@ def test_parse_add_appointment_args():
# If file exists and has data in it, function should work. # If file exists and has data in it, function should work.
with open("appt_test_file", "w") as f: with open("appt_test_file", "w") as f:
json.dump(dummy_appointment_request, f) json.dump(dummy_appointment_data, f)
appt_data = teos_cli.parse_add_appointment_args(["-f", "appt_test_file"]) appt_data = teos_cli.parse_add_appointment_args(["-f", "appt_test_file"])
assert appt_data assert appt_data
@@ -155,56 +235,21 @@ def test_parse_add_appointment_args():
os.remove("appt_test_file") os.remove("appt_test_file")
# If appointment json is passed in, function should work. # If appointment json is passed in, function should work.
appt_data = teos_cli.parse_add_appointment_args([json.dumps(dummy_appointment_request)]) appt_data = teos_cli.parse_add_appointment_args([json.dumps(dummy_appointment_data)])
assert appt_data assert appt_data
@responses.activate
def test_post_appointment():
response = {
"locator": dummy_appointment.to_dict()["locator"],
"signature": Cryptographer.sign(dummy_appointment.serialize(), dummy_pk),
}
responses.add(responses.POST, teos_endpoint, json=response, status=200)
response = teos_cli.post_appointment(json.dumps(dummy_appointment_request), teos_endpoint)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == teos_endpoint
assert response
@responses.activate
def test_process_post_appointment_response():
# Let's first crete a response
response = {
"locator": dummy_appointment.to_dict()["locator"],
"signature": Cryptographer.sign(dummy_appointment.serialize(), dummy_pk),
}
# A 200 OK with a correct json response should return the json of the response
responses.add(responses.POST, teos_endpoint, json=response, status=200)
r = teos_cli.post_appointment(json.dumps(dummy_appointment_request), teos_endpoint)
assert teos_cli.process_post_appointment_response(r) == r.json()
# If we modify the response code tor a rejection (lets say 404) we should get None
responses.replace(responses.POST, teos_endpoint, json=response, status=404)
r = teos_cli.post_appointment(json.dumps(dummy_appointment_request), teos_endpoint)
assert teos_cli.process_post_appointment_response(r) is None
# The same should happen if the response is not in json
responses.replace(responses.POST, teos_endpoint, status=404)
r = teos_cli.post_appointment(json.dumps(dummy_appointment_request), teos_endpoint)
assert teos_cli.process_post_appointment_response(r) is None
def test_save_appointment_receipt(monkeypatch): def test_save_appointment_receipt(monkeypatch):
appointments_folder = "test_appointments_receipts" appointments_folder = "test_appointments_receipts"
config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder
# The function creates a new directory if it does not exist # The function creates a new directory if it does not exist
assert not os.path.exists(appointments_folder) assert not os.path.exists(appointments_folder)
teos_cli.save_appointment_receipt(dummy_appointment.to_dict(), get_dummy_signature(), config) teos_cli.save_appointment_receipt(
dummy_appointment.to_dict(),
get_signature(dummy_appointment.serialize(), dummy_teos_sk),
config.get("APPOINTMENTS_FOLDER_NAME"),
)
assert os.path.exists(appointments_folder) assert os.path.exists(appointments_folder)
# Check that the receipt has been saved by checking the file names # Check that the receipt has been saved by checking the file names
@@ -212,31 +257,3 @@ def test_save_appointment_receipt(monkeypatch):
assert any([dummy_appointment.locator in f for f in files]) assert any([dummy_appointment.locator in f for f in files])
shutil.rmtree(appointments_folder) shutil.rmtree(appointments_folder)
@responses.activate
def test_get_appointment():
# Response of get_appointment endpoint is an appointment with status added to it.
dummy_appointment_full["status"] = "being_watched"
response = dummy_appointment_full
get_appointment_endpoint = teos_endpoint + "get_appointment"
request_url = "{}?locator={}".format(get_appointment_endpoint, response.get("locator"))
responses.add(responses.GET, request_url, json=response, status=200)
result = teos_cli.get_appointment(response.get("locator"), get_appointment_endpoint)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == request_url
assert result.get("locator") == response.get("locator")
@responses.activate
def test_get_appointment_err():
locator = get_random_value_hex(16)
get_appointment_endpoint = teos_endpoint + "get_appointment"
# Test that get_appointment handles a connection error appropriately.
request_url = "{}?locator={}".format(get_appointment_endpoint, locator)
responses.add(responses.GET, request_url, body=ConnectionError())
assert not teos_cli.get_appointment(locator, get_appointment_endpoint)

View File

@@ -1,4 +1,3 @@
import json
import struct import struct
import binascii import binascii
from pytest import fixture from pytest import fixture
@@ -71,26 +70,6 @@ def test_to_dict(appointment_data):
) )
def test_to_json(appointment_data):
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
)
dict_appointment = json.loads(appointment.to_json())
assert (
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["to_self_delay"] == dict_appointment["to_self_delay"]
and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
)
def test_from_dict(appointment_data): def test_from_dict(appointment_data):
# The appointment should be build if we don't miss any field # The appointment should be build if we don't miss any field
appointment = Appointment.from_dict(appointment_data) appointment = Appointment.from_dict(appointment_data)

View File

@@ -208,6 +208,15 @@ def test_recover_pk():
assert isinstance(rpk, PublicKey) assert isinstance(rpk, PublicKey)
def test_recover_pk_invalid_sigrec():
message = "Hey, it's me"
signature = "ddbfb019e4d56155b4175066c2b615ab765d317ae7996d188b4a5fae4cc394adf98fef46034d0553149392219ca6d37dca9abdfa6366a8e54b28f19d3e5efa8a14b556205dc7f33a"
# The given signature, when zbase32 decoded, has a first byte with value lower than 31.
# The first byte of the signature should be 31 + SigRec, so this should fail
assert Cryptographer.recover_pk(message, signature) is None
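The check exercised here relies on the recoverable-signature convention used by lightning implementations: the zbase32 payload decodes to a 65-byte compact signature whose first byte is `31 + SigRec`, with `SigRec` in `[0, 3]` for compressed keys. A sketch of just that header check, assuming the zbase32 decoding has already been done (the helper name is hypothetical):

```python
def sig_rec_from_header(decoded_sig):
    """Returns the recovery id encoded in a 65-byte recoverable signature, or None if invalid."""
    header = decoded_sig[0]
    if header < 31 or header > 34:
        # The failure mode exercised by test_recover_pk_invalid_sigrec
        return None
    return header - 31
```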
def test_recover_pk_ground_truth(): def test_recover_pk_ground_truth():
# Use a message a signature generated by c-lightning and see if we recover the proper key # Use a message a signature generated by c-lightning and see if we recover the proper key
message = b"Test message" message = b"Test message"
@@ -255,3 +264,27 @@ def test_verify_pk_wrong():
rpk = Cryptographer.recover_pk(message, zbase32_sig) rpk = Cryptographer.recover_pk(message, zbase32_sig)
assert not Cryptographer.verify_rpk(sk2.public_key, rpk) assert not Cryptographer.verify_rpk(sk2.public_key, rpk)
def test_get_compressed_pk():
sk, pk = generate_keypair()
compressed_pk = Cryptographer.get_compressed_pk(pk)
assert isinstance(compressed_pk, str) and len(compressed_pk) == 66
assert compressed_pk[:2] in ["02", "03"]
def test_get_compressed_pk_wrong_key():
# pk should be properly initialized. Initializing from int will cause it to not be recoverable
pk = PublicKey(0)
compressed_pk = Cryptographer.get_compressed_pk(pk)
assert compressed_pk is None
def test_get_compressed_pk_wrong_type():
# Passing a value that is not a PublicKey will make it to fail too
pk = get_random_value_hex(33)
compressed_pk = Cryptographer.get_compressed_pk(pk)
assert compressed_pk is None

View File

@@ -3,8 +3,9 @@ import logging
from common.constants import LOCATOR_LEN_BYTES from common.constants import LOCATOR_LEN_BYTES
from common.tools import ( from common.tools import (
check_sha256_hex_format, is_compressed_pk,
check_locator_format, is_256b_hex_str,
is_locator,
compute_locator, compute_locator,
setup_data_folder, setup_data_folder,
setup_logging, setup_logging,
@@ -12,14 +13,42 @@ from common.tools import (
from test.common.unit.conftest import get_random_value_hex from test.common.unit.conftest import get_random_value_hex
def test_check_sha256_hex_format(): def test_is_compressed_pk():
wrong_values = [
None,
3,
15.23,
"",
{},
(),
object,
str,
get_random_value_hex(32),
get_random_value_hex(34),
"06" + get_random_value_hex(32),
]
# is_compressed_pk must accept 33-byte hex strings prefixed by 02 or 03
for i in range(100):
if i % 2:
prefix = "02"
else:
prefix = "03"
assert is_compressed_pk(prefix + get_random_value_hex(32))
# is_compressed_pk must reject anything that is not a 33-byte compressed pk hex string
for value in wrong_values:
assert not is_compressed_pk(value)
def test_is_256b_hex_str():
# Only 32-byte hex encoded strings should pass the test # Only 32-byte hex encoded strings should pass the test
wrong_inputs = [None, str(), 213, 46.67, dict(), "A" * 63, "C" * 65, bytes(), get_random_value_hex(31)] wrong_inputs = [None, str(), 213, 46.67, dict(), "A" * 63, "C" * 65, bytes(), get_random_value_hex(31)]
for wtype in wrong_inputs: for wtype in wrong_inputs:
assert check_sha256_hex_format(wtype) is False assert is_256b_hex_str(wtype) is False
for v in range(100): for v in range(100):
assert check_sha256_hex_format(get_random_value_hex(32)) is True assert is_256b_hex_str(get_random_value_hex(32)) is True
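For context, helpers satisfying both of these tests can be as small as the sketch below; the real implementations live in `common.tools` and may differ in detail:

```python
import re

def is_256b_hex_str(value):
    # 32 bytes == exactly 64 hex characters
    return isinstance(value, str) and re.fullmatch(r"[0-9a-fA-F]{64}", value) is not None

def is_compressed_pk(value):
    # 33 bytes == 66 hex characters, with the first byte restricted to 0x02/0x03
    return (
        isinstance(value, str)
        and re.fullmatch(r"[0-9a-fA-F]{66}", value) is not None
        and value[:2] in ("02", "03")
    )
```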
def test_check_locator_format(): def test_check_locator_format():
@@ -37,20 +66,20 @@ def test_check_locator_format():
get_random_value_hex(LOCATOR_LEN_BYTES - 1), get_random_value_hex(LOCATOR_LEN_BYTES - 1),
] ]
for wtype in wrong_inputs: for wtype in wrong_inputs:
assert check_locator_format(wtype) is False assert is_locator(wtype) is False
for _ in range(100): for _ in range(100):
assert check_locator_format(get_random_value_hex(LOCATOR_LEN_BYTES)) is True assert is_locator(get_random_value_hex(LOCATOR_LEN_BYTES)) is True
def test_compute_locator(): def test_compute_locator():
# The best way of checking that compute locator is correct is by using check_locator_format # The best way of checking that compute locator is correct is by using is_locator
for _ in range(100): for _ in range(100):
assert check_locator_format(compute_locator(get_random_value_hex(LOCATOR_LEN_BYTES))) is True assert is_locator(compute_locator(get_random_value_hex(LOCATOR_LEN_BYTES))) is True
# String of length smaller than LOCATOR_LEN_BYTES bytes must fail # String of length smaller than LOCATOR_LEN_BYTES bytes must fail
for i in range(1, LOCATOR_LEN_BYTES): for i in range(1, LOCATOR_LEN_BYTES):
assert check_locator_format(compute_locator(get_random_value_hex(i))) is False assert is_locator(compute_locator(get_random_value_hex(i))) is False
def test_setup_data_folder(): def test_setup_data_folder():
@@ -73,12 +102,12 @@ def test_setup_logging():
f_log_suffix = "_file_log" f_log_suffix = "_file_log"
c_log_suffix = "_console_log" c_log_suffix = "_console_log"
assert len(logging.getLogger(prefix + f_log_suffix).handlers) is 0 assert len(logging.getLogger(prefix + f_log_suffix).handlers) == 0
assert len(logging.getLogger(prefix + c_log_suffix).handlers) is 0 assert len(logging.getLogger(prefix + c_log_suffix).handlers) == 0
setup_logging(log_file, prefix) setup_logging(log_file, prefix)
assert len(logging.getLogger(prefix + f_log_suffix).handlers) is 1 assert len(logging.getLogger(prefix + f_log_suffix).handlers) == 1
assert len(logging.getLogger(prefix + c_log_suffix).handlers) is 1 assert len(logging.getLogger(prefix + c_log_suffix).handlers) == 1
os.remove(log_file) os.remove(log_file)

View File

@@ -17,14 +17,12 @@ END_TIME_DELTA = 10
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def bitcoin_cli(): def bitcoin_cli():
config = get_config(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF) config = get_config(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF)
print(config)
# btc_connect_params = {k: v["value"] for k, v in DEFAULT_CONF.items() if k.startswith("BTC")}
return AuthServiceProxy( return AuthServiceProxy(
"http://%s:%s@%s:%d" "http://%s:%s@%s:%d"
% ( % (
config.get("BTC_RPC_USER"), config.get("BTC_RPC_USER"),
config.get("BTC_RPC_PASSWD"), config.get("BTC_RPC_PASSWORD"),
config.get("BTC_RPC_CONNECT"), config.get("BTC_RPC_CONNECT"),
config.get("BTC_RPC_PORT"), config.get("BTC_RPC_PORT"),
) )

View File

@@ -1,6 +1,6 @@
[bitcoind] [bitcoind]
btc_rpc_user = user btc_rpc_user = user
btc_rpc_passwd = passwd btc_rpc_password = passwd
btc_rpc_connect = localhost btc_rpc_connect = localhost
btc_rpc_port = 18445 btc_rpc_port = 18445
btc_network = regtest btc_network = regtest

View File

@@ -1,7 +1,7 @@
import json
import binascii
from time import sleep from time import sleep
from riemann.tx import Tx from riemann.tx import Tx
from binascii import hexlify
from coincurve import PrivateKey
from cli import teos_cli, DATA_DIR, DEFAULT_CONF, CONF_FILE_NAME from cli import teos_cli, DATA_DIR, DEFAULT_CONF, CONF_FILE_NAME
@@ -24,18 +24,17 @@ from test.teos.e2e.conftest import (
cli_config = get_config(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF) cli_config = get_config(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF)
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix="") common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix="")
# # We'll use teos_cli to add appointments. The expected input format is a list of arguments with a json-encoded
# # appointment
# teos_cli.teos_api_server = "http://{}".format(HOST)
# teos_cli.teos_api_port = PORT
teos_base_endpoint = "http://{}:{}".format(cli_config.get("TEOS_SERVER"), cli_config.get("TEOS_PORT")) teos_base_endpoint = "http://{}:{}".format(cli_config.get("TEOS_SERVER"), cli_config.get("TEOS_PORT"))
teos_add_appointment_endpoint = teos_base_endpoint teos_add_appointment_endpoint = "{}/add_appointment".format(teos_base_endpoint)
teos_get_appointment_endpoint = teos_base_endpoint + "/get_appointment" teos_get_appointment_endpoint = "{}/get_appointment".format(teos_base_endpoint)
# Run teosd # Run teosd
teosd_process = run_teosd() teosd_process = run_teosd()
teos_pk, cli_sk, compressed_cli_pk = teos_cli.load_keys(
cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY"), cli_config.get("CLI_PUBLIC_KEY")
)
def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr): def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
# Broadcast the commitment transaction and mine a block # Broadcast the commitment transaction and mine a block
@@ -43,32 +42,71 @@ def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
bitcoin_cli.generatetoaddress(1, addr) bitcoin_cli.generatetoaddress(1, addr)
def get_appointment_info(locator): def get_appointment_info(locator, sk=cli_sk):
# Check that the justice has been triggered (the appointment has moved from Watcher to Responder)
sleep(1) # Let's add a bit of delay so the state can be updated sleep(1) # Let's add a bit of delay so the state can be updated
return teos_cli.get_appointment(locator, teos_get_appointment_endpoint) return teos_cli.get_appointment(locator, sk, teos_pk, teos_base_endpoint)
def add_appointment(appointment_data, sk=cli_sk):
return teos_cli.add_appointment(
appointment_data, sk, teos_pk, teos_base_endpoint, cli_config.get("APPOINTMENTS_FOLDER_NAME")
)
def test_commands_non_registered(bitcoin_cli, create_txs):
# All commands should fail if the user is not registered
# Add appointment
commitment_tx, penalty_tx = create_txs
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
assert add_appointment(appointment_data) is False
# Get appointment
assert get_appointment_info(appointment_data.get("locator")) is None
def test_commands_registered(bitcoin_cli, create_txs):
# Test registering and trying again
teos_cli.register(compressed_cli_pk, teos_base_endpoint)
# Add appointment
commitment_tx, penalty_tx = create_txs
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
assert add_appointment(appointment_data) is True
# Get appointment
r = get_appointment_info(appointment_data.get("locator"))
assert r.get("locator") == appointment_data.get("locator")
assert r.get("appointment").get("locator") == appointment_data.get("locator")
assert r.get("appointment").get("encrypted_blob") == appointment_data.get("encrypted_blob")
assert r.get("appointment").get("start_time") == appointment_data.get("start_time")
assert r.get("appointment").get("end_time") == appointment_data.get("end_time")
def test_appointment_life_cycle(bitcoin_cli, create_txs): def test_appointment_life_cycle(bitcoin_cli, create_txs):
# First of all we need to register
# FIXME: requires register command in the cli
commitment_tx, penalty_tx = create_txs commitment_tx, penalty_tx = create_txs
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "being_watched"
assert appointment_info[0].get("status") == "being_watched"
new_addr = bitcoin_cli.getnewaddress() new_addr = bitcoin_cli.getnewaddress()
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "dispute_responded"
assert appointment_info[0].get("status") == "dispute_responded"
# It can be also checked by ensuring that the penalty transaction made it to the network # It can be also checked by ensuring that the penalty transaction made it to the network
penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid") penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid")
@@ -78,7 +116,7 @@ def test_appointment_life_cycle(bitcoin_cli, create_txs):
assert True assert True
except JSONRPCException: except JSONRPCException:
# If the transaction if not found. # If the transaction is not found.
assert False assert False
# Now let's mine some blocks so the appointment reaches its end. # Now let's mine some blocks so the appointment reaches its end.
@@ -88,8 +126,7 @@ def test_appointment_life_cycle(bitcoin_cli, create_txs):
sleep(1) sleep(1)
bitcoin_cli.generatetoaddress(1, new_addr) bitcoin_cli.generatetoaddress(1, new_addr)
appointment_info = get_appointment_info(locator) assert get_appointment_info(locator) is None
assert appointment_info[0].get("status") == "not_found"
def test_appointment_malformed_penalty(bitcoin_cli, create_txs): def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
@@ -105,7 +142,7 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex()) appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex())
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
# Broadcast the commitment transaction and mine a block # Broadcast the commitment transaction and mine a block
new_addr = bitcoin_cli.getnewaddress() new_addr = bitcoin_cli.getnewaddress()
@@ -113,11 +150,7 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
# The appointment should have been removed since the penalty_tx was malformed. # The appointment should have been removed since the penalty_tx was malformed.
sleep(1) sleep(1)
appointment_info = get_appointment_info(locator) assert get_appointment_info(locator) is None
assert appointment_info is not None
assert len(appointment_info) == 1
assert appointment_info[0].get("status") == "not_found"
def test_appointment_wrong_key(bitcoin_cli, create_txs): def test_appointment_wrong_key(bitcoin_cli, create_txs):
@@ -134,17 +167,12 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), get_random_value_hex(32)) appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), get_random_value_hex(32))
appointment = Appointment.from_dict(appointment_data) appointment = Appointment.from_dict(appointment_data)
teos_pk, cli_sk, cli_pk_der = teos_cli.load_keys(
cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY"), cli_config.get("CLI_PUBLIC_KEY")
)
hex_pk_der = binascii.hexlify(cli_pk_der)
signature = Cryptographer.sign(appointment.serialize(), cli_sk) signature = Cryptographer.sign(appointment.serialize(), cli_sk)
data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")} data = {"appointment": appointment.to_dict(), "signature": signature}
# Send appointment to the server. # Send appointment to the server.
response = teos_cli.post_appointment(data, teos_add_appointment_endpoint) response = teos_cli.post_request(data, teos_add_appointment_endpoint)
response_json = teos_cli.process_post_appointment_response(response) response_json = teos_cli.process_post_response(response)
# Check that the server has accepted the appointment # Check that the server has accepted the appointment
signature = response_json.get("signature") signature = response_json.get("signature")
@@ -159,19 +187,13 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
# The appointment should have been removed since the decryption failed. # The appointment should have been removed since the decryption failed.
sleep(1) sleep(1)
appointment_info = get_appointment_info(appointment.locator) assert get_appointment_info(appointment.locator) is None
assert appointment_info is not None
assert len(appointment_info) == 1
assert appointment_info[0].get("status") == "not_found"
def test_two_identical_appointments(bitcoin_cli, create_txs): def test_two_identical_appointments(bitcoin_cli, create_txs):
# Tests sending two identical appointments to the tower. # Tests sending two identical appointments to the tower.
# At the moment there are no checks for identical appointments, so both will be accepted, decrypted and kept until
# the end.
# TODO: 34-exact-duplicate-appointment
# This tests sending an appointment with two valid transactions with the same locator. # This tests sending an appointment with two valid transactions with the same locator.
# If they come from the same user, the last one will be kept
commitment_tx, penalty_tx = create_txs commitment_tx, penalty_tx = create_txs
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
@@ -179,27 +201,71 @@ def test_two_identical_appointments(bitcoin_cli, create_txs):
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
# Send the appointment twice # Send the appointment twice
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
# Broadcast the commitment transaction and mine a block # Broadcast the commitment transaction and mine a block
new_addr = bitcoin_cli.getnewaddress() new_addr = bitcoin_cli.getnewaddress()
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
# The first appointment should have made it to the Responder, and the second one should have been dropped for # The last appointment should have made it to the Responder
# double-spending
sleep(1) sleep(1)
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 2 assert appointment_info.get("status") == "dispute_responded"
assert appointment_info.get("appointment").get("penalty_rawtx") == penalty_tx
for info in appointment_info:
assert info.get("status") == "dispute_responded"
assert info.get("penalty_rawtx") == penalty_tx
def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs): # FIXME: This test won't work since we're still passing appointment replicas to the Responder.
# Uncomment when #88 is addressed
# def test_two_identical_appointments_different_users(bitcoin_cli, create_txs):
# # Tests sending two identical appointments from different users to the tower.
# # This tests sending an appointment with two valid transactions with the same locator.
# # If they come from different users, both will be kept, but one will be dropped for double-spending when passing to
# # the responder
# commitment_tx, penalty_tx = create_txs
# commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
#
# appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
# locator = compute_locator(commitment_tx_id)
#
# # tmp keys from a different user
# tmp_sk = PrivateKey()
# tmp_compressed_pk = hexlify(tmp_sk.public_key.format(compressed=True)).decode("utf-8")
# teos_cli.register(tmp_compressed_pk, teos_base_endpoint)
#
# # Send the appointment twice
# assert add_appointment(appointment_data) is True
# assert add_appointment(appointment_data, sk=tmp_sk) is True
#
# # Check that we can get it from both users
# appointment_info = get_appointment_info(locator)
# assert appointment_info.get("status") == "being_watched"
# appointment_info = get_appointment_info(locator, sk=tmp_sk)
# assert appointment_info.get("status") == "being_watched"
#
# # Broadcast the commitment transaction and mine a block
# new_addr = bitcoin_cli.getnewaddress()
# broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
#
# # The last appointment should have made it to the Responder
# sleep(1)
# appointment_info = get_appointment_info(locator)
# appointment_dup_info = get_appointment_info(locator, sk=tmp_sk)
#
# # One of the two requests must be None, while the other must be valid
# assert (appointment_info is None and appointment_dup_info is not None) or (
# appointment_dup_info is None and appointment_info is not None
# )
#
# appointment_info = appointment_info if appointment_info is None else appointment_dup_info
#
# assert appointment_info.get("status") == "dispute_responded"
# assert appointment_info.get("appointment").get("penalty_rawtx") == penalty_tx
def test_two_appointment_same_locator_different_penalty_different_users(bitcoin_cli, create_txs):
# This tests sending an appointment with two valid transactions with the same locator. # This tests sending an appointment with two valid transactions with the same locator.
commitment_tx, penalty_tx1 = create_txs commitment_tx, penalty_tx1 = create_txs
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
@@ -213,22 +279,35 @@ def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs)
appointment2_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx2) appointment2_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx2)
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
assert teos_cli.add_appointment([json.dumps(appointment1_data)], teos_add_appointment_endpoint, cli_config) is True # tmp keys from a different user
assert teos_cli.add_appointment([json.dumps(appointment2_data)], teos_add_appointment_endpoint, cli_config) is True tmp_sk = PrivateKey()
tmp_compressed_pk = hexlify(tmp_sk.public_key.format(compressed=True)).decode("utf-8")
teos_cli.register(tmp_compressed_pk, teos_base_endpoint)
assert add_appointment(appointment1_data) is True
assert add_appointment(appointment2_data, sk=tmp_sk) is True
# Broadcast the commitment transaction and mine a block # Broadcast the commitment transaction and mine a block
new_addr = bitcoin_cli.getnewaddress() new_addr = bitcoin_cli.getnewaddress()
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
# The first appointment should have made it to the Responder, and the second one should have been dropped for # One of the transactions must have made it to the Responder while the other must have been dropped for
# double-spending # double-spending
sleep(1) sleep(1)
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
appointment2_info = get_appointment_info(locator, sk=tmp_sk)
assert appointment_info is not None # One of the two requests must be None, while the other must be valid
assert len(appointment_info) == 1 assert (appointment_info is None and appointment2_info is not None) or (
assert appointment_info[0].get("status") == "dispute_responded" appointment2_info is None and appointment_info is not None
assert appointment_info[0].get("penalty_rawtx") == penalty_tx1 )
if appointment_info is None:
appointment_info = appointment2_info
appointment1_data = appointment2_data
assert appointment_info.get("status") == "dispute_responded"
assert appointment_info.get("locator") == appointment1_data.get("locator")
def test_appointment_shutdown_teos_trigger_back_online(create_txs, bitcoin_cli): def test_appointment_shutdown_teos_trigger_back_online(create_txs, bitcoin_cli):
@@ -241,7 +320,7 @@ def test_appointment_shutdown_teos_trigger_back_online(create_txs, bitcoin_cli):
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
# Restart teos # Restart teos
teosd_process.terminate() teosd_process.terminate()
@@ -250,11 +329,11 @@ def test_appointment_shutdown_teos_trigger_back_online(create_txs, bitcoin_cli):
assert teos_pid != teosd_process.pid assert teos_pid != teosd_process.pid
# Check that the appointment is still in the Watcher # Check that the appointment is still in the Watcher
sleep(1)
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "being_watched"
assert appointment_info[0].get("status") == "being_watched"
# Trigger appointment after restart # Trigger appointment after restart
new_addr = bitcoin_cli.getnewaddress() new_addr = bitcoin_cli.getnewaddress()
@@ -265,8 +344,7 @@ def test_appointment_shutdown_teos_trigger_back_online(create_txs, bitcoin_cli):
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "dispute_responded"
assert appointment_info[0].get("status") == "dispute_responded"
def test_appointment_shutdown_teos_trigger_while_offline(create_txs, bitcoin_cli): def test_appointment_shutdown_teos_trigger_while_offline(create_txs, bitcoin_cli):
@@ -279,13 +357,12 @@ def test_appointment_shutdown_teos_trigger_while_offline(create_txs, bitcoin_cli
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id) locator = compute_locator(commitment_tx_id)
assert teos_cli.add_appointment([json.dumps(appointment_data)], teos_add_appointment_endpoint, cli_config) is True assert add_appointment(appointment_data) is True
# Check that the appointment is still in the Watcher # Check that the appointment is still in the Watcher
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "being_watched"
assert appointment_info[0].get("status") == "being_watched"
# Shutdown and trigger # Shutdown and trigger
teosd_process.terminate() teosd_process.terminate()
@@ -301,7 +378,6 @@ def test_appointment_shutdown_teos_trigger_while_offline(create_txs, bitcoin_cli
appointment_info = get_appointment_info(locator) appointment_info = get_appointment_info(locator)
assert appointment_info is not None assert appointment_info is not None
assert len(appointment_info) == 1 assert appointment_info.get("status") == "dispute_responded"
assert appointment_info[0].get("status") == "dispute_responded"
teosd_process.terminate() teosd_process.terminate()

View File

@@ -12,10 +12,12 @@ from bitcoind_mock.transaction import create_dummy_transaction
from teos.carrier import Carrier from teos.carrier import Carrier
from teos.tools import bitcoin_cli from teos.tools import bitcoin_cli
from teos.db_manager import DBManager from teos.users_dbm import UsersDBM
from teos.gatekeeper import Gatekeeper
from teos import LOG_PREFIX, DEFAULT_CONF from teos import LOG_PREFIX, DEFAULT_CONF
from teos.responder import TransactionTracker from teos.responder import TransactionTracker
from teos.block_processor import BlockProcessor from teos.block_processor import BlockProcessor
from teos.appointments_dbm import AppointmentsDBM
import common.cryptographer import common.cryptographer
from common.blob import Blob from common.blob import Blob
@@ -53,7 +55,7 @@ def prng_seed():
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def db_manager(): def db_manager():
manager = DBManager("test_db") manager = AppointmentsDBM("test_db")
# Add last known block for the Responder in the db # Add last known block for the Responder in the db
yield manager yield manager
@@ -62,6 +64,17 @@ def db_manager():
rmtree("test_db") rmtree("test_db")
@pytest.fixture(scope="module")
def user_db_manager():
manager = UsersDBM("test_user_db")
# Add last known block for the Responder in the db
yield manager
manager.db.close()
rmtree("test_user_db")
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def carrier(): def carrier():
return Carrier(bitcoind_connect_params) return Carrier(bitcoind_connect_params)
@@ -72,6 +85,11 @@ def block_processor():
return BlockProcessor(bitcoind_connect_params) return BlockProcessor(bitcoind_connect_params)
@pytest.fixture(scope="module")
def gatekeeper(user_db_manager):
return Gatekeeper(user_db_manager, get_config().get("DEFAULT_SLOTS"))
def generate_keypair(): def generate_keypair():
sk = PrivateKey() sk = PrivateKey()
pk = sk.public_key pk = sk.public_key
@@ -100,7 +118,7 @@ def fork(block_hash):
requests.post(fork_endpoint, json={"parent": block_hash}) requests.post(fork_endpoint, json={"parent": block_hash})
def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_time_offset=30): def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
if real_height: if real_height:
current_height = bitcoin_cli(bitcoind_connect_params).getblockcount() current_height = bitcoin_cli(bitcoind_connect_params).getblockcount()
@@ -119,10 +137,6 @@ def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_t
"to_self_delay": 20, "to_self_delay": 20,
} }
# dummy keys for this test
client_sk, client_pk = generate_keypair()
client_pk_hex = client_pk.format().hex()
locator = compute_locator(dispute_txid) locator = compute_locator(dispute_txid)
blob = Blob(dummy_appointment_data.get("tx")) blob = Blob(dummy_appointment_data.get("tx"))
@@ -136,19 +150,7 @@ def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_t
"encrypted_blob": encrypted_blob, "encrypted_blob": encrypted_blob,
} }
signature = Cryptographer.sign(Appointment.from_dict(appointment_data).serialize(), client_sk) return Appointment.from_dict(appointment_data), dispute_tx.hex()
data = {"appointment": appointment_data, "signature": signature, "public_key": client_pk_hex}
return data, dispute_tx.hex()
def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
appointment_data, dispute_tx = generate_dummy_appointment_data(
real_height=real_height, start_time_offset=start_time_offset, end_time_offset=end_time_offset
)
return Appointment.from_dict(appointment_data["appointment"]), dispute_tx
def generate_dummy_tracker(): def generate_dummy_tracker():

View File

@@ -1,202 +1,521 @@
import json
import pytest import pytest
import requests from shutil import rmtree
from time import sleep from binascii import hexlify
from threading import Thread
from teos.api import API from teos.api import API
from teos import HOST, PORT from teos import HOST, PORT
import teos.errors as errors
from teos.watcher import Watcher from teos.watcher import Watcher
from teos.tools import bitcoin_cli
from teos.inspector import Inspector from teos.inspector import Inspector
from teos.responder import Responder from teos.appointments_dbm import AppointmentsDBM
from teos.chain_monitor import ChainMonitor from teos.responder import Responder, TransactionTracker
from test.teos.unit.conftest import ( from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment, generate_keypair, get_config
generate_block,
generate_blocks, from common.cryptographer import Cryptographer, hash_160
get_random_value_hex, from common.constants import (
generate_dummy_appointment_data, HTTP_OK,
generate_keypair, HTTP_NOT_FOUND,
get_config, HTTP_BAD_REQUEST,
bitcoind_connect_params, HTTP_SERVICE_UNAVAILABLE,
bitcoind_feed_params, LOCATOR_LEN_BYTES,
ENCRYPTED_BLOB_MAX_SIZE_HEX,
) )
from common.constants import LOCATOR_LEN_BYTES
TEOS_API = "http://{}:{}".format(HOST, PORT) TEOS_API = "http://{}:{}".format(HOST, PORT)
register_endpoint = "{}/register".format(TEOS_API)
add_appointment_endpoint = "{}/add_appointment".format(TEOS_API)
get_appointment_endpoint = "{}/get_appointment".format(TEOS_API)
get_all_appointment_endpoint = "{}/get_all_appointments".format(TEOS_API)
# Reduce the maximum number of appointments to something we can test faster
MAX_APPOINTMENTS = 100
MULTIPLE_APPOINTMENTS = 10 MULTIPLE_APPOINTMENTS = 10
appointments = [] TWO_SLOTS_BLOTS = "A" * ENCRYPTED_BLOB_MAX_SIZE_HEX + "AA"
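# TWO_SLOTS_BLOTS is two hex characters over the single-slot limit so that, assuming slots are accounted as ceil(len(encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX), it should take up exactly two slots.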
appointments = {}
locator_dispute_tx_map = {} locator_dispute_tx_map = {}
config = get_config() config = get_config()
@pytest.fixture(scope="module") client_sk, client_pk = generate_keypair()
def run_api(db_manager, carrier, block_processor): compressed_client_pk = hexlify(client_pk.format(compressed=True)).decode("utf-8")
@pytest.fixture()
def get_all_db_manager():
manager = AppointmentsDBM("get_all_tmp_db")
# Create a fresh appointments database and clean it up once the tests are done
yield manager
manager.db.close()
rmtree("get_all_tmp_db")
@pytest.fixture(scope="module", autouse=True)
def api(db_manager, carrier, block_processor, gatekeeper, run_bitcoind):
sk, pk = generate_keypair() sk, pk = generate_keypair()
responder = Responder(db_manager, carrier, block_processor) responder = Responder(db_manager, carrier, block_processor)
watcher = Watcher( watcher = Watcher(db_manager, block_processor, responder, sk.to_der(), MAX_APPOINTMENTS, config.get("EXPIRY_DELTA"))
db_manager, block_processor, responder, sk.to_der(), config.get("MAX_APPOINTMENTS"), config.get("EXPIRY_DELTA")
)
chain_monitor = ChainMonitor( api = API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher, gatekeeper)
watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
)
watcher.awake()
chain_monitor.monitor_chain()
api_thread = Thread(target=API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")), watcher).start) return api
api_thread.daemon = True
api_thread.start()
# It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
sleep(0.1) @pytest.fixture()
def app(api):
with api.app.app_context():
yield api.app
@pytest.fixture @pytest.fixture
def new_appt_data(): def client(app):
appt_data, dispute_tx = generate_dummy_appointment_data() return app.test_client()
locator_dispute_tx_map[appt_data["appointment"]["locator"]] = dispute_tx
return appt_data
def add_appointment(new_appt_data): @pytest.fixture
r = requests.post(url=TEOS_API, json=json.dumps(new_appt_data), timeout=5) def appointment():
appointment, dispute_tx = generate_dummy_appointment()
locator_dispute_tx_map[appointment.locator] = dispute_tx
if r.status_code == 200: return appointment
appointments.append(new_appt_data["appointment"])
def add_appointment(client, appointment_data, user_pk):
r = client.post(add_appointment_endpoint, json=appointment_data)
if r.status_code == HTTP_OK:
locator = appointment_data.get("appointment").get("locator")
uuid = hash_160("{}{}".format(locator, user_pk))
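# The uuid is derived as hash_160 of locator + compressed user public key, mirroring what the tower is assumed to do internally, so the tests can later look appointments up by the same key.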
appointments[uuid] = appointment_data["appointment"]
return r return r
def test_add_appointment(run_api, run_bitcoind, new_appt_data): def test_register(client):
data = {"public_key": compressed_client_pk}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_OK
assert r.json.get("public_key") == compressed_client_pk
assert r.json.get("available_slots") == config.get("DEFAULT_SLOTS")
def test_register_top_up(client):
# Calling register more than once will give us DEFAULT_SLOTS * number_of_calls slots
temp_sk, tmp_pk = generate_keypair()
tmp_pk_hex = hexlify(tmp_pk.format(compressed=True)).decode("utf-8")
data = {"public_key": tmp_pk_hex}
for i in range(10):
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_OK
assert r.json.get("public_key") == tmp_pk_hex
assert r.json.get("available_slots") == config.get("DEFAULT_SLOTS") * (i + 1)
def test_register_no_client_pk(client):
data = {"public_key": compressed_client_pk + compressed_client_pk}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_BAD_REQUEST
def test_register_wrong_client_pk(client):
data = {}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_BAD_REQUEST
def test_register_no_json(client):
r = client.post(register_endpoint, data="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_register_json_no_inner_dict(client):
r = client.post(register_endpoint, json="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_add_appointment(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Properly formatted appointment # Properly formatted appointment
r = add_appointment(new_appt_data) appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
assert r.status_code == 200 r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == 0
def test_add_appointment_no_json(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Properly formatted appointment
r = client.post(add_appointment_endpoint, data="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_add_appointment_json_no_inner_dict(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Properly formatted appointment
r = client.post(add_appointment_endpoint, json="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_add_appointment_wrong(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = 1
# Incorrect appointment # Incorrect appointment
new_appt_data["appointment"]["to_self_delay"] = 0 appointment.to_self_delay = 0
r = add_appointment(new_appt_data) appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
assert r.status_code == 400 r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_FIELD_TOO_SMALL) in r.json.get("error")
def test_request_random_appointment(): def test_add_appointment_not_registered(api, client, appointment):
r = requests.get(url=TEOS_API + "/get_appointment?locator=" + get_random_value_hex(LOCATOR_LEN_BYTES)) # Properly formatted appointment
assert r.status_code == 200 tmp_sk, tmp_pk = generate_keypair()
tmp_compressed_pk = hexlify(tmp_pk.format(compressed=True)).decode("utf-8")
received_appointments = json.loads(r.content) appointment_signature = Cryptographer.sign(appointment.serialize(), tmp_sk)
appointment_status = [appointment.pop("status") for appointment in received_appointments] r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, tmp_compressed_pk
assert all([status == "not_found" for status in appointment_status]) )
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS) in r.json.get("error")
def test_add_appointment_multiple_times(new_appt_data, n=MULTIPLE_APPOINTMENTS): def test_add_appointment_registered_no_free_slots(api, client, appointment):
# Multiple appointments with the same locator should be valid # Empty the user slots
# DISCUSS: #34-store-identical-appointments api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 0}
# Properly formatted appointment
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS) in r.json.get("error")
def test_add_appointment_registered_not_enough_free_slots(api, client, appointment):
# Give some slots to the user
api.gatekeeper.registered_users[compressed_client_pk] = 1
# Properly formatted appointment
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
# Let's create a big blob
appointment.encrypted_blob.data = TWO_SLOTS_BLOTS
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS) in r.json.get("error")
def test_add_appointment_multiple_times_same_user(api, client, appointment, n=MULTIPLE_APPOINTMENTS):
# Multiple appointments with the same locator should be valid and counted as updates
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
# Simulate registering enough slots
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": n}
for _ in range(n): for _ in range(n):
r = add_appointment(new_appt_data) r = add_appointment(
assert r.status_code == 200 client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == n - 1
# Since all updates came from the same user, only the last one is stored
assert len(api.watcher.locator_uuid_map[appointment.locator]) == 1
def test_request_multiple_appointments_same_locator(new_appt_data, n=MULTIPLE_APPOINTMENTS): def test_add_appointment_multiple_times_different_users(api, client, appointment, n=MULTIPLE_APPOINTMENTS):
for _ in range(n): # Create user keys and appointment signatures
r = add_appointment(new_appt_data) user_keys = [generate_keypair() for _ in range(n)]
assert r.status_code == 200 signatures = [Cryptographer.sign(appointment.serialize(), key[0]) for key in user_keys]
compressed_pks = [hexlify(pk.format(compressed=True)).decode("utf-8") for sk, pk in user_keys]
test_request_appointment_watcher(new_appt_data) # Add one slot per public key
for pair in user_keys:
tmp_compressed_pk = hexlify(pair[1].format(compressed=True)).decode("utf-8")
api.gatekeeper.registered_users[tmp_compressed_pk] = {"available_slots": 2}
# Send the appointments
for compressed_pk, signature in zip(compressed_pks, signatures):
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": signature}, compressed_pk)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == 1
# Check that all the appointments have been added and that there are no duplicates
assert len(set(api.watcher.locator_uuid_map[appointment.locator])) == n
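# Note: locator_uuid_map appears to map each locator to the uuids of every appointment covering it, which is how a single dispute can serve appointments from several users at once.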
def test_add_too_many_appointment(new_appt_data): def test_add_appointment_update_same_size(api, client, appointment):
for _ in range(config.get("MAX_APPOINTMENTS") - len(appointments)): # Update an appointment by one of the same size and check that no additional slots are filled
r = add_appointment(new_appt_data) api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
assert r.status_code == 200
r = add_appointment(new_appt_data) appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
assert r.status_code == 503 # Since we will replace the appointment, it won't be added to appointments
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
# The user has no additional slots, but it should be able to update
# Let's just reverse the encrypted blob for example
appointment.encrypted_blob.data = appointment.encrypted_blob.data[::-1]
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
def test_get_all_appointments_watcher(): def test_add_appointment_update_bigger(api, client, appointment):
r = requests.get(url=TEOS_API + "/get_all_appointments") # Update an appointment with a bigger one and check that additional slots are filled
assert r.status_code == 200 and r.reason == "OK" api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 2}
received_appointments = json.loads(r.content) appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
# Make sure all the locators are in the watcher # The user has one slot, so it should be able to update as long as it only takes 1 additional slot
watcher_locators = [v["locator"] for k, v in received_appointments["watcher_appointments"].items()] appointment.encrypted_blob.data = TWO_SLOTS_BLOTS
local_locators = [appointment["locator"] for appointment in appointments] appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
assert set(watcher_locators) == set(local_locators) # Check that it'll fail if not enough slots are available
assert len(received_appointments["responder_trackers"]) == 0 # Double the size from before
appointment.encrypted_blob.data = TWO_SLOTS_BLOTS + TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_BAD_REQUEST
def test_get_all_appointments_responder(): def test_add_appointment_update_smaller(api, client, appointment):
# Trigger all disputes # Update an appointment with a smaller one and check that slots are freed
locators = [appointment["locator"] for appointment in appointments] api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 2}
for locator, dispute_tx in locator_dispute_tx_map.items():
if locator in locators:
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)
# Confirm transactions # This should take 2 slots
generate_blocks(6) appointment.encrypted_blob.data = TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
# Get all appointments # Let's update with one just small enough
r = requests.get(url=TEOS_API + "/get_all_appointments") appointment.encrypted_blob.data = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX - 2)
received_appointments = json.loads(r.content) appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
# Make sure there is no pending locator in the watcher client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
responder_trackers = [v["locator"] for k, v in received_appointments["responder_trackers"].items()] )
local_locators = [appointment["locator"] for appointment in appointments] assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
assert set(responder_trackers) == set(local_locators)
assert len(received_appointments["watcher_appointments"]) == 0
def test_request_appointment_watcher(new_appt_data): def test_add_too_many_appointment(api, client):
# First we need to add an appointment # Give slots to the user
r = add_appointment(new_appt_data) api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 200}
assert r.status_code == 200
free_appointment_slots = MAX_APPOINTMENTS - len(api.watcher.appointments)
for i in range(free_appointment_slots + 1):
appointment, dispute_tx = generate_dummy_appointment()
locator_dispute_tx_map[appointment.locator] = dispute_tx
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
if i < free_appointment_slots:
assert r.status_code == HTTP_OK
else:
assert r.status_code == HTTP_SERVICE_UNAVAILABLE
def test_get_appointment_no_json(api, client, appointment):
r = client.post(add_appointment_endpoint, data="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_get_appointment_json_no_inner_dict(api, client, appointment):
r = client.post(add_appointment_endpoint, json="random_message")
assert r.status_code == HTTP_BAD_REQUEST
def test_request_random_appointment_registered_user(client, user_sk=client_sk):
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
message = "get appointment {}".format(locator)
signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
data = {"locator": locator, "signature": signature}
r = client.post(get_appointment_endpoint, json=data)
# We should get a 404 not found since we are using a made up locator
received_appointment = r.json
assert r.status_code == HTTP_NOT_FOUND
assert received_appointment.get("status") == "not_found"
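# A minimal sketch (illustrative only, not used by the tests) of how a client could issue the
# same query against a running tower over HTTP. The helper name is made up; it assumes the
# "get appointment {locator}" message format and the /get_appointment endpoint exercised above.
def _example_get_appointment_over_http(locator, user_sk):
    import requests

    message = "get appointment {}".format(locator)
    signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
    return requests.post(get_appointment_endpoint, json={"locator": locator, "signature": signature}, timeout=5)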
def test_request_appointment_not_registered_user(client):
# Non-registered users have no associated appointments, so this should fail
tmp_sk, tmp_pk = generate_keypair()
# The tower is designed so that a not-found appointment and a request from a non-registered user return the same error to
# prevent probing.
test_request_random_appointment_registered_user(client, tmp_sk)
def test_request_appointment_in_watcher(api, client, appointment):
# Mock the appointment in the Watcher
uuid = hash_160("{}{}".format(appointment.locator, compressed_client_pk))
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
# Next we can request it # Next we can request it
r = requests.get(url=TEOS_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"]) message = "get appointment {}".format(appointment.locator)
assert r.status_code == 200 signature = Cryptographer.sign(message.encode("utf-8"), client_sk)
data = {"locator": appointment.locator, "signature": signature}
r = client.post(get_appointment_endpoint, json=data)
assert r.status_code == HTTP_OK
# Each locator may point to multiple appointments, check them all # Check that the appointment is on the watcher
received_appointments = json.loads(r.content) assert r.json.get("status") == "being_watched"
# Take the status out and leave the received appointments ready to compare # Check that the sent appointment matches the received one
appointment_status = [appointment.pop("status") for appointment in received_appointments] assert r.json.get("locator") == appointment.locator
assert appointment.to_dict() == r.json.get("appointment")
# Check that the appointment is within the received appoints
assert new_appt_data["appointment"] in received_appointments
# Check that all the appointments are being watched
assert all([status == "being_watched" for status in appointment_status])
def test_request_appointment_responder(new_appt_data): def test_request_appointment_in_responder(api, client, appointment):
# Let's do something similar to what we did with the watcher but now we'll send the dispute tx to the network # Mock the appointment in the Responder
dispute_tx = locator_dispute_tx_map[new_appt_data["appointment"]["locator"]] tracker_data = {
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx) "locator": appointment.locator,
"dispute_txid": get_random_value_hex(32),
"penalty_txid": get_random_value_hex(32),
"penalty_rawtx": get_random_value_hex(250),
"appointment_end": appointment.end_time,
}
tx_tracker = TransactionTracker.from_dict(tracker_data)
r = add_appointment(new_appt_data) uuid = hash_160("{}{}".format(appointment.locator, compressed_client_pk))
assert r.status_code == 200 api.watcher.db_manager.create_triggered_appointment_flag(uuid)
api.watcher.responder.db_manager.store_responder_tracker(uuid, tx_tracker.to_dict())
# Generate a block to trigger the watcher # Request back the data
generate_block() message = "get appointment {}".format(appointment.locator)
signature = Cryptographer.sign(message.encode("utf-8"), client_sk)
data = {"locator": appointment.locator, "signature": signature}
r = requests.get(url=TEOS_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"]) # Next we can request it
assert r.status_code == 200 r = client.post(get_appointment_endpoint, json=data)
assert r.status_code == HTTP_OK
received_appointments = json.loads(r.content) # Check that the appointment is in the Responder
appointment_status = [appointment.pop("status") for appointment in received_appointments] assert r.json.get("status") == "dispute_responded"
appointment_locators = [appointment["locator"] for appointment in received_appointments]
assert new_appt_data["appointment"]["locator"] in appointment_locators and len(received_appointments) == 1 # Check the the sent appointment matches the received one
assert all([status == "dispute_responded" for status in appointment_status]) and len(appointment_status) == 1 assert tx_tracker.locator == r.json.get("locator")
assert tx_tracker.dispute_txid == r.json.get("appointment").get("dispute_txid")
assert tx_tracker.penalty_txid == r.json.get("appointment").get("penalty_txid")
assert tx_tracker.penalty_rawtx == r.json.get("appointment").get("penalty_rawtx")
assert tx_tracker.appointment_end == r.json.get("appointment").get("appointment_end")
def test_get_all_appointments_watcher(api, client, get_all_db_manager, appointment):
# Let's reset the dbs so we can test this clean
api.watcher.db_manager = get_all_db_manager
api.watcher.responder.db_manager = get_all_db_manager
# Check that they are wiped clean
r = client.get(get_all_appointment_endpoint)
assert r.status_code == HTTP_OK
assert len(r.json.get("watcher_appointments")) == 0 and len(r.json.get("responder_trackers")) == 0
# Add some appointments to the Watcher db
non_triggered_appointments = {}
for _ in range(10):
uuid = get_random_value_hex(16)
appointment.locator = get_random_value_hex(16)
non_triggered_appointments[uuid] = appointment.to_dict()
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
triggered_appointments = {}
for _ in range(10):
uuid = get_random_value_hex(16)
appointment.locator = get_random_value_hex(16)
triggered_appointments[uuid] = appointment.to_dict()
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
api.watcher.db_manager.create_triggered_appointment_flag(uuid)
# We should only get the non-triggered appointments back
r = client.get(get_all_appointment_endpoint)
assert r.status_code == HTTP_OK
watcher_locators = [v["locator"] for k, v in r.json["watcher_appointments"].items()]
local_locators = [appointment["locator"] for uuid, appointment in non_triggered_appointments.items()]
assert set(watcher_locators) == set(local_locators)
assert len(r.json["responder_trackers"]) == 0
def test_get_all_appointments_responder(api, client, get_all_db_manager):
# Let's reset the dbs so we can test this clean
api.watcher.db_manager = get_all_db_manager
api.watcher.responder.db_manager = get_all_db_manager
# Check that they are wiped clean
r = client.get(get_all_appointment_endpoint)
assert r.status_code == HTTP_OK
assert len(r.json.get("watcher_appointments")) == 0 and len(r.json.get("responder_trackers")) == 0
# Add some trackers to the Responder db
tx_trackers = {}
for _ in range(10):
uuid = get_random_value_hex(16)
tracker_data = {
"locator": get_random_value_hex(16),
"dispute_txid": get_random_value_hex(32),
"penalty_txid": get_random_value_hex(32),
"penalty_rawtx": get_random_value_hex(250),
"appointment_end": 20,
}
tracker = TransactionTracker.from_dict(tracker_data)
tx_trackers[uuid] = tracker.to_dict()
api.watcher.responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())
api.watcher.db_manager.create_triggered_appointment_flag(uuid)
# Get all appointments
r = client.get(get_all_appointment_endpoint)
# Make sure there is no pending locator in the watcher
responder_trackers = [v["locator"] for k, v in r.json["responder_trackers"].items()]
local_locators = [tracker["locator"] for uuid, tracker in tx_trackers.items()]
assert set(responder_trackers) == set(local_locators)
assert len(r.json["watcher_appointments"]) == 0

View File

@@ -0,0 +1,426 @@
import os
import json
import pytest
import shutil
from uuid import uuid4
from teos.appointments_dbm import AppointmentsDBM
from teos.appointments_dbm import (
WATCHER_LAST_BLOCK_KEY,
RESPONDER_LAST_BLOCK_KEY,
LOCATOR_MAP_PREFIX,
TRIGGERED_APPOINTMENTS_PREFIX,
)
from common.constants import LOCATOR_LEN_BYTES
from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment
@pytest.fixture(scope="module")
def watcher_appointments():
return {uuid4().hex: generate_dummy_appointment(real_height=False)[0] for _ in range(10)}
@pytest.fixture(scope="module")
def responder_trackers():
return {get_random_value_hex(16): get_random_value_hex(32) for _ in range(10)}
def open_create_db(db_path):
try:
db_manager = AppointmentsDBM(db_path)
return db_manager
except ValueError:
return False
def test_load_appointments_db(db_manager):
# Let's make up a prefix and try to load data from the database using it
prefix = "XX"
db_appointments = db_manager.load_appointments_db(prefix)
assert len(db_appointments) == 0
# We can add a bunch of data to the db and try again (data is stored in json by the manager)
local_appointments = {}
for _ in range(10):
key = get_random_value_hex(16)
value = get_random_value_hex(32)
local_appointments[key] = value
db_manager.db.put((prefix + key).encode("utf-8"), json.dumps({"value": value}).encode("utf-8"))
db_appointments = db_manager.load_appointments_db(prefix)
# Check that both keys and values are the same
assert db_appointments.keys() == local_appointments.keys()
values = [appointment["value"] for appointment in db_appointments.values()]
assert set(values) == set(local_appointments.values()) and (len(values) == len(local_appointments))
def test_get_last_known_block():
db_path = "empty_db"
# First we check if the db exists, and if so we delete it
if os.path.isdir(db_path):
shutil.rmtree(db_path)
# Check that the db can be created if it does not exist
db_manager = open_create_db(db_path)
# Trying to get any last block for either the watcher or the responder should return None for an empty db
for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
assert db_manager.get_last_known_block(key) is None
# After saving some block in the db we should get that exact value
for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
block_hash = get_random_value_hex(32)
db_manager.db.put(key.encode("utf-8"), block_hash.encode("utf-8"))
assert db_manager.get_last_known_block(key) == block_hash
# Removing test db
shutil.rmtree(db_path)
def test_load_watcher_appointments_empty(db_manager):
assert len(db_manager.load_watcher_appointments()) == 0
def test_load_responder_trackers_empty(db_manager):
assert len(db_manager.load_responder_trackers()) == 0
def test_load_locator_map_empty(db_manager):
assert db_manager.load_locator_map(get_random_value_hex(LOCATOR_LEN_BYTES)) is None
def test_create_append_locator_map(db_manager):
uuid = uuid4().hex
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
db_manager.create_append_locator_map(locator, uuid)
# Check that the locator map has been properly stored
assert db_manager.load_locator_map(locator) == [uuid]
# If we try to add the same uuid again the list shouldn't change
db_manager.create_append_locator_map(locator, uuid)
assert db_manager.load_locator_map(locator) == [uuid]
# Add another uuid to the same locator and check that it also works
uuid2 = uuid4().hex
db_manager.create_append_locator_map(locator, uuid2)
assert set(db_manager.load_locator_map(locator)) == set([uuid, uuid2])
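# The locator map keeps, per locator, the list of uuids that share it; this is presumably how every appointment watching the same breach can be fetched when the dispute shows up on chain.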
def test_update_locator_map(db_manager):
# Let's create a couple of appointments with the same locator
locator = get_random_value_hex(32)
uuid1 = uuid4().hex
uuid2 = uuid4().hex
db_manager.create_append_locator_map(locator, uuid1)
db_manager.create_append_locator_map(locator, uuid2)
locator_map = db_manager.load_locator_map(locator)
assert uuid1 in locator_map
locator_map.remove(uuid1)
db_manager.update_locator_map(locator, locator_map)
locator_map_after = db_manager.load_locator_map(locator)
assert uuid1 not in locator_map_after and uuid2 in locator_map_after and len(locator_map_after) == 1
def test_update_locator_map_wrong_data(db_manager):
# Let's try to update the locator map with a different list of uuids
locator = get_random_value_hex(32)
db_manager.create_append_locator_map(locator, uuid4().hex)
db_manager.create_append_locator_map(locator, uuid4().hex)
locator_map = db_manager.load_locator_map(locator)
wrong_map_update = [uuid4().hex]
db_manager.update_locator_map(locator, wrong_map_update)
locator_map_after = db_manager.load_locator_map(locator)
assert locator_map_after == locator_map
def test_update_locator_map_empty(db_manager):
# We shouldn't be able to update a map with an empty list
locator = get_random_value_hex(32)
db_manager.create_append_locator_map(locator, uuid4().hex)
db_manager.create_append_locator_map(locator, uuid4().hex)
locator_map = db_manager.load_locator_map(locator)
db_manager.update_locator_map(locator, [])
locator_map_after = db_manager.load_locator_map(locator)
assert locator_map_after == locator_map
def test_delete_locator_map(db_manager):
locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
assert len(locator_maps) != 0
for locator, uuids in locator_maps.items():
assert db_manager.delete_locator_map(locator) is True
locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
assert len(locator_maps) == 0
# Keys of wrong type should fail
assert db_manager.delete_locator_map(42) is False
def test_store_watcher_appointment_wrong(db_manager, watcher_appointments):
# Wrong uuid types should fail
for _, appointment in watcher_appointments.items():
assert db_manager.store_watcher_appointment(42, appointment.to_dict()) is False
def test_load_watcher_appointment_wrong(db_manager):
# Random keys should fail
assert db_manager.load_watcher_appointment(get_random_value_hex(16)) is None
# Wrong format keys should also return None
assert db_manager.load_watcher_appointment(42) is None
def test_store_load_watcher_appointment(db_manager, watcher_appointments):
for uuid, appointment in watcher_appointments.items():
assert db_manager.store_watcher_appointment(uuid, appointment.to_dict()) is True
db_watcher_appointments = db_manager.load_watcher_appointments()
# Check that the two appointment collections are equal by checking:
# - Their size is equal
# - Each element in one collection exists in the other
assert watcher_appointments.keys() == db_watcher_appointments.keys()
for uuid, appointment in watcher_appointments.items():
assert appointment.to_dict() == db_watcher_appointments[uuid]
def test_store_load_triggered_appointment(db_manager):
db_watcher_appointments = db_manager.load_watcher_appointments()
db_watcher_appointments_with_triggered = db_manager.load_watcher_appointments(include_triggered=True)
assert db_watcher_appointments == db_watcher_appointments_with_triggered
# Create an appointment flagged as triggered
triggered_appointment, _ = generate_dummy_appointment(real_height=False)
uuid = uuid4().hex
assert db_manager.store_watcher_appointment(uuid, triggered_appointment.to_dict()) is True
db_manager.create_triggered_appointment_flag(uuid)
# The new appointment is grabbed only if we set include_triggered
assert db_watcher_appointments == db_manager.load_watcher_appointments()
assert uuid in db_manager.load_watcher_appointments(include_triggered=True)
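# Note: the triggered flag marks appointments that have presumably been handed over to the Responder, which is why load_watcher_appointments() skips them unless include_triggered=True.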
def test_store_responder_trackers_wrong(db_manager, responder_trackers):
# Wrong uuid types should fail
for _, tracker in responder_trackers.items():
assert db_manager.store_responder_tracker(42, {"value": tracker}) is False
def test_load_responder_tracker_wrong(db_manager):
# Random keys should fail
assert db_manager.load_responder_tracker(get_random_value_hex(16)) is None
# Wrong format keys should also return None
assert db_manager.load_responder_tracker(42) is None
def test_store_load_responder_trackers(db_manager, responder_trackers):
for key, value in responder_trackers.items():
assert db_manager.store_responder_tracker(key, {"value": value}) is True
db_responder_trackers = db_manager.load_responder_trackers()
values = [tracker["value"] for tracker in db_responder_trackers.values()]
assert responder_trackers.keys() == db_responder_trackers.keys()
assert set(responder_trackers.values()) == set(values) and len(responder_trackers) == len(values)
def test_delete_watcher_appointment(db_manager, watcher_appointments):
# Let's delete all we added
db_watcher_appointments = db_manager.load_watcher_appointments(include_triggered=True)
assert len(db_watcher_appointments) != 0
for key in watcher_appointments.keys():
assert db_manager.delete_watcher_appointment(key) is True
db_watcher_appointments = db_manager.load_watcher_appointments()
assert len(db_watcher_appointments) == 0
# Keys of wrong type should fail
assert db_manager.delete_watcher_appointment(42) is False
def test_batch_delete_watcher_appointments(db_manager, watcher_appointments):
# Let's start by adding a bunch of appointments
for uuid, appointment in watcher_appointments.items():
assert db_manager.store_watcher_appointment(uuid, appointment.to_dict()) is True
first_half = list(watcher_appointments.keys())[: len(watcher_appointments) // 2]
second_half = list(watcher_appointments.keys())[len(watcher_appointments) // 2 :]
# Let's now delete half of them in a batch update
db_manager.batch_delete_watcher_appointments(first_half)
db_watcher_appointments = db_manager.load_watcher_appointments()
assert not set(db_watcher_appointments.keys()).issuperset(first_half)
assert set(db_watcher_appointments.keys()).issuperset(second_half)
# Let's delete the rest
db_manager.batch_delete_watcher_appointments(second_half)
# Now there should be no appointments left
db_watcher_appointments = db_manager.load_watcher_appointments()
assert not db_watcher_appointments
def test_delete_responder_tracker(db_manager, responder_trackers):
# Same for the responder
db_responder_trackers = db_manager.load_responder_trackers()
assert len(db_responder_trackers) != 0
for key in responder_trackers.keys():
assert db_manager.delete_responder_tracker(key) is True
db_responder_trackers = db_manager.load_responder_trackers()
assert len(db_responder_trackers) == 0
# Keys of wrong type should fail
assert db_manager.delete_responder_tracker(42) is False
def test_batch_delete_responder_trackers(db_manager, responder_trackers):
# Let's start by adding a bunch of appointments
for uuid, value in responder_trackers.items():
assert db_manager.store_responder_tracker(uuid, {"value": value}) is True
first_half = list(responder_trackers.keys())[: len(responder_trackers) // 2]
second_half = list(responder_trackers.keys())[len(responder_trackers) // 2 :]
# Let's now delete half of them in a batch update
db_manager.batch_delete_responder_trackers(first_half)
db_responder_trackers = db_manager.load_responder_trackers()
assert not set(db_responder_trackers.keys()).issuperset(first_half)
assert set(db_responder_trackers.keys()).issuperset(second_half)
# Let's delete the rest
db_manager.batch_delete_responder_trackers(second_half)
# Now there should be no trackers left
db_responder_trackers = db_manager.load_responder_trackers()
assert not db_responder_trackers
def test_store_load_last_block_hash_watcher(db_manager):
# Let's first create a made up block hash
local_last_block_hash = get_random_value_hex(32)
assert db_manager.store_last_block_hash_watcher(local_last_block_hash) is True
db_last_block_hash = db_manager.load_last_block_hash_watcher()
assert local_last_block_hash == db_last_block_hash
# Wrong types for last block should fail for both store and load
assert db_manager.store_last_block_hash_watcher(42) is False
def test_store_load_last_block_hash_responder(db_manager):
# Same for the responder
local_last_block_hash = get_random_value_hex(32)
assert db_manager.store_last_block_hash_responder(local_last_block_hash) is True
db_last_block_hash = db_manager.load_last_block_hash_responder()
assert local_last_block_hash == db_last_block_hash
# Wrong types for last block should fail for both store and load
assert db_manager.store_last_block_hash_responder(42) is False
def test_create_triggered_appointment_flag(db_manager):
# Test that flags are added
key = get_random_value_hex(16)
db_manager.create_triggered_appointment_flag(key)
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is not None
# Try to get a random one that we haven't added
key = get_random_value_hex(16)
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is None
def test_batch_create_triggered_appointment_flag(db_manager):
# Test that flags are added in batch
keys = [get_random_value_hex(16) for _ in range(10)]
# Check that none of the flags are already in the db
db_flags = db_manager.load_all_triggered_flags()
assert not set(db_flags).issuperset(keys)
# Make sure that they are in the db now
db_manager.batch_create_triggered_appointment_flag(keys)
db_flags = db_manager.load_all_triggered_flags()
assert set(db_flags).issuperset(keys)
def test_load_all_triggered_flags(db_manager):
# There should be some flags in the db from the previous tests. Let's load them
flags = db_manager.load_all_triggered_flags()
# We can add another flag and check that it shows up when we load them again
new_uuid = uuid4().hex
db_manager.create_triggered_appointment_flag(new_uuid)
flags.append(new_uuid)
assert set(db_manager.load_all_triggered_flags()) == set(flags)
def test_delete_triggered_appointment_flag(db_manager):
# Test that data is properly deleted.
keys = db_manager.load_all_triggered_flags()
# Delete all entries
for k in keys:
assert db_manager.delete_triggered_appointment_flag(k) is True
# Try to load them back
for k in keys:
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + k).encode("utf-8")) is None
# Keys of wrong type should fail
assert db_manager.delete_triggered_appointment_flag(42) is False
def test_batch_delete_triggered_appointment_flag(db_manager):
# Let's add some flags first
keys = [get_random_value_hex(16) for _ in range(10)]
db_manager.batch_create_triggered_appointment_flag(keys)
# And now let's delete in batch
first_half = keys[: len(keys) // 2]
second_half = keys[len(keys) // 2 :]
db_manager.batch_delete_triggered_appointment_flag(first_half)
db_flags = db_manager.load_all_triggered_flags()
assert not set(db_flags).issuperset(first_half)
assert set(db_flags).issuperset(second_half)
# Delete the rest
db_manager.batch_delete_triggered_appointment_flag(second_half)
assert not db_manager.load_all_triggered_flags()

View File

@@ -1,6 +1,4 @@
import pytest from test.teos.unit.conftest import get_random_value_hex, generate_block, generate_blocks, fork
from test.teos.unit.conftest import get_random_value_hex, generate_block, generate_blocks, fork, bitcoind_connect_params
hex_tx = ( hex_tx = (

View File

@@ -46,6 +46,7 @@ def test_build_appointments():
assert uuid in appointments_data.keys() assert uuid in appointments_data.keys()
assert appointments_data[uuid].get("locator") == appointment.get("locator") assert appointments_data[uuid].get("locator") == appointment.get("locator")
assert appointments_data[uuid].get("end_time") == appointment.get("end_time") assert appointments_data[uuid].get("end_time") == appointment.get("end_time")
assert len(appointments_data[uuid].get("encrypted_blob")) == appointment.get("size")
assert uuid in locator_uuid_map[appointment.get("locator")] assert uuid in locator_uuid_map[appointment.get("locator")]

View File

@@ -5,7 +5,7 @@ from threading import Thread, Event, Condition
from teos.chain_monitor import ChainMonitor from teos.chain_monitor import ChainMonitor
from test.teos.unit.conftest import get_random_value_hex, generate_block, bitcoind_connect_params, bitcoind_feed_params from test.teos.unit.conftest import get_random_value_hex, generate_block, bitcoind_feed_params
def test_init(run_bitcoind, block_processor): def test_init(run_bitcoind, block_processor):
@@ -64,8 +64,8 @@ def test_update_state(block_processor):
def test_monitor_chain_polling(db_manager, block_processor): def test_monitor_chain_polling(db_manager, block_processor):
# Try polling with the Watcher # Try polling with the Watcher
wq = Queue() watcher_queue = Queue()
chain_monitor = ChainMonitor(Queue(), Queue(), block_processor, bitcoind_feed_params) chain_monitor = ChainMonitor(watcher_queue, Queue(), block_processor, bitcoind_feed_params)
chain_monitor.best_tip = block_processor.get_best_block_hash() chain_monitor.best_tip = block_processor.get_best_block_hash()
chain_monitor.polling_delta = 0.1 chain_monitor.polling_delta = 0.1

View File

@@ -27,7 +27,7 @@ def set_up_appointments(db_manager, total_appointments):
appointments[uuid] = {"locator": appointment.locator} appointments[uuid] = {"locator": appointment.locator}
locator_uuid_map[locator] = [uuid] locator_uuid_map[locator] = [uuid]
db_manager.store_watcher_appointment(uuid, appointment.to_json()) db_manager.store_watcher_appointment(uuid, appointment.to_dict())
db_manager.create_append_locator_map(locator, uuid) db_manager.create_append_locator_map(locator, uuid)
# Each locator can have more than one uuid assigned to it. # Each locator can have more than one uuid assigned to it.
@@ -37,7 +37,7 @@ def set_up_appointments(db_manager, total_appointments):
appointments[uuid] = {"locator": appointment.locator} appointments[uuid] = {"locator": appointment.locator}
locator_uuid_map[locator].append(uuid) locator_uuid_map[locator].append(uuid)
db_manager.store_watcher_appointment(uuid, appointment.to_json()) db_manager.store_watcher_appointment(uuid, appointment.to_dict())
db_manager.create_append_locator_map(locator, uuid) db_manager.create_append_locator_map(locator, uuid)
return appointments, locator_uuid_map return appointments, locator_uuid_map
@@ -60,7 +60,7 @@ def set_up_trackers(db_manager, total_trackers):
trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid} trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid}
tx_tracker_map[penalty_txid] = [uuid] tx_tracker_map[penalty_txid] = [uuid]
db_manager.store_responder_tracker(uuid, tracker.to_json()) db_manager.store_responder_tracker(uuid, tracker.to_dict())
db_manager.create_append_locator_map(tracker.locator, uuid) db_manager.create_append_locator_map(tracker.locator, uuid)
# Each penalty_txid can have more than one uuid assigned to it. # Each penalty_txid can have more than one uuid assigned to it.
@@ -70,7 +70,7 @@ def set_up_trackers(db_manager, total_trackers):
trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid} trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid}
tx_tracker_map[penalty_txid].append(uuid) tx_tracker_map[penalty_txid].append(uuid)
db_manager.store_responder_tracker(uuid, tracker.to_json()) db_manager.store_responder_tracker(uuid, tracker.to_dict())
db_manager.create_append_locator_map(tracker.locator, uuid) db_manager.create_append_locator_map(tracker.locator, uuid)
return trackers, tx_tracker_map return trackers, tx_tracker_map

View File

@@ -1,30 +1,9 @@
import os import os
import json
import pytest
import shutil import shutil
from uuid import uuid4 import pytest
from teos.db_manager import DBManager from teos.db_manager import DBManager
from teos.db_manager import ( from test.teos.unit.conftest import get_random_value_hex
WATCHER_LAST_BLOCK_KEY,
RESPONDER_LAST_BLOCK_KEY,
LOCATOR_MAP_PREFIX,
TRIGGERED_APPOINTMENTS_PREFIX,
)
from common.constants import LOCATOR_LEN_BYTES
from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment
@pytest.fixture(scope="module")
def watcher_appointments():
return {uuid4().hex: generate_dummy_appointment(real_height=False)[0] for _ in range(10)}
@pytest.fixture(scope="module")
def responder_trackers():
return {get_random_value_hex(16): get_random_value_hex(32) for _ in range(10)}
def open_create_db(db_path): def open_create_db(db_path):
@@ -62,67 +41,15 @@ def test_init():
shutil.rmtree(db_path) shutil.rmtree(db_path)
def test_load_appointments_db(db_manager):
# Let's made up a prefix and try to load data from the database using it
prefix = "XX"
db_appointments = db_manager.load_appointments_db(prefix)
assert len(db_appointments) == 0
# We can add a bunch of data to the db and try again (data is stored in json by the manager)
local_appointments = {}
for _ in range(10):
key = get_random_value_hex(16)
value = get_random_value_hex(32)
local_appointments[key] = value
db_manager.db.put((prefix + key).encode("utf-8"), json.dumps({"value": value}).encode("utf-8"))
db_appointments = db_manager.load_appointments_db(prefix)
# Check that both keys and values are the same
assert db_appointments.keys() == local_appointments.keys()
values = [appointment["value"] for appointment in db_appointments.values()]
assert set(values) == set(local_appointments.values()) and (len(values) == len(local_appointments))
def test_get_last_known_block():
db_path = "empty_db"
# First we check if the db exists, and if so we delete it
if os.path.isdir(db_path):
shutil.rmtree(db_path)
# Check that the db can be created if it does not exist
db_manager = open_create_db(db_path)
# Trying to get any last block for either the watcher or the responder should return None for an empty db
for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
assert db_manager.get_last_known_block(key) is None
# After saving some block in the db we should get that exact value
for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
block_hash = get_random_value_hex(32)
db_manager.db.put(key.encode("utf-8"), block_hash.encode("utf-8"))
assert db_manager.get_last_known_block(key) == block_hash
# Removing test db
shutil.rmtree(db_path)
def test_create_entry(db_manager): def test_create_entry(db_manager):
key = get_random_value_hex(16) key = get_random_value_hex(16)
value = get_random_value_hex(32) value = get_random_value_hex(32)
# Adding a value with no prefix (create entry encodes values in utf-8 internally) # Adding a value with no prefix should work
db_manager.create_entry(key, value) db_manager.create_entry(key, value)
# We should be able to get it straightaway from the key
assert db_manager.db.get(key.encode("utf-8")).decode("utf-8") == value assert db_manager.db.get(key.encode("utf-8")).decode("utf-8") == value
# If we prefix the key we should be able to get it if we add the prefix, but not otherwise # Prefixing the key would require the prefix to load
key = get_random_value_hex(16) key = get_random_value_hex(16)
prefix = "w" prefix = "w"
db_manager.create_entry(key, value, prefix=prefix) db_manager.create_entry(key, value, prefix=prefix)
@@ -130,22 +57,51 @@ def test_create_entry(db_manager):
assert db_manager.db.get((prefix + key).encode("utf-8")).decode("utf-8") == value assert db_manager.db.get((prefix + key).encode("utf-8")).decode("utf-8") == value
assert db_manager.db.get(key.encode("utf-8")) is None assert db_manager.db.get(key.encode("utf-8")) is None
# Same if we try to use any other prefix # Keys, prefixes, and values of wrong format should fail
another_prefix = "r" with pytest.raises(TypeError):
assert db_manager.db.get((another_prefix + key).encode("utf-8")) is None db_manager.create_entry(key=None)
with pytest.raises(TypeError):
db_manager.create_entry(key=key, value=None)
with pytest.raises(TypeError):
db_manager.create_entry(key=key, value=value, prefix=1)
def test_load_entry(db_manager):
key = get_random_value_hex(16)
value = get_random_value_hex(32)
# Loading an existing key should work
db_manager.db.put(key.encode("utf-8"), value.encode("utf-8"))
assert db_manager.load_entry(key) == value.encode("utf-8")
# Loading with an existing prefix should work
assert db_manager.load_entry(key[2:], prefix=key[:2]) == value.encode("utf-8")
# Loading with a non-existing prefix should return None
assert db_manager.load_entry(key, prefix=get_random_value_hex(2)) is None
# Loading a non-existing entry should return None
assert db_manager.load_entry(get_random_value_hex(16)) is None
# Trying to load a non str key or prefix should fail
with pytest.raises(TypeError):
db_manager.load_entry(None)
with pytest.raises(TypeError):
db_manager.load_entry(get_random_value_hex(16), prefix=1)
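# In short, DBManager appears to namespace entries by prepending an optional string prefix to
# the key before hitting LevelDB. A quick sketch of the behaviour exercised above:
#
#   db_manager.create_entry("abcd", "value", prefix="w")  # stored under b"wabcd"
#   db_manager.load_entry("abcd", prefix="w")             # -> b"value"
#   db_manager.load_entry("abcd")                         # -> None (no prefix, different key)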
def test_delete_entry(db_manager): def test_delete_entry(db_manager):
# Let's first get the keys of all the things we've written so far in the db # Let's get the keys of all the things we've written so far in the db and empty the db.
data = [k.decode("utf-8") for k, v in db_manager.db.iterator()] data = [k.decode("utf-8") for k, v in db_manager.db.iterator()]
# Let's empty the db now
for key in data: for key in data:
db_manager.delete_entry(key) db_manager.delete_entry(key)
assert len([k for k, v in db_manager.db.iterator()]) == 0 assert len([k for k, v in db_manager.db.iterator()]) == 0
# Let's check that the same works if a prefix is provided. # The same works if a prefix is provided.
prefix = "r" prefix = "r"
key = get_random_value_hex(16) key = get_random_value_hex(16)
value = get_random_value_hex(32) value = get_random_value_hex(32)
@@ -158,294 +114,12 @@ def test_delete_entry(db_manager):
db_manager.delete_entry(key, prefix) db_manager.delete_entry(key, prefix)
assert db_manager.db.get((prefix + key).encode("utf-8")) is None assert db_manager.db.get((prefix + key).encode("utf-8")) is None
# Deleting a non-existing key should be fine
db_manager.delete_entry(key, prefix)
def test_load_watcher_appointments_empty(db_manager): # Trying to delete a non str key or prefix should fail
assert len(db_manager.load_watcher_appointments()) == 0 with pytest.raises(TypeError):
db_manager.delete_entry(None)
with pytest.raises(TypeError):
def test_load_responder_trackers_empty(db_manager): db_manager.delete_entry(get_random_value_hex(16), prefix=1)
assert len(db_manager.load_responder_trackers()) == 0
def test_load_locator_map_empty(db_manager):
assert db_manager.load_locator_map(get_random_value_hex(LOCATOR_LEN_BYTES)) is None
def test_create_append_locator_map(db_manager):
uuid = uuid4().hex
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
db_manager.create_append_locator_map(locator, uuid)
# Check that the locator map has been properly stored
assert db_manager.load_locator_map(locator) == [uuid]
# If we try to add the same uuid again the list shouldn't change
db_manager.create_append_locator_map(locator, uuid)
assert db_manager.load_locator_map(locator) == [uuid]
# Add another uuid to the same locator and check that it also works
uuid2 = uuid4().hex
db_manager.create_append_locator_map(locator, uuid2)
assert set(db_manager.load_locator_map(locator)) == set([uuid, uuid2])
def test_update_locator_map(db_manager):
# Let's create a couple of appointments with the same locator
locator = get_random_value_hex(32)
uuid1 = uuid4().hex
uuid2 = uuid4().hex
db_manager.create_append_locator_map(locator, uuid1)
db_manager.create_append_locator_map(locator, uuid2)
locator_map = db_manager.load_locator_map(locator)
assert uuid1 in locator_map
locator_map.remove(uuid1)
db_manager.update_locator_map(locator, locator_map)
locator_map_after = db_manager.load_locator_map(locator)
assert uuid1 not in locator_map_after and uuid2 in locator_map_after and len(locator_map_after) == 1
def test_update_locator_map_wong_data(db_manager):
# Let's try to update the locator map with a different list of uuids
locator = get_random_value_hex(32)
db_manager.create_append_locator_map(locator, uuid4().hex)
db_manager.create_append_locator_map(locator, uuid4().hex)
locator_map = db_manager.load_locator_map(locator)
wrong_map_update = [uuid4().hex]
db_manager.update_locator_map(locator, wrong_map_update)
locator_map_after = db_manager.load_locator_map(locator)
assert locator_map_after == locator_map
def test_update_locator_map_empty(db_manager):
# We shouldn't be able to update a map with an empty list
locator = get_random_value_hex(32)
db_manager.create_append_locator_map(locator, uuid4().hex)
db_manager.create_append_locator_map(locator, uuid4().hex)
locator_map = db_manager.load_locator_map(locator)
db_manager.update_locator_map(locator, [])
locator_map_after = db_manager.load_locator_map(locator)
assert locator_map_after == locator_map
def test_delete_locator_map(db_manager):
locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
assert len(locator_maps) != 0
for locator, uuids in locator_maps.items():
db_manager.delete_locator_map(locator)
locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
assert len(locator_maps) == 0
def test_store_load_watcher_appointment(db_manager, watcher_appointments):
for uuid, appointment in watcher_appointments.items():
db_manager.store_watcher_appointment(uuid, appointment.to_json())
db_watcher_appointments = db_manager.load_watcher_appointments()
# Check that the two appointment collections are equal by checking:
# - Their size is equal
# - Each element in one collection exists in the other
assert watcher_appointments.keys() == db_watcher_appointments.keys()
for uuid, appointment in watcher_appointments.items():
assert json.dumps(db_watcher_appointments[uuid], sort_keys=True, separators=(",", ":")) == appointment.to_json()
def test_store_load_triggered_appointment(db_manager):
db_watcher_appointments = db_manager.load_watcher_appointments()
db_watcher_appointments_with_triggered = db_manager.load_watcher_appointments(include_triggered=True)
assert db_watcher_appointments == db_watcher_appointments_with_triggered
# Create an appointment flagged as triggered
triggered_appointment, _ = generate_dummy_appointment(real_height=False)
uuid = uuid4().hex
db_manager.store_watcher_appointment(uuid, triggered_appointment.to_json())
db_manager.create_triggered_appointment_flag(uuid)
# The new appointment is grabbed only if we set include_triggered
assert db_watcher_appointments == db_manager.load_watcher_appointments()
assert uuid in db_manager.load_watcher_appointments(include_triggered=True)
def test_store_load_responder_trackers(db_manager, responder_trackers):
for key, value in responder_trackers.items():
db_manager.store_responder_tracker(key, json.dumps({"value": value}))
db_responder_trackers = db_manager.load_responder_trackers()
values = [tracker["value"] for tracker in db_responder_trackers.values()]
assert responder_trackers.keys() == db_responder_trackers.keys()
assert set(responder_trackers.values()) == set(values) and len(responder_trackers) == len(values)
def test_delete_watcher_appointment(db_manager, watcher_appointments):
# Let's delete all we added
db_watcher_appointments = db_manager.load_watcher_appointments(include_triggered=True)
assert len(db_watcher_appointments) != 0
for key in watcher_appointments.keys():
db_manager.delete_watcher_appointment(key)
db_watcher_appointments = db_manager.load_watcher_appointments()
assert len(db_watcher_appointments) == 0
def test_batch_delete_watcher_appointments(db_manager, watcher_appointments):
# Let's start by adding a bunch of appointments
for uuid, appointment in watcher_appointments.items():
db_manager.store_watcher_appointment(uuid, appointment.to_json())
first_half = list(watcher_appointments.keys())[: len(watcher_appointments) // 2]
second_half = list(watcher_appointments.keys())[len(watcher_appointments) // 2 :]
# Let's now delete half of them in a batch update
db_manager.batch_delete_watcher_appointments(first_half)
db_watcher_appointments = db_manager.load_watcher_appointments()
assert not set(db_watcher_appointments.keys()).issuperset(first_half)
assert set(db_watcher_appointments.keys()).issuperset(second_half)
# Let's delete the rest
db_manager.batch_delete_watcher_appointments(second_half)
# Now there should be no appointments left
db_watcher_appointments = db_manager.load_watcher_appointments()
assert not db_watcher_appointments
def test_delete_responder_tracker(db_manager, responder_trackers):
# Same for the responder
db_responder_trackers = db_manager.load_responder_trackers()
assert len(db_responder_trackers) != 0
for key in responder_trackers.keys():
db_manager.delete_responder_tracker(key)
db_responder_trackers = db_manager.load_responder_trackers()
assert len(db_responder_trackers) == 0
def test_batch_delete_responder_trackers(db_manager, responder_trackers):
# Let's start by adding a bunch of appointments
for uuid, value in responder_trackers.items():
db_manager.store_responder_tracker(uuid, json.dumps({"value": value}))
first_half = list(responder_trackers.keys())[: len(responder_trackers) // 2]
second_half = list(responder_trackers.keys())[len(responder_trackers) // 2 :]
# Let's now delete half of them in a batch update
db_manager.batch_delete_responder_trackers(first_half)
db_responder_trackers = db_manager.load_responder_trackers()
assert not set(db_responder_trackers.keys()).issuperset(first_half)
assert set(db_responder_trackers.keys()).issuperset(second_half)
# Let's delete the rest
db_manager.batch_delete_responder_trackers(second_half)
# Now there should be no trackers left
db_responder_trackers = db_manager.load_responder_trackers()
assert not db_responder_trackers
def test_store_load_last_block_hash_watcher(db_manager):
# Let's first create a made up block hash
local_last_block_hash = get_random_value_hex(32)
db_manager.store_last_block_hash_watcher(local_last_block_hash)
db_last_block_hash = db_manager.load_last_block_hash_watcher()
assert local_last_block_hash == db_last_block_hash
def test_store_load_last_block_hash_responder(db_manager):
# Same for the responder
local_last_block_hash = get_random_value_hex(32)
db_manager.store_last_block_hash_responder(local_last_block_hash)
db_last_block_hash = db_manager.load_last_block_hash_responder()
assert local_last_block_hash == db_last_block_hash
def test_create_triggered_appointment_flag(db_manager):
# Test that flags are added
key = get_random_value_hex(16)
db_manager.create_triggered_appointment_flag(key)
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is not None
    # Getting a random key that we haven't added should return nothing
key = get_random_value_hex(16)
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is None
def test_batch_create_triggered_appointment_flag(db_manager):
# Test that flags are added in batch
keys = [get_random_value_hex(16) for _ in range(10)]
    # Check that none of the flags are already in the db
db_flags = db_manager.load_all_triggered_flags()
assert not set(db_flags).issuperset(keys)
# Make sure that they are now
db_manager.batch_create_triggered_appointment_flag(keys)
db_flags = db_manager.load_all_triggered_flags()
assert set(db_flags).issuperset(keys)
def test_load_all_triggered_flags(db_manager):
    # There should be some flags in the db from the previous tests. Let's load them
flags = db_manager.load_all_triggered_flags()
    # We can add another flag and check that it is included when loading them all
new_uuid = uuid4().hex
db_manager.create_triggered_appointment_flag(new_uuid)
flags.append(new_uuid)
assert set(db_manager.load_all_triggered_flags()) == set(flags)
def test_delete_triggered_appointment_flag(db_manager):
# Test data is properly deleted.
keys = db_manager.load_all_triggered_flags()
# Delete all entries
for k in keys:
db_manager.delete_triggered_appointment_flag(k)
# Try to load them back
for k in keys:
assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + k).encode("utf-8")) is None
def test_batch_delete_triggered_appointment_flag(db_manager):
# Let's add some flags first
keys = [get_random_value_hex(16) for _ in range(10)]
db_manager.batch_create_triggered_appointment_flag(keys)
# And now let's delete in batch
first_half = keys[: len(keys) // 2]
second_half = keys[len(keys) // 2 :]
db_manager.batch_delete_triggered_appointment_flag(first_half)
    db_flags = db_manager.load_all_triggered_flags()
    assert not set(db_flags).issuperset(first_half)
    assert set(db_flags).issuperset(second_half)
# Delete the rest
db_manager.batch_delete_triggered_appointment_flag(second_half)
assert not db_manager.load_all_triggered_flags()

View File

@@ -0,0 +1,137 @@
import pytest
from teos.gatekeeper import IdentificationFailure, NotEnoughSlots
from common.cryptographer import Cryptographer
from test.teos.unit.conftest import get_random_value_hex, generate_keypair, get_config
config = get_config()
def test_init(gatekeeper):
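    # A fresh gatekeeper should expose the configured default slots and start with no registered users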
assert isinstance(gatekeeper.default_slots, int) and gatekeeper.default_slots == config.get("DEFAULT_SLOTS")
assert isinstance(gatekeeper.registered_users, dict) and len(gatekeeper.registered_users) == 0
def test_add_update_user(gatekeeper):
# add_update_user adds DEFAULT_SLOTS to a given user as long as the identifier is {02, 03}| 32-byte hex str
user_pk = "02" + get_random_value_hex(32)
for _ in range(10):
current_slots = gatekeeper.registered_users.get(user_pk)
current_slots = current_slots.get("available_slots") if current_slots is not None else 0
gatekeeper.add_update_user(user_pk)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == current_slots + config.get(
"DEFAULT_SLOTS"
)
# The same can be checked for multiple users
for _ in range(10):
# The user identifier is changed every call
user_pk = "03" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == config.get("DEFAULT_SLOTS")
def test_add_update_user_wrong_pk(gatekeeper):
    # Passing a wrong pk bubbles up the errors raised by check_user_pk. We can try with one of them.
wrong_pk = get_random_value_hex(32)
with pytest.raises(ValueError):
gatekeeper.add_update_user(wrong_pk)
def test_add_update_user_wrong_pk_prefix(gatekeeper):
# Prefixes must be 02 or 03, anything else should fail
wrong_pk = "04" + get_random_value_hex(32)
with pytest.raises(ValueError):
gatekeeper.add_update_user(wrong_pk)
def test_identify_user(gatekeeper):
# Identify user should return a user_pk for registered users. It raises
# IdentificationFailure for invalid parameters or non-registered users.
# Let's first register a user
sk, pk = generate_keypair()
compressed_pk = Cryptographer.get_compressed_pk(pk)
gatekeeper.add_update_user(compressed_pk)
message = "Hey, it's me"
signature = Cryptographer.sign(message.encode(), sk)
assert gatekeeper.identify_user(message.encode(), signature) == compressed_pk
def test_identify_user_non_registered(gatekeeper):
# Non-registered user won't be identified
sk, pk = generate_keypair()
message = "Hey, it's me"
signature = Cryptographer.sign(message.encode(), sk)
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature)
def test_identify_user_invalid_signature(gatekeeper):
# If the signature does not match the message given a public key, the user won't be identified
message = "Hey, it's me"
signature = get_random_value_hex(72)
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature)
def test_identify_user_wrong(gatekeeper):
# Wrong parameters shouldn't verify either
sk, pk = generate_keypair()
message = "Hey, it's me"
signature = Cryptographer.sign(message.encode(), sk)
# Non-byte message and str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message, signature)
# byte message and non-str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature.encode())
# non-byte message and non-str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message, signature.encode())
def test_fill_slots(gatekeeper):
    # fill_slots decreases the slot count of a user as long as they have enough slots, otherwise it raises NotEnoughSlots
user_pk = "02" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
gatekeeper.fill_slots(user_pk, config.get("DEFAULT_SLOTS") - 1)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == 1
with pytest.raises(NotEnoughSlots):
gatekeeper.fill_slots(user_pk, 2)
# NotEnoughSlots is also raised if the user does not exist
with pytest.raises(NotEnoughSlots):
gatekeeper.fill_slots(get_random_value_hex(33), 2)
def test_free_slots(gatekeeper):
# Free slots simply adds slots to the user as long as it exists.
user_pk = "03" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
gatekeeper.free_slots(user_pk, 42)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == config.get("DEFAULT_SLOTS") + 42
# Just making sure it does not crash for non-registered user
assert gatekeeper.free_slots(get_random_value_hex(33), 10) is None

View File

@@ -1,27 +1,20 @@
+import pytest
from binascii import unhexlify
-from teos.errors import *
+import teos.errors as errors
from teos import LOG_PREFIX
-from teos.inspector import Inspector
from teos.block_processor import BlockProcessor
+from teos.inspector import Inspector, InspectionFailed
import common.cryptographer
from common.logger import Logger
from common.appointment import Appointment
-from common.cryptographer import Cryptographer
from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
-from test.teos.unit.conftest import (
-    get_random_value_hex,
-    generate_dummy_appointment_data,
-    generate_keypair,
-    bitcoind_connect_params,
-    get_config,
-)
+from test.teos.unit.conftest import get_random_value_hex, bitcoind_connect_params, get_config
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
-APPOINTMENT_OK = (0, None)
NO_HEX_STRINGS = [
    "R" * LOCATOR_LEN_HEX,
    get_random_value_hex(LOCATOR_LEN_BYTES - 1) + "PP",
@@ -51,30 +44,60 @@ inspector = Inspector(block_processor, MIN_TO_SELF_DELAY)
def test_check_locator():
    # Right appointment type, size and format
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
-    assert Inspector.check_locator(locator) == APPOINTMENT_OK
+    assert inspector.check_locator(locator) is None
    # Wrong size (too big)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES + 1)
-    assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_locator(locator)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_WRONG_FIELD_SIZE
+            raise e
    # Wrong size (too small)
    locator = get_random_value_hex(LOCATOR_LEN_BYTES - 1)
-    assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_locator(locator)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_WRONG_FIELD_SIZE
+            raise e
    # Empty
    locator = None
-    assert Inspector.check_locator(locator)[0] == APPOINTMENT_EMPTY_FIELD
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_locator(locator)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e
    # Wrong type (several types tested, it should do for anything that is not a string)
    locators = [[], -1, 3.2, 0, 4, (), object, {}, object()]
    for locator in locators:
-        assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_TYPE
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_locator(locator)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
+                raise e
    # Wrong format (no hex)
    locators = NO_HEX_STRINGS
    for locator in locators:
-        assert Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_locator(locator)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_FORMAT
+                raise e
def test_check_start_time():
@@ -83,21 +106,39 @@ def test_check_start_time():
    # Right format and right value (start time in the future)
    start_time = 101
-    assert Inspector.check_start_time(start_time, current_time) == APPOINTMENT_OK
+    assert inspector.check_start_time(start_time, current_time) is None
    # Start time too small (either same block or block in the past)
    start_times = [100, 99, 98, -1]
    for start_time in start_times:
-        assert Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_start_time(start_time, current_time)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
+                raise e
    # Empty field
    start_time = None
-    assert Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_EMPTY_FIELD
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_start_time(start_time, current_time)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e
    # Wrong data type
    start_times = WRONG_TYPES
    for start_time in start_times:
-        assert Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_start_time(start_time, current_time)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
+                raise e
def test_check_end_time():
@@ -107,54 +148,96 @@ def test_check_end_time():
    # Right format and right value (start time before end and end in the future)
    end_time = 121
-    assert Inspector.check_end_time(end_time, start_time, current_time) == APPOINTMENT_OK
+    assert inspector.check_end_time(end_time, start_time, current_time) is None
    # End time too small (start time after end time)
    end_times = [120, 119, 118, -1]
    for end_time in end_times:
-        assert Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_end_time(end_time, start_time, current_time)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
+                raise e
    # End time too small (either same height as current block or in the past)
    current_time = 130
    end_times = [130, 129, 128, -1]
    for end_time in end_times:
-        assert Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_end_time(end_time, start_time, current_time)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
+                raise e
    # Empty field
    end_time = None
-    assert Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_EMPTY_FIELD
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_end_time(end_time, start_time, current_time)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e
    # Wrong data type
    end_times = WRONG_TYPES
    for end_time in end_times:
-        assert Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_end_time(end_time, start_time, current_time)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
+                raise e
def test_check_to_self_delay():
    # Right value, right format
    to_self_delays = [MIN_TO_SELF_DELAY, MIN_TO_SELF_DELAY + 1, MIN_TO_SELF_DELAY + 1000]
    for to_self_delay in to_self_delays:
-        assert inspector.check_to_self_delay(to_self_delay) == APPOINTMENT_OK
+        assert inspector.check_to_self_delay(to_self_delay) is None
    # to_self_delay too small
    to_self_delays = [MIN_TO_SELF_DELAY - 1, MIN_TO_SELF_DELAY - 2, 0, -1, -1000]
    for to_self_delay in to_self_delays:
-        assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_FIELD_TOO_SMALL
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_to_self_delay(to_self_delay)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
+                raise e
    # Empty field
    to_self_delay = None
-    assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_EMPTY_FIELD
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_to_self_delay(to_self_delay)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e
    # Wrong data type
    to_self_delays = WRONG_TYPES
    for to_self_delay in to_self_delays:
-        assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_WRONG_FIELD_TYPE
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_to_self_delay(to_self_delay)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
+                raise e
def test_check_blob():
    # Right format and length
    encrypted_blob = get_random_value_hex(120)
-    assert Inspector.check_blob(encrypted_blob) == APPOINTMENT_OK
+    assert inspector.check_blob(encrypted_blob) is None
    # # Wrong content
    # # FIXME: There is not proper defined format for this yet. It should be restricted by size at least, and check it
@@ -163,47 +246,37 @@ def test_check_blob():
    # Wrong type
    encrypted_blobs = WRONG_TYPES_NO_STR
    for encrypted_blob in encrypted_blobs:
-        assert Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_TYPE
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_blob(encrypted_blob)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
+                raise e
    # Empty field
    encrypted_blob = None
-    assert Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_EMPTY_FIELD
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.check_blob(encrypted_blob)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e
    # Wrong format (no hex)
    encrypted_blobs = NO_HEX_STRINGS
    for encrypted_blob in encrypted_blobs:
-        assert Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.check_blob(encrypted_blob)
+            except InspectionFailed as e:
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD_FORMAT
+                raise e
-def test_check_appointment_signature():
-    # The inspector receives the public key as hex
-    client_sk, client_pk = generate_keypair()
-    client_pk_hex = client_pk.format().hex()
-    dummy_appointment_data, _ = generate_dummy_appointment_data(real_height=False)
-    assert Inspector.check_appointment_signature(
-        dummy_appointment_data["appointment"], dummy_appointment_data["signature"], dummy_appointment_data["public_key"]
-    )
-    fake_sk, _ = generate_keypair()
-    # Create a bad signature to make sure inspector rejects it
-    bad_signature = Cryptographer.sign(
-        Appointment.from_dict(dummy_appointment_data["appointment"]).serialize(), fake_sk
-    )
-    assert (
-        Inspector.check_appointment_signature(dummy_appointment_data["appointment"], bad_signature, client_pk_hex)[0]
-        == APPOINTMENT_INVALID_SIGNATURE
-    )
def test_inspect(run_bitcoind):
-    # At this point every single check function has been already tested, let's test inspect with an invalid and a valid
-    # appointments.
-    client_sk, client_pk = generate_keypair()
-    client_pk_hex = client_pk.format().hex()
    # Valid appointment
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    start_time = block_processor.get_block_count() + 5
@@ -219,9 +292,7 @@ def test_inspect(run_bitcoind):
"encrypted_blob": encrypted_blob, "encrypted_blob": encrypted_blob,
} }
signature = Cryptographer.sign(Appointment.from_dict(appointment_data).serialize(), client_sk) appointment = inspector.inspect(appointment_data)
appointment = inspector.inspect(appointment_data, signature, client_pk_hex)
assert ( assert (
type(appointment) == Appointment type(appointment) == Appointment
@@ -231,3 +302,24 @@ def test_inspect(run_bitcoind):
        and appointment.to_self_delay == to_self_delay
        and appointment.encrypted_blob.data == encrypted_blob
    )
+def test_inspect_wrong(run_bitcoind):
+    # Wrong types (taking out empty dict, since that's a different error)
+    wrong_types = [t for t in WRONG_TYPES if t != {}]
+    for data in wrong_types:
+        with pytest.raises(InspectionFailed):
+            try:
+                inspector.inspect(data)
+            except InspectionFailed as e:
+                print(data)
+                assert e.erno == errors.APPOINTMENT_WRONG_FIELD
+                raise e
+    # None data
+    with pytest.raises(InspectionFailed):
+        try:
+            inspector.inspect(None)
+        except InspectionFailed as e:
+            assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
+            raise e

View File

@@ -1,4 +1,3 @@
-import json
import pytest
import random
from uuid import uuid4
@@ -9,8 +8,8 @@ from threading import Thread
from teos.carrier import Carrier
from teos.tools import bitcoin_cli
-from teos.db_manager import DBManager
from teos.chain_monitor import ChainMonitor
+from teos.appointments_dbm import AppointmentsDBM
from teos.responder import Responder, TransactionTracker
from common.constants import LOCATOR_LEN_HEX
@@ -36,7 +35,7 @@ def responder(db_manager, carrier, block_processor):
@pytest.fixture(scope="session")
def temp_db_manager():
    db_name = get_random_value_hex(8)
-    db_manager = DBManager(db_name)
+    db_manager = AppointmentsDBM(db_name)
    yield db_manager
@@ -120,17 +119,6 @@ def test_tracker_to_dict():
    )
-def test_tracker_to_json():
-    tracker = create_dummy_tracker()
-    tracker_dict = json.loads(tracker.to_json())
-    assert (
-        tracker.locator == tracker_dict["locator"]
-        and tracker.penalty_rawtx == tracker_dict["penalty_rawtx"]
-        and tracker.appointment_end == tracker_dict["appointment_end"]
-    )
def test_tracker_from_dict():
    tracker_dict = create_dummy_tracker().to_dict()
    new_tracker = TransactionTracker.from_dict(tracker_dict)
@@ -295,7 +283,7 @@ def test_do_watch(temp_db_manager, carrier, block_processor):
    # We also need to store the info in the db
    responder.db_manager.create_triggered_appointment_flag(uuid)
-    responder.db_manager.store_responder_tracker(uuid, tracker.to_json())
+    responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())
    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()
@@ -472,7 +460,7 @@ def test_rebroadcast(db_manager, carrier, block_processor):
    # We need to add it to the db too
    responder.db_manager.create_triggered_appointment_flag(uuid)
-    responder.db_manager.store_responder_tracker(uuid, tracker.to_json())
+    responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())
    responder.tx_tracker_map[penalty_txid] = [uuid]
    responder.unconfirmed_txs.append(penalty_txid)

View File

@@ -1,5 +1,4 @@
from teos.tools import can_connect_to_bitcoind, in_correct_network, bitcoin_cli
-from common.tools import check_sha256_hex_format
from test.teos.unit.conftest import bitcoind_connect_params
@@ -27,32 +26,3 @@ def test_bitcoin_cli():
    except Exception:
        assert False
-def test_check_sha256_hex_format():
-    assert check_sha256_hex_format(None) is False
-    assert check_sha256_hex_format("") is False
-    assert (
-        check_sha256_hex_format(0x0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF) is False
-    )  # wrong type
-    assert (
-        check_sha256_hex_format("abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd") is True
-    )  # lowercase
-    assert (
-        check_sha256_hex_format("ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCD") is True
-    )  # uppercase
-    assert (
-        check_sha256_hex_format("0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF") is True
-    )  # mixed case
-    assert (
-        check_sha256_hex_format("0123456789012345678901234567890123456789012345678901234567890123") is True
-    )  # only nums
-    assert (
-        check_sha256_hex_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf") is False
-    )  # too short
-    assert (
-        check_sha256_hex_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0") is False
-    )  # too long
-    assert (
-        check_sha256_hex_format("g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is False
-    )  # non-hex

View File

@@ -0,0 +1,82 @@
from teos.appointments_dbm import AppointmentsDBM
from test.teos.unit.conftest import get_random_value_hex
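# Users stored by the tests below are mirrored here so later tests can check against them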
stored_users = {}
def open_create_db(db_path):
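    # Helper that tries to open (or create) a database at the given path, returning False if it cannot be opened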
try:
db_manager = AppointmentsDBM(db_path)
return db_manager
except ValueError:
return False
def test_store_user(user_db_manager):
# Store user should work as long as the user_pk is properly formatted and data is a dictionary
user_pk = "02" + get_random_value_hex(32)
user_data = {"available_slots": 42}
stored_users[user_pk] = user_data
assert user_db_manager.store_user(user_pk, user_data) is True
# Wrong pks should return False on adding
user_pk = "04" + get_random_value_hex(32)
user_data = {"available_slots": 42}
assert user_db_manager.store_user(user_pk, user_data) is False
# Same for wrong types
assert user_db_manager.store_user(42, user_data) is False
    # And for wrong type user data (use a properly formatted pk so only the data check can fail)
    user_pk = "02" + get_random_value_hex(32)
    assert user_db_manager.store_user(user_pk, 42) is False
def test_load_user(user_db_manager):
# Loading a user we have stored should work
for user_pk, user_data in stored_users.items():
assert user_db_manager.load_user(user_pk) == user_data
# Random keys should fail
assert user_db_manager.load_user(get_random_value_hex(33)) is None
# Wrong format keys should also return None
assert user_db_manager.load_user(42) is None
def test_delete_user(user_db_manager):
# Deleting an existing user should work
for user_pk, user_data in stored_users.items():
assert user_db_manager.delete_user(user_pk) is True
for user_pk, user_data in stored_users.items():
assert user_db_manager.load_user(user_pk) is None
    # But deleting a non-existing one should not fail
assert user_db_manager.delete_user(get_random_value_hex(32)) is True
# Keys of wrong type should fail
assert user_db_manager.delete_user(42) is False
def test_load_all_users(user_db_manager):
# There should be no users at the moment
assert user_db_manager.load_all_users() == {}
stored_users = {}
# Adding some and checking we get them all
for i in range(10):
user_pk = "02" + get_random_value_hex(32)
user_data = {"available_slots": i}
user_db_manager.store_user(user_pk, user_data)
stored_users[user_pk] = user_data
all_users = user_db_manager.load_all_users()
assert set(all_users.keys()) == set(stored_users.keys())
for k, v in all_users.items():
assert stored_users[k] == v

View File

@@ -9,8 +9,8 @@ from teos.carrier import Carrier
from teos.watcher import Watcher
from teos.tools import bitcoin_cli
from teos.responder import Responder
-from teos.db_manager import DBManager
from teos.chain_monitor import ChainMonitor
+from teos.appointments_dbm import AppointmentsDBM
from teos.block_processor import BlockProcessor
import common.cryptographer
@@ -40,11 +40,14 @@ config = get_config()
signing_key, public_key = generate_keypair()
+# Reduce the maximum number of appointments to something we can test faster
+MAX_APPOINTMENTS = 100
@pytest.fixture(scope="session")
def temp_db_manager():
    db_name = get_random_value_hex(8)
-    db_manager = DBManager(db_name)
+    db_manager = AppointmentsDBM(db_name)
    yield db_manager
@@ -59,12 +62,7 @@ def watcher(db_manager):
    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(
-        db_manager,
-        block_processor,
-        responder,
-        signing_key.to_der(),
-        config.get("MAX_APPOINTMENTS"),
-        config.get("EXPIRY_DELTA"),
+        db_manager, block_processor, responder, signing_key.to_der(), MAX_APPOINTMENTS, config.get("EXPIRY_DELTA")
    )
    chain_monitor = ChainMonitor(
@@ -114,13 +112,26 @@ def test_init(run_bitcoind, watcher):
    assert isinstance(watcher.signing_key, PrivateKey)
+def test_get_appointment_summary(watcher):
+    # get_appointment_summary returns an appointment summary if found, else None.
+    random_uuid = get_random_value_hex(16)
+    appointment_summary = {"locator": get_random_value_hex(16), "end_time": 10, "size": 200}
+    watcher.appointments[random_uuid] = appointment_summary
+    assert watcher.get_appointment_summary(random_uuid) == appointment_summary
+    # Requesting a non-existing appointment
+    assert watcher.get_appointment_summary(get_random_value_hex(16)) is None
def test_add_appointment(watcher):
    # We should be able to add appointments up to the limit
    for _ in range(10):
        appointment, dispute_tx = generate_dummy_appointment(
            start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
        )
-        added_appointment, sig = watcher.add_appointment(appointment)
+        user_pk = get_random_value_hex(33)
+        added_appointment, sig = watcher.add_appointment(appointment, user_pk)
        assert added_appointment is True
        assert Cryptographer.verify_rpk(
@@ -128,23 +139,37 @@ def test_add_appointment(watcher):
        )
    # Check that we can also add an already added appointment (same locator)
-    added_appointment, sig = watcher.add_appointment(appointment)
+    added_appointment, sig = watcher.add_appointment(appointment, user_pk)
    assert added_appointment is True
    assert Cryptographer.verify_rpk(
        watcher.signing_key.public_key, Cryptographer.recover_pk(appointment.serialize(), sig)
    )
+    # If two appointments with the same locator from the same user are added, they are overwritten, but if they come
+    # from different users, they are kept.
+    assert len(watcher.locator_uuid_map[appointment.locator]) == 1
+    different_user_pk = get_random_value_hex(33)
+    added_appointment, sig = watcher.add_appointment(appointment, different_user_pk)
+    assert added_appointment is True
+    assert Cryptographer.verify_rpk(
+        watcher.signing_key.public_key, Cryptographer.recover_pk(appointment.serialize(), sig)
+    )
+    assert len(watcher.locator_uuid_map[appointment.locator]) == 2
def test_add_too_many_appointments(watcher):
    # Any appointment on top of those should fail
    watcher.appointments = dict()
-    for _ in range(config.get("MAX_APPOINTMENTS")):
+    for _ in range(MAX_APPOINTMENTS):
        appointment, dispute_tx = generate_dummy_appointment(
            start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
        )
-        added_appointment, sig = watcher.add_appointment(appointment)
+        user_pk = get_random_value_hex(33)
+        added_appointment, sig = watcher.add_appointment(appointment, user_pk)
        assert added_appointment is True
        assert Cryptographer.verify_rpk(
@@ -154,7 +179,8 @@ def test_add_too_many_appointments(watcher):
    appointment, dispute_tx = generate_dummy_appointment(
        start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
    )
-    added_appointment, sig = watcher.add_appointment(appointment)
+    user_pk = get_random_value_hex(33)
+    added_appointment, sig = watcher.add_appointment(appointment, user_pk)
    assert added_appointment is False
    assert sig is None
@@ -171,8 +197,8 @@ def test_do_watch(watcher, temp_db_manager):
    watcher.appointments = {}
    for uuid, appointment in appointments.items():
-        watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time}
-        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
+        watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time, "size": 200}
+        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)
    do_watch_thread = Thread(target=watcher.do_watch, daemon=True)
@@ -222,7 +248,7 @@ def test_filter_valid_breaches_random_data(watcher):
        dummy_appointment, _ = generate_dummy_appointment()
        uuid = uuid4().hex
        appointments[uuid] = {"locator": dummy_appointment.locator, "end_time": dummy_appointment.end_time}
-        watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_json())
+        watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
        watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
        locator_uuid_map[dummy_appointment.locator] = [uuid]
@@ -262,7 +288,7 @@ def test_filter_valid_breaches(watcher):
    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time}
-        watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_json())
+        watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
        watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
    watcher.locator_uuid_map = locator_uuid_map