Merge pull request #137 from talaia-labs/42-decouple-start-end-time

Removes start and end time from the appointment data
Sergi Delgado Segura
2020-04-22 15:31:02 +02:00
committed by GitHub
36 changed files with 1336 additions and 1168 deletions

View File

@@ -49,10 +49,10 @@ for opt, arg in opts:
```python
if rcode == 0:
rcode, message = self.check_start_time(start_time, block_height)
if rcode == 0:
rcode, message = self.check_end_time(end_time, start_time, block_height)
if appointment_data is None:
raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty appointment received")
elif not isinstance(appointment_data, dict):
raise InspectionFailed(errors.APPOINTMENT_WRONG_FIELD, "wrong appointment format")
```
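
For reference, a minimal sketch of the exception raised above, inferred from how the API consumes it later in this diff (`e.erno`, `e.reason`); the real class may carry additional behaviour.
```python
class InspectionFailed(Exception):
    """Raised when an appointment fails inspection; carries an error code and a reason."""

    def __init__(self, erno, reason):
        self.erno = erno      # application error code, e.g. errors.APPOINTMENT_EMPTY_FIELD
        self.reason = reason  # human-readable rejection reason
```
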
## Dev Requirements

View File

@@ -44,8 +44,6 @@ This command is used to send appointments to the watchtower. Appointments **must
{ "tx": tx,
"tx_id": tx_id,
"start_time": s,
"end_time": e,
"to_self_delay": d }
`tx` **must** be the raw penalty transaction that will be encrypted before sent to the watchtower. `type(tx) = hex encoded str`
@@ -60,12 +58,6 @@ This command is used to send appointments to the watchtower. Appointments **must
The API will return an `application/json` HTTP response code `200/OK` if the appointment is accepted, with the locator encoded in the response text, or a `400/Bad Request` if the appointment is rejected, with the rejection reason encoded in the response text.
### Alpha release restrictions
The alpha release does not have authentication, payments, or rate limiting, so some self-imposed restrictions apply:
- `start_time` should be within the next 6 blocks `[current_time+1, current_time+6]`.
- `end_time` cannot be bigger than (roughly) a month. That is `4320` blocks on top of `start_time`.
#### Usage
@@ -103,9 +95,7 @@ if `-f, --file` **is** specified, then the command expects a path to a json file
"appointment":
{
"encrypted_blob": eb,
"end_time": e,
"locator": appointment_locator,
"start_time": s,
"status": "being_watched",
"to_self_delay": d
}
@@ -118,7 +108,6 @@ if `-f, --file` **is** specified, then the command expects a path to a json file
"status": "dispute_responded",
"appointment":
{
"appointment_end": e,
"dispute_txid": dispute_txid,
"locator": appointment_locator,
"penalty_rawtx": penalty_rawtx,
@@ -164,10 +153,10 @@ python teos_cli.py register
2. Generate a new dummy appointment. **Note:** this appointment will never be fulfilled (it will eventually expire) since it does not correspond to a valid transaction. However, it can be used to interact with the Eye of Satoshi's API.
```
echo '{"tx": "4615a58815475ab8145b6bb90b1268a0dbb02e344ddd483f45052bec1f15b1951c1ee7f070a0993da395a5ee92ea3a1c184b5ffdb2507164bf1f8c1364155d48bdbc882eee0868ca69864a807f213f538990ad16f56d7dfb28a18e69e3f31ae9adad229e3244073b7d643b4597ec88bf247b9f73f301b0f25ae8207b02b7709c271da98af19f1db276ac48ba64f099644af1ae2c90edb7def5e8589a1bb17cc72ac42ecf07dd29cff91823938fd0d772c2c92b7ab050f8837efd46197c9b2b3f", "tx_id": "0b9510d92a50c1d67c6f7fc5d47908d96b3eccdea093d89bcbaf05bcfebdd951", "start_time": 0, "end_time": 0, "to_self_delay": 20}' > dummy_appointment_data.json
echo '{"tx": "4615a58815475ab8145b6bb90b1268a0dbb02e344ddd483f45052bec1f15b1951c1ee7f070a0993da395a5ee92ea3a1c184b5ffdb2507164bf1f8c1364155d48bdbc882eee0868ca69864a807f213f538990ad16f56d7dfb28a18e69e3f31ae9adad229e3244073b7d643b4597ec88bf247b9f73f301b0f25ae8207b02b7709c271da98af19f1db276ac48ba64f099644af1ae2c90edb7def5e8589a1bb17cc72ac42ecf07dd29cff91823938fd0d772c2c92b7ab050f8837efd46197c9b2b3f", "tx_id": "0b9510d92a50c1d67c6f7fc5d47908d96b3eccdea093d89bcbaf05bcfebdd951", "to_self_delay": 20}' > dummy_appointment_data.json
```
That will create a json file that follows the appointment data structure filled with dummy data and store it in `dummy_appointment_data.json`. **Note**: You'll need to update the `start_time` and `end_time` to match valid block heights.
That will create a json file that follows the appointment data structure filled with dummy data and store it in `dummy_appointment_data.json`.
3. Send the appointment to the tower API, which will then start monitoring for matching transactions (a sketch of the request body it posts follows).
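
Under the hood, the CLI builds the request body shown in the next file's diff (`{"appointment": ..., "signature": ...}`) and posts it as json. A minimal sketch, assuming the tower's add_appointment endpoint hangs off the configured base url (the path is not part of this diff):
```python
import requests

def post_appointment(teos_url, appointment_dict, signature):
    # appointment_dict comes from appointment.to_dict(); signature is computed over
    # appointment.serialize() with the user's private key (see teos_cli.py below).
    data = {"appointment": appointment_dict, "signature": signature}
    return requests.post(url=teos_url + "/add_appointment", json=data, timeout=5)
```
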

View File

@@ -57,7 +57,7 @@ def register(user_id, teos_url):
return response
def add_appointment(appointment_data, cli_sk, teos_id, teos_url):
def add_appointment(appointment_data, user_sk, teos_id, teos_url):
"""
Manages the add_appointment command.
@@ -72,7 +72,7 @@ def add_appointment(appointment_data, cli_sk, teos_id, teos_url):
Args:
appointment_data (:obj:`dict`): a dictionary containing the appointment data.
cli_sk (:obj:`PrivateKey`): the client's private key.
user_sk (:obj:`PrivateKey`): the user's private key.
teos_id (:obj:`str`): the tower's compressed public key.
teos_url (:obj:`str`): the teos base url.
@@ -104,7 +104,7 @@ def add_appointment(appointment_data, cli_sk, teos_id, teos_url):
appointment_data["locator"] = compute_locator(tx_id)
appointment_data["encrypted_blob"] = Cryptographer.encrypt(tx, tx_id)
appointment = Appointment.from_dict(appointment_data)
signature = Cryptographer.sign(appointment.serialize(), cli_sk)
signature = Cryptographer.sign(appointment.serialize(), user_sk)
data = {"appointment": appointment.to_dict(), "signature": signature}
@@ -128,13 +128,13 @@ def add_appointment(appointment_data, cli_sk, teos_id, teos_url):
return appointment, signature
def get_appointment(locator, cli_sk, teos_id, teos_url):
def get_appointment(locator, user_sk, teos_id, teos_url):
"""
Gets information about an appointment from the tower.
Args:
locator (:obj:`str`): the appointment locator used to identify it.
cli_sk (:obj:`PrivateKey`): the client's private key.
user_sk (:obj:`PrivateKey`): the user's private key.
teos_id (:obj:`PublicKey`): the tower's compressed public key.
teos_url (:obj:`str`): the teos base url.
@@ -155,7 +155,7 @@ def get_appointment(locator, cli_sk, teos_id, teos_url):
raise InvalidParameter("The provided locator is not valid", locator=locator)
message = "get appointment {}".format(locator)
signature = Cryptographer.sign(message.encode(), cli_sk)
signature = Cryptographer.sign(message.encode(), user_sk)
data = {"locator": locator, "signature": signature}
# Send request to the server.
@@ -199,13 +199,13 @@ def get_all_appointments(teos_url):
return None
def load_keys(teos_pk_path, cli_sk_path):
def load_keys(teos_pk_path, user_sk_path):
"""
Loads all the keys required to sign, send, and verify the appointment.
Args:
teos_pk_path (:obj:`str`): path to the tower public key file.
cli_sk_path (:obj:`str`): path to the client private key file.
teos_pk_path (:obj:`str`): path to the tower's public key file.
user_sk_path (:obj:`str`): path to the user's private key file.
Returns:
:obj:`tuple`: a three-item tuple containing a ``str``, a ``PrivateKey`` and a ``str``
@@ -218,7 +218,7 @@ def load_keys(teos_pk_path, cli_sk_path):
if not teos_pk_path:
raise InvalidKey("TEOS's public key file not found. Please check your settings")
if not cli_sk_path:
if not user_sk_path:
raise InvalidKey("Client's private key file not found. Please check your settings")
try:
@@ -229,19 +229,19 @@ def load_keys(teos_pk_path, cli_sk_path):
raise InvalidKey("TEOS public key cannot be loaded")
try:
cli_sk_der = Cryptographer.load_key_file(cli_sk_path)
cli_sk = Cryptographer.load_private_key_der(cli_sk_der)
user_sk_der = Cryptographer.load_key_file(user_sk_path)
user_sk = Cryptographer.load_private_key_der(user_sk_der)
except (InvalidParameter, InvalidKey):
raise InvalidKey("Client private key is invalid or cannot be parsed")
try:
client_id = Cryptographer.get_compressed_pk(cli_sk.public_key)
user_id = Cryptographer.get_compressed_pk(user_sk.public_key)
except (InvalidParameter, InvalidKey):
raise InvalidKey("Client public key cannot be loaded")
return teos_id, cli_sk, client_id
return teos_id, user_sk, user_id
def post_request(data, endpoint):
@@ -263,7 +263,7 @@ def post_request(data, endpoint):
return requests.post(url=endpoint, json=data, timeout=5)
except Timeout:
message = "Can't connect to the Eye of Satoshi's API. Connection timeout"
message = "Cannot connect to the Eye of Satoshi's API. Connection timeout"
except ConnectionError:
message = "Cannot connect to the Eye of Satoshi's API. Server cannot be reached"
@@ -402,15 +402,15 @@ def main(command, args, command_line_conf):
teos_url = "http://" + teos_url
try:
teos_id, cli_sk, client_id = load_keys(config.get("TEOS_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"))
teos_id, user_sk, user_id = load_keys(config.get("TEOS_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"))
if command == "register":
register_data = register(client_id, teos_url)
register_data = register(user_id, teos_url)
logger.info("Registration succeeded. Available slots: {}".format(register_data.get("available_slots")))
if command == "add_appointment":
appointment_data = parse_add_appointment_args(args)
appointment, signature = add_appointment(appointment_data, cli_sk, teos_id, teos_url)
appointment, signature = add_appointment(appointment_data, user_sk, teos_id, teos_url)
save_appointment_receipt(appointment.to_dict(), signature, config.get("APPOINTMENTS_FOLDER_NAME"))
elif command == "get_appointment":
@@ -423,7 +423,7 @@ def main(command, args, command_line_conf):
if arg_opt in ["-h", "--help"]:
sys.exit(help_get_appointment())
appointment_data = get_appointment(arg_opt, cli_sk, teos_id, teos_url)
appointment_data = get_appointment(arg_opt, user_sk, teos_id, teos_url)
if appointment_data:
print(appointment_data)
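
The rename from `cli_sk` to `user_sk` reflects that the client is identified by the compressed public key derived from that private key. A minimal usage sketch of the helpers shown above (the key file path is a placeholder):
```python
from common.cryptographer import Cryptographer

# Load the user's DER-encoded private key and derive the user_id the tower knows them by.
user_sk_der = Cryptographer.load_key_file("user_sk.der")
user_sk = Cryptographer.load_private_key_der(user_sk_der)
user_id = Cryptographer.get_compressed_pk(user_sk.public_key)  # 33-byte compressed pk, hex
```
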

View File

@@ -9,18 +9,14 @@ class Appointment:
Args:
locator (:obj:`str`): A 16-byte hex-encoded value used by the tower to detect channel breaches. It serves as a
trigger for the tower to decrypt and broadcast the penalty transaction.
start_time (:obj:`int`): The block height where the tower is hired to start watching for breaches.
end_time (:obj:`int`): The block height where the tower will stop watching for breaches.
to_self_delay (:obj:`int`): The ``to_self_delay`` encoded in the ``csv`` of the ``to_remote`` output of the
commitment transaction that this appointment is covering.
encrypted_blob (:obj:`str`): An encrypted blob of data containing a penalty transaction. The tower will decrypt
it and broadcast the penalty transaction upon seeing a breach on the blockchain.
"""
def __init__(self, locator, start_time, end_time, to_self_delay, encrypted_blob):
def __init__(self, locator, to_self_delay, encrypted_blob):
self.locator = locator
self.start_time = start_time # ToDo: #4-standardize-appointment-fields
self.end_time = end_time # ToDo: #4-standardize-appointment-fields
self.to_self_delay = to_self_delay
self.encrypted_blob = encrypted_blob
@@ -29,30 +25,26 @@ class Appointment:
"""
Builds an appointment from a dictionary.
This method is useful to load data from a database.
Args:
appointment_data (:obj:`dict`): a dictionary containing the following keys:
``{locator, start_time, end_time, to_self_delay, encrypted_blob}``
``{locator, to_self_delay, encrypted_blob}``
Returns:
:obj:`Appointment <teos.appointment.Appointment>`: An appointment initialized using the provided data.
:obj:`Appointment <common.appointment.Appointment>`: An appointment initialized using the provided data.
Raises:
ValueError: If one of the mandatory keys is missing in ``appointment_data``.
"""
locator = appointment_data.get("locator")
start_time = appointment_data.get("start_time") # ToDo: #4-standardize-appointment-fields
end_time = appointment_data.get("end_time") # ToDo: #4-standardize-appointment-fields
to_self_delay = appointment_data.get("to_self_delay")
encrypted_blob_data = appointment_data.get("encrypted_blob")
encrypted_blob = appointment_data.get("encrypted_blob")
if any(v is None for v in [locator, start_time, end_time, to_self_delay, encrypted_blob_data]):
if any(v is None for v in [locator, to_self_delay, encrypted_blob]):
raise ValueError("Wrong appointment data, some fields are missing")
else:
appointment = cls(locator, start_time, end_time, to_self_delay, encrypted_blob_data)
appointment = cls(locator, to_self_delay, encrypted_blob)
return appointment
@@ -64,33 +56,18 @@ class Appointment:
:obj:`dict`: A dictionary containing the appointment attributes.
"""
# ToDO: #3-improve-appointment-structure
appointment = {
"locator": self.locator,
"start_time": self.start_time,
"end_time": self.end_time,
"to_self_delay": self.to_self_delay,
"encrypted_blob": self.encrypted_blob,
}
return appointment
return self.__dict__
def serialize(self):
"""
Serializes an appointment to be signed.
The serialization follows the same ordering as the fields in the appointment:
locator:start_time:end_time:to_self_delay:encrypted_blob
locator:to_self_delay:encrypted_blob
All values are big endian.
Returns:
:obj:`bytes`: The serialized data to be signed.
"""
return (
unhexlify(self.locator)
+ struct.pack(">I", self.start_time)
+ struct.pack(">I", self.end_time)
+ struct.pack(">I", self.to_self_delay)
+ unhexlify(self.encrypted_blob)
)
return unhexlify(self.locator) + struct.pack(">I", self.to_self_delay) + unhexlify(self.encrypted_blob)
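
A usage sketch of the slimmed-down serialization with dummy values; the signed payload is now just the locator, the big-endian ``to_self_delay`` and the encrypted blob:
```python
from common.appointment import Appointment

# 16-byte locator and a 64-byte encrypted blob, both hex-encoded (dummy values).
appointment = Appointment(locator="00" * 16, to_self_delay=20, encrypted_blob="ab" * 64)
data_to_sign = appointment.serialize()
assert len(data_to_sign) == 16 + 4 + 64  # locator + uint32 to_self_delay + blob
```
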

View File

@@ -8,5 +8,8 @@ HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_SERVICE_UNAVAILABLE = 503
# LN general nomenclature
IRREVOCABLY_RESOLVED = 100
# Temporary constants, may be changed
ENCRYPTED_BLOB_MAX_SIZE_HEX = 2 * 2048

View File

@@ -18,6 +18,7 @@ DEFAULT_CONF = {
"FEED_PORT": {"value": 28332, "type": int},
"MAX_APPOINTMENTS": {"value": 1000000, "type": int},
"DEFAULT_SLOTS": {"value": 100, "type": int},
"DEFAULT_SUBSCRIPTION_DURATION": {"value": 4320, "type": int},
"EXPIRY_DELTA": {"value": 6, "type": int},
"MIN_TO_SELF_DELAY": {"value": 20, "type": int},
"LOG_FILE": {"value": "teos.log", "type": str, "path": True},

View File

@@ -1,22 +1,17 @@
import os
import logging
from math import ceil
from flask import Flask, request, abort, jsonify
from teos import LOG_PREFIX
import teos.errors as errors
from teos.inspector import InspectionFailed
from teos.gatekeeper import NotEnoughSlots, IdentificationFailure
from teos.watcher import AppointmentLimitReached
from teos.gatekeeper import NotEnoughSlots, AuthenticationFailure
from common.logger import Logger
from common.cryptographer import hash_160
from common.constants import (
HTTP_OK,
HTTP_BAD_REQUEST,
HTTP_SERVICE_UNAVAILABLE,
HTTP_NOT_FOUND,
ENCRYPTED_BLOB_MAX_SIZE_HEX,
)
from common.exceptions import InvalidParameter
from common.constants import HTTP_OK, HTTP_BAD_REQUEST, HTTP_SERVICE_UNAVAILABLE, HTTP_NOT_FOUND
# ToDo: #5-add-async-to-api
@@ -54,7 +49,7 @@ def get_request_data_json(request):
:obj:`dict`: the dictionary parsed from the json request.
Raises:
:obj:`TypeError`: if the request is not json encoded or it does not decode to a dictionary.
:obj:`InvalidParameter`: if the request is not json encoded or it does not decode to a dictionary.
"""
if request.is_json:
@@ -62,9 +57,9 @@ def get_request_data_json(request):
if isinstance(request_data, dict):
return request_data
else:
raise TypeError("Invalid request content")
raise InvalidParameter("Invalid request content")
else:
raise TypeError("Request is not json encoded")
raise InvalidParameter("Request is not json encoded")
class API:
@@ -72,19 +67,18 @@ class API:
The :class:`API` is in charge of the interface between the user and the tower. It handles and serves user requests.
Args:
host (:obj:`str`): the hostname to listen on.
port (:obj:`int`): the port of the webserver.
inspector (:obj:`Inspector <teos.inspector.Inspector>`): an ``Inspector`` instance to check the correctness of
the received appointment data.
watcher (:obj:`Watcher <teos.watcher.Watcher>`): a ``Watcher`` instance to pass the requests to.
gatekeeper (:obj:`Watcher <teos.gatekeeper.Gatekeeper>`): a `Gatekeeper` instance in charge to control the user
access.
"""
def __init__(self, host, port, inspector, watcher, gatekeeper):
def __init__(self, host, port, inspector, watcher):
self.host = host
self.port = port
self.inspector = inspector
self.watcher = watcher
self.gatekeeper = gatekeeper
self.app = app
# Adds all the routes to the functions listed above.
@@ -103,7 +97,8 @@ class API:
Registers a user by creating a subscription.
Registration is pretty straightforward for now, since it does not require payments.
The amount of slots cannot be requested by the user yet either. This is linked to the previous point.
The amount of slots and expiry of the subscription cannot be requested by the user yet either. This is linked to
the previous point.
Users register by sending a public key to the proper endpoint. This is exploitable atm, but will be solved when
payments are introduced.
@@ -121,19 +116,23 @@ class API:
try:
request_data = get_request_data_json(request)
except TypeError as e:
except InvalidParameter as e:
logger.info("Received invalid register request", from_addr="{}".format(remote_addr))
return abort(HTTP_BAD_REQUEST, e)
return jsonify({"error": str(e)}), HTTP_BAD_REQUEST
client_pk = request_data.get("public_key")
user_id = request_data.get("public_key")
if client_pk:
if user_id:
try:
rcode = HTTP_OK
available_slots = self.gatekeeper.add_update_user(client_pk)
response = {"public_key": client_pk, "available_slots": available_slots}
available_slots, subscription_expiry = self.watcher.gatekeeper.add_update_user(user_id)
response = {
"public_key": user_id,
"available_slots": available_slots,
"subscription_expiry": subscription_expiry,
}
except ValueError as e:
except InvalidParameter as e:
rcode = HTTP_BAD_REQUEST
error = "Error {}: {}".format(errors.REGISTRATION_MISSING_FIELD, str(e))
response = {"error": error}
@@ -157,8 +156,8 @@ class API:
Returns:
:obj:`tuple`: A tuple containing the response (:obj:`str`) and response code (:obj:`int`). For accepted
appointments, the ``rcode`` is always 200 and the response contains the receipt signature (json). For
rejected appointments, the ``rcode`` is a 404 and the value contains an application error, and an error
message. Error messages can be found at :mod:`Errors <teos.errors>`.
rejected appointments, the ``rcode`` contains an application error, and an error message. Error messages can
be found at :mod:`Errors <teos.errors>`.
"""
# Getting the real IP if the server is behind a reverse proxy
@@ -169,67 +168,20 @@ class API:
try:
request_data = get_request_data_json(request)
except TypeError as e:
return abort(HTTP_BAD_REQUEST, e)
# We kind of have the chicken and the egg problem here. Data must be verified and the signature must be checked:
# - If we verify the data first, we may encounter that the signature is wrong and wasted some time.
# - If we check the signature first, we may need to verify some of the information or expose to build
# appointments with potentially wrong data, which may be exploitable.
#
# The first approach seems safer since it only implies a bunch of pretty quick checks.
except InvalidParameter as e:
return jsonify({"error": str(e)}), HTTP_BAD_REQUEST
try:
appointment = self.inspector.inspect(request_data.get("appointment"))
user_pk = self.gatekeeper.identify_user(appointment.serialize(), request_data.get("signature"))
# Check if the appointment is an update. Updates will return a summary.
appointment_uuid = hash_160("{}{}".format(appointment.locator, user_pk))
appointment_summary = self.watcher.get_appointment_summary(appointment_uuid)
if appointment_summary:
used_slots = ceil(appointment_summary.get("size") / ENCRYPTED_BLOB_MAX_SIZE_HEX)
required_slots = ceil(len(appointment.encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
slot_diff = required_slots - used_slots
# For updates we only reserve the slot difference provided the new one is bigger.
required_slots = slot_diff if slot_diff > 0 else 0
else:
# For regular appointments 1 slot is reserved per ENCRYPTED_BLOB_MAX_SIZE_HEX block.
slot_diff = 0
required_slots = ceil(len(appointment.encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
# Slots are reserved before adding the appointments to prevent race conditions.
# DISCUSS: It may be worth using signals here to avoid race conditions anyway.
self.gatekeeper.fill_slots(user_pk, required_slots)
appointment_added, signature = self.watcher.add_appointment(appointment, user_pk)
if appointment_added:
# If the appointment is added and the update is smaller than the original, the difference is given back.
if slot_diff < 0:
self.gatekeeper.free_slots(user_pk, abs(slot_diff))
response = self.watcher.add_appointment(appointment, request_data.get("signature"))
rcode = HTTP_OK
response = {
"locator": appointment.locator,
"signature": signature,
"available_slots": self.gatekeeper.registered_users[user_pk].get("available_slots"),
}
else:
# If the appointment is not added the reserved slots are given back
self.gatekeeper.free_slots(user_pk, required_slots)
rcode = HTTP_SERVICE_UNAVAILABLE
response = {"error": "appointment rejected"}
except InspectionFailed as e:
rcode = HTTP_BAD_REQUEST
error = "appointment rejected. Error {}: {}".format(e.erno, e.reason)
response = {"error": error}
except (IdentificationFailure, NotEnoughSlots):
except (AuthenticationFailure, NotEnoughSlots):
rcode = HTTP_BAD_REQUEST
error = "appointment rejected. Error {}: {}".format(
errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS,
@@ -237,6 +189,10 @@ class API:
)
response = {"error": error}
except AppointmentLimitReached:
rcode = HTTP_SERVICE_UNAVAILABLE
response = {"error": "appointment rejected"}
logger.info("Sending response and disconnecting", from_addr="{}".format(remote_addr), response=response)
return jsonify(response), rcode
@@ -266,9 +222,9 @@ class API:
try:
request_data = get_request_data_json(request)
except TypeError as e:
except InvalidParameter as e:
logger.info("Received invalid get_appointment request", from_addr="{}".format(remote_addr))
return abort(HTTP_BAD_REQUEST, e)
return jsonify({"error": str(e)}), HTTP_BAD_REQUEST
locator = request_data.get("locator")
@@ -278,16 +234,18 @@ class API:
message = "get appointment {}".format(locator).encode()
signature = request_data.get("signature")
user_pk = self.gatekeeper.identify_user(message, signature)
user_id = self.watcher.gatekeeper.authenticate_user(message, signature)
triggered_appointments = self.watcher.db_manager.load_all_triggered_flags()
uuid = hash_160("{}{}".format(locator, user_pk))
uuid = hash_160("{}{}".format(locator, user_id))
# If the appointment has been triggered, it should be in the Responder (default else just in case).
if uuid in triggered_appointments:
appointment_data = self.watcher.db_manager.load_responder_tracker(uuid)
if appointment_data:
rcode = HTTP_OK
# Remove user_id field from appointment data since it is an internal field
appointment_data.pop("user_id")
response = {"locator": locator, "status": "dispute_responded", "appointment": appointment_data}
else:
rcode = HTTP_NOT_FOUND
@@ -298,12 +256,14 @@ class API:
appointment_data = self.watcher.db_manager.load_watcher_appointment(uuid)
if appointment_data:
rcode = HTTP_OK
# Remove user_id field from appointment data since it is an internal field
appointment_data.pop("user_id")
response = {"locator": locator, "status": "being_watched", "appointment": appointment_data}
else:
rcode = HTTP_NOT_FOUND
response = {"locator": locator, "status": "not_found"}
except (InspectionFailed, IdentificationFailure):
except (InspectionFailed, AuthenticationFailure):
rcode = HTTP_NOT_FOUND
response = {"locator": locator, "status": "not_found"}
@@ -335,9 +295,7 @@ class API:
return response
def start(self):
"""
This function starts the Flask server used to run the API.
"""
""" This function starts the Flask server used to run the API """
# Setting Flask log to ERROR only so it does not mess with our logging. Also disabling flask initial messages
logging.getLogger("werkzeug").setLevel(logging.ERROR)
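
A client-side sketch of hitting the updated registration handler; the endpoint path is an assumption (routes are not part of this diff), but the request and response shapes match the handler above:
```python
import requests

def register_user(teos_url, user_id):
    # The response now carries available_slots plus the new subscription_expiry field,
    # or a json body {"error": ...} with a 400 status on invalid input.
    response = requests.post(url=teos_url + "/register", json={"public_key": user_id}, timeout=5)
    return response.status_code, response.json()
```
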

View File

@@ -84,7 +84,7 @@ class AppointmentsDBM(DBManager):
``RESPONDER_LAST_BLOCK_KEY``).
Returns:
:obj:`str` or :obj:`None`: A 16-byte hex-encoded str representing the last known block hash.
:obj:`str` or :obj:`None`: A 32-byte hex-encoded str representing the last known block hash.
Returns ``None`` if the entry is not found.
"""
@@ -177,7 +177,7 @@ class AppointmentsDBM(DBManager):
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
appointment (:obj:`dict`): an appointment encoded as dictionary.
appointment (:obj:`dict`): an appointment encoded as a dictionary.
Returns:
:obj:`bool`: True if the appointment was stored in the db. False otherwise.
@@ -202,7 +202,7 @@ class AppointmentsDBM(DBManager):
Args:
uuid (:obj:`str`): the identifier of the appointment to be stored.
tracker (:obj:`dict`): a tracker encoded as dictionary.
tracker (:obj:`dict`): a tracker encoded as a dictionary.
Returns:
:obj:`bool`: True if the tracker was stored in the db. False otherwise.
@@ -247,7 +247,7 @@ class AppointmentsDBM(DBManager):
def create_append_locator_map(self, locator, uuid):
"""
Creates (or appends to if already exists) a ``locator:uuid`` map.
Creates a ``locator:uuid`` map.
If the map already exists, the new ``uuid`` is appended to the existing ones (if it is not already there).
@@ -334,7 +334,7 @@ class AppointmentsDBM(DBManager):
def batch_delete_watcher_appointments(self, uuids):
"""
Deletes an appointment from the database.
Deletes multiple appointments from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the appointments to be deleted.
@@ -367,7 +367,7 @@ class AppointmentsDBM(DBManager):
def batch_delete_responder_trackers(self, uuids):
"""
Deletes an appointment from the database.
Deletes multiple trackers from the database.
Args:
uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the trackers to be deleted.

View File

@@ -14,7 +14,7 @@ class BlockProcessor:
Args:
btc_connect_params (:obj:`dict`): a dictionary with the parameters to connect to bitcoind
(rpc user, rpc passwd, host and port)
(rpc user, rpc password, host and port)
"""
def __init__(self, btc_connect_params):
@@ -22,10 +22,10 @@ class BlockProcessor:
def get_block(self, block_hash):
"""
Gives a block given a block hash by querying ``bitcoind``.
Gets a block given a block hash by querying ``bitcoind``.
Args:
block_hash (:obj:`str`): The block hash to be queried.
block_hash (:obj:`str`): the block hash to be queried.
Returns:
:obj:`dict` or :obj:`None`: A dictionary containing the requested block data if the block is found.
@@ -44,7 +44,7 @@ class BlockProcessor:
def get_best_block_hash(self):
"""
Returns the hash of the current best chain tip.
Gets the hash of the current best chain tip.
Returns:
:obj:`str` or :obj:`None`: The hash of the block if it can be found.
@@ -63,10 +63,10 @@ class BlockProcessor:
def get_block_count(self):
"""
Returns the block height of the best chain.
Gets the block count of the best chain.
Returns:
:obj:`int` or :obj:`None`: The block height if it can be computed.
:obj:`int` or :obj:`None`: The block count of the best chain if it can be computed.
Returns ``None`` otherwise (not even sure this can actually happen).
"""
@@ -86,7 +86,7 @@ class BlockProcessor:
associated metadata given by ``bitcoind`` (e.g. confirmation count).
Args:
raw_tx (:obj:`str`): The hex representation of the transaction.
raw_tx (:obj:`str`): the hex representation of the transaction.
Returns:
:obj:`dict` or :obj:`None`: The decoding of the given ``raw_tx`` if the transaction is well formatted.
@@ -133,7 +133,7 @@ class BlockProcessor:
def get_missed_blocks(self, last_know_block_hash):
"""
Compute the blocks between the current best chain tip and a given block hash (``last_know_block_hash``).
Gets the blocks between the current best chain tip and a given block hash (``last_know_block_hash``).
This method is used to fetch all the missed information when recovering from a crash.
@@ -158,7 +158,7 @@ class BlockProcessor:
def is_block_in_best_chain(self, block_hash):
"""
Checks whether or not a given block is on the best chain. Blocks are identified by block_hash.
Checks whether a given block is on the best chain or not. Blocks are identified by block_hash.
A block that is not in the best chain will either not exist (block = None) or have a confirmation count of
-1 (implying that the block was forked out or the chain never grew from that one).

View File

@@ -1,42 +1,43 @@
from teos.responder import TransactionTracker
from teos.extended_appointment import ExtendedAppointment
class Builder:
"""
The :class:`Builder` class is in charge of reconstructing data loaded from the database and building the data
structures of the :obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder <teos.responder.Responder>`.
The :class:`Builder` class is in charge of reconstructing data loaded from the appointments database and building
the data structures of the :obj:`Watcher <teos.watcher.Watcher>` and the :obj:`Responder <teos.responder.Responder>`.
"""
@staticmethod
def build_appointments(appointments_data):
"""
Builds an appointments dictionary (``uuid: Appointment``) and a locator_uuid_map (``locator: uuid``) given a
dictionary of appointments from the database.
Builds an appointments dictionary (``uuid:ExtendedAppointment``) and a locator_uuid_map (``locator:uuid``)
given a dictionary of appointments from the database.
Args:
appointments_data (:obj:`dict`): a dictionary of dictionaries representing all the
:obj:`Watcher <teos.watcher.Watcher>` appointments stored in the database. The structure is as follows:
``{uuid: {locator: str, start_time: int, ...}, uuid: {locator:...}}``
``{uuid: {locator: str, ...}, uuid: {locator:...}}``
Returns:
:obj:`tuple`: A tuple with two dictionaries. ``appointments`` containing the appointment information in
:obj:`Appointment <teos.appointment.Appointment>` objects and ``locator_uuid_map`` containing a map of
appointment (``uuid:locator``).
:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>` objects and ``locator_uuid_map``
containing a map of appointments (``uuid:locator``).
"""
appointments = {}
locator_uuid_map = {}
for uuid, data in appointments_data.items():
appointments[uuid] = {
"locator": data.get("locator"),
"end_time": data.get("end_time"),
"size": len(data.get("encrypted_blob")),
}
appointment = ExtendedAppointment.from_dict(data)
appointments[uuid] = appointment.get_summary()
if data.get("locator") in locator_uuid_map:
locator_uuid_map[data.get("locator")].append(uuid)
if appointment.locator in locator_uuid_map:
locator_uuid_map[appointment.locator].append(uuid)
else:
locator_uuid_map[data.get("locator")] = [uuid]
locator_uuid_map[appointment.locator] = [uuid]
return appointments, locator_uuid_map
@@ -64,17 +65,14 @@ class Builder:
tx_tracker_map = {}
for uuid, data in tracker_data.items():
trackers[uuid] = {
"penalty_txid": data.get("penalty_txid"),
"locator": data.get("locator"),
"appointment_end": data.get("appointment_end"),
}
tracker = TransactionTracker.from_dict(data)
trackers[uuid] = tracker.get_summary()
if data.get("penalty_txid") in tx_tracker_map:
tx_tracker_map[data.get("penalty_txid")].append(uuid)
if tracker.penalty_txid in tx_tracker_map:
tx_tracker_map[tracker.penalty_txid].append(uuid)
else:
tx_tracker_map[data.get("penalty_txid")] = [uuid]
tx_tracker_map[tracker.penalty_txid] = [uuid]
return trackers, tx_tracker_map
@@ -85,8 +83,8 @@ class Builder:
:mod:`Responder <teos.responder.Responder>` using backed up data.
Args:
block_queue (:obj:`Queue`): a ``Queue``
missed_blocks (:obj:`list`): list of block hashes missed by the Watchtower (do to a crash or shutdown).
block_queue (:obj:`Queue`): a ``Queue``.
missed_blocks (:obj:`list`): list of block hashes missed by the Watchtower (due to a crash or shutdown).
Returns:
:obj:`Queue`: A ``Queue`` containing all the missed blocks hashes.
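
A shape sketch of the reworked ``build_appointments`` with dummy data, assuming the class lives at ``teos.builder`` (the module path is not shown in this diff):
```python
from teos.builder import Builder

appointments_data = {
    "uuid_1": {
        "locator": "00" * 16,
        "to_self_delay": 20,
        "encrypted_blob": "ab" * 64,
        "user_id": "02" + "11" * 32,
    }
}
appointments, locator_uuid_map = Builder.build_appointments(appointments_data)
# appointments == {"uuid_1": {"locator": "00" * 16, "user_id": "02" + "11" * 32}}
# locator_uuid_map == {"00" * 16: ["uuid_1"]}
```
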

View File

@@ -20,14 +20,14 @@ class ChainMonitor:
Args:
watcher_queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Watcher``.
responder_queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Responder``.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a blockProcessor instance.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance.
bitcoind_feed_params (:obj:`dict`): a dict with the feed (ZMQ) connection parameters.
Attributes:
best_tip (:obj:`str`): a block hash representing the current best tip.
last_tips (:obj:`list`): a list of last chain tips. Used as a sliding window to avoid notifying about old tips.
terminate (:obj:`bool`): a flag to signal the termination of the :class:`ChainMonitor` (shutdown the tower).
check_tip (:obj:`Event`): an event that's triggered at fixed time intervals and controls the polling thread.
check_tip (:obj:`Event`): an event that is triggered at fixed time intervals and controls the polling thread.
lock (:obj:`Condition`): a lock used to protect concurrent access to the queues and ``best_tip`` by the zmq and
polling threads.
zmqSubSocket (:obj:`socket`): a socket to connect to ``bitcoind`` via ``zmq``.

View File

@@ -87,7 +87,7 @@ class Cleaner:
@staticmethod
def delete_expired_appointments(expired_appointments, appointments, locator_uuid_map, db_manager):
"""
Deletes appointments whose ``end_time`` has been reached (with no trigger) both from memory
Deletes appointments whose ``expiry`` has been reached (with no trigger) both from memory
(:obj:`Watcher <teos.watcher.Watcher>`) and disk.
Args:
@@ -181,30 +181,36 @@ class Cleaner:
db_manager.create_triggered_appointment_flag(uuid)
@staticmethod
def delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager):
def delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager, expired=False):
"""
Deletes a completed tracker both from memory (:obj:`Responder <teos.responder.Responder>`) and disk (from the
Responder's and Watcher's databases).
Deletes completed/expired trackers both from memory (:obj:`Responder <teos.responder.Responder>`) and disk
(from the Responder's and Watcher's databases).
Args:
trackers (:obj:`dict`): a dictionary containing all the :obj:`Responder <teos.responder.Responder>`
trackers.
height (:obj:`int`): the block height at which the trackers were completed.
tx_tracker_map (:obj:`dict`): a ``penalty_txid:uuid`` map for the :obj:`Responder
<teos.responder.Responder>` trackers.
completed_trackers (:obj:`dict`): a dict of completed trackers to be deleted (uuid:confirmations).
height (:obj:`int`): the block height at which the trackers were completed.
completed_trackers (:obj:`dict`): a dict of completed/expired trackers to be deleted (uuid:confirmations).
db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
to interact with the database.
expired (:obj:`bool`): whether the trackers have expired or not. Defaults to False.
"""
locator_maps_to_update = {}
for uuid, confirmations in completed_trackers.items():
for uuid in completed_trackers:
if expired:
logger.info(
"Appointment completed. Appointment ended after reaching enough confirmations",
"Appointment couldn't be completed. Expiry reached but penalty didn't make it to the chain",
uuid=uuid,
height=height,
confirmations=confirmations,
)
else:
logger.info(
"Appointment completed. Penalty transaction was irrevocably confirmed", uuid=uuid, height=height
)
penalty_txid = trackers[uuid].get("penalty_txid")
@@ -229,6 +235,35 @@ class Cleaner:
Cleaner.update_delete_db_locator_map(uuids, locator, db_manager)
# Delete appointment from the db (from watchers's and responder's db) and remove flag
db_manager.batch_delete_responder_trackers(list(completed_trackers.keys()))
db_manager.batch_delete_watcher_appointments(list(completed_trackers.keys()))
db_manager.batch_delete_triggered_appointment_flag(list(completed_trackers.keys()))
db_manager.batch_delete_responder_trackers(completed_trackers)
db_manager.batch_delete_watcher_appointments(completed_trackers)
db_manager.batch_delete_triggered_appointment_flag(completed_trackers)
@staticmethod
def delete_gatekeeper_appointments(gatekeeper, appointment_to_delete):
"""
Deletes a list of expired / completed appointments of a given user both from memory and the UserDB.
Args:
gatekeeper (:obj:`Gatekeeper <teos.gatekeeper.Gatekeeper>`): a `Gatekeeper` instance in charge of controlling
the user access and subscription expiry.
appointment_to_delete (:obj:`dict`): uuid:user_id dict containing the appointments to delete
(expired + completed)
"""
user_ids = []
# Remove appointments from memory
for uuid, user_id in appointment_to_delete.items():
if user_id in gatekeeper.registered_users and uuid in gatekeeper.registered_users[user_id].appointments:
# Remove the appointment from the appointment list and update the available slots
gatekeeper.lock.acquire()
freed_slots = gatekeeper.registered_users[user_id].appointments.pop(uuid)
gatekeeper.registered_users[user_id].available_slots += freed_slots
gatekeeper.lock.release()
if user_id not in user_ids:
user_ids.append(user_id)
# Store the updated users in the DB
for user_id in user_ids:
gatekeeper.user_db.store_user(user_id, gatekeeper.registered_users[user_id].to_dict())
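
For clarity, the mapping consumed by ``delete_gatekeeper_appointments`` pairs each expired or completed appointment with its owner, so the freed slots can be credited back to the right subscription (ids below are placeholders):
```python
appointment_to_delete = {
    "uuid_a": "02" + "aa" * 32,  # expired appointment belonging to user A
    "uuid_b": "03" + "bb" * 32,  # completed appointment belonging to user B
}
```
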

View File

@@ -0,0 +1,46 @@
from common.appointment import Appointment
class ExtendedAppointment(Appointment):
def __init__(self, locator, to_self_delay, encrypted_blob, user_id):
super().__init__(locator, to_self_delay, encrypted_blob)
self.user_id = user_id
def get_summary(self):
"""
Returns the summary of an appointment, consisting of the locator and the user_id.
Returns:
:obj:`dict`: the appointment summary.
"""
return {"locator": self.locator, "user_id": self.user_id}
@classmethod
def from_dict(cls, appointment_data):
"""
Builds an appointment from a dictionary.
This method is useful to load data from a database.
Args:
appointment_data (:obj:`dict`): a dictionary containing the following keys:
``{locator, to_self_delay, encrypted_blob, user_id}``
Returns:
:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>`: An appointment initialized
using the provided data.
Raises:
ValueError: If one of the mandatory keys is missing in ``appointment_data``.
"""
appointment = Appointment.from_dict(appointment_data)
user_id = appointment_data.get("user_id")
if not user_id:
raise ValueError("Wrong appointment data, user_id is missing")
else:
appointment = cls(appointment.locator, appointment.to_self_delay, appointment.encrypted_blob, user_id)
return appointment
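
A usage sketch of the new class with dummy values; the summary is what the Watcher keeps in memory per appointment:
```python
from teos.extended_appointment import ExtendedAppointment

appointment = ExtendedAppointment(
    locator="00" * 16, to_self_delay=20, encrypted_blob="ab" * 64, user_id="02" + "11" * 32
)
assert appointment.get_summary() == {"locator": "00" * 16, "user_id": "02" + "11" * 32}
```
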

View File

@@ -1,63 +1,115 @@
from math import ceil
from threading import Lock
from common.tools import is_compressed_pk
from common.cryptographer import Cryptographer
from common.constants import ENCRYPTED_BLOB_MAX_SIZE_HEX
from common.exceptions import InvalidParameter, InvalidKey, SignatureError
class NotEnoughSlots(ValueError):
"""Raised when trying to subtract more slots than a user has available"""
def __init__(self, user_pk, requested_slots):
self.user_pk = user_pk
self.requested_slots = requested_slots
pass
class IdentificationFailure(Exception):
class AuthenticationFailure(Exception):
"""
Raised when a user cannot be identified. Either the user public key cannot be recovered or the user is
Raised when a user cannot be authenticated. Either the user public key cannot be recovered or the user is
not found within the registered ones.
"""
pass
class UserInfo:
def __init__(self, available_slots, subscription_expiry, appointments=None):
self.available_slots = available_slots
self.subscription_expiry = subscription_expiry
if not appointments:
# A dictionary of the form uuid:required_slots for each user appointment
self.appointments = {}
else:
self.appointments = appointments
@classmethod
def from_dict(cls, user_data):
available_slots = user_data.get("available_slots")
appointments = user_data.get("appointments")
subscription_expiry = user_data.get("subscription_expiry")
if any(v is None for v in [available_slots, appointments, subscription_expiry]):
raise ValueError("Wrong appointment data, some fields are missing")
return cls(available_slots, subscription_expiry, appointments)
def to_dict(self):
return self.__dict__
class Gatekeeper:
"""
The :class:`Gatekeeper` is in charge of managing the access to the tower. Only registered users are allowed to
perform actions.
Attributes:
registered_users (:obj:`dict`): a map of user_pk:appointment_slots.
default_slots (:obj:`int`): the number of slots assigned to a user subscription.
default_subscription_duration (:obj:`int`): the duration (in blocks) assigned to a user subscription.
expiry_delta (:obj:`int`): the grace period given to the user to renew their subscription.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get block from bitcoind.
user_db (:obj:`UserDBM <teos.user_dbm.UserDBM>`): a ``UserDBM`` instance to interact with the database.
registered_users (:obj:`dict`): a map of user_pk:UserInfo.
lock (:obj:`Lock`): a Threading.Lock object to lock access to the Gatekeeper on updates.
"""
def __init__(self, user_db, default_slots):
def __init__(self, user_db, block_processor, default_slots, default_subscription_duration, expiry_delta):
self.default_slots = default_slots
self.default_subscription_duration = default_subscription_duration
self.expiry_delta = expiry_delta
self.block_processor = block_processor
self.user_db = user_db
self.registered_users = user_db.load_all_users()
self.registered_users = {
user_id: UserInfo.from_dict(user_data) for user_id, user_data in user_db.load_all_users().items()
}
self.lock = Lock()
def add_update_user(self, user_pk):
def add_update_user(self, user_id):
"""
Adds a new user or updates the subscription of an existing one, by adding additional slots.
Args:
user_pk(:obj:`str`): the public key that identifies the user (33-bytes hex str).
user_id(:obj:`str`): the public key that identifies the user (33-bytes hex str).
Returns:
:obj:`int`: the number of available slots in the user subscription.
:obj:`tuple`: a tuple with the number of available slots in the user subscription and the subscription
expiry (in absolute block height).
Raises:
:obj:`InvalidParameter`: if the user_pk does not match the expected format.
"""
if not is_compressed_pk(user_pk):
raise ValueError("Provided public key does not match expected format (33-byte hex string)")
if not is_compressed_pk(user_id):
raise InvalidParameter("Provided public key does not match expected format (33-byte hex string)")
if user_pk not in self.registered_users:
self.registered_users[user_pk] = {"available_slots": self.default_slots}
if user_id not in self.registered_users:
self.registered_users[user_id] = UserInfo(
self.default_slots, self.block_processor.get_block_count() + self.default_subscription_duration
)
else:
self.registered_users[user_pk]["available_slots"] += self.default_slots
# FIXME: For now new calls to register add default_slots to the current count and reset the expiry time
self.registered_users[user_id].available_slots += self.default_slots
self.registered_users[user_id].subscription_expiry = (
self.block_processor.get_block_count() + self.default_subscription_duration
)
self.user_db.store_user(user_pk, self.registered_users[user_pk])
self.user_db.store_user(user_id, self.registered_users[user_id].to_dict())
return self.registered_users[user_pk]["available_slots"]
return self.registered_users[user_id].available_slots, self.registered_users[user_id].subscription_expiry
def identify_user(self, message, signature):
def authenticate_user(self, message, signature):
"""
Checks if a request comes from a registered user by ec-recovering their public key from a signed message.
@@ -69,50 +121,81 @@ class Gatekeeper:
:obj:`str`: a compressed key recovered from the signature and matching a registered user.
Raises:
:obj:`IdentificationFailure`: if the user cannot be identified.
:obj:`AuthenticationFailure`: if the user cannot be authenticated.
"""
try:
rpk = Cryptographer.recover_pk(message, signature)
compressed_pk = Cryptographer.get_compressed_pk(rpk)
user_id = Cryptographer.get_compressed_pk(rpk)
if compressed_pk in self.registered_users:
return compressed_pk
if user_id in self.registered_users:
return user_id
else:
raise IdentificationFailure("User not found.")
raise AuthenticationFailure("User not found.")
except (InvalidParameter, InvalidKey, SignatureError):
raise IdentificationFailure("Wrong message or signature.")
raise AuthenticationFailure("Wrong message or signature.")
def fill_slots(self, user_pk, n):
def add_update_appointment(self, user_id, uuid, appointment):
"""
Fills a given number of slots of the user subscription.
Adds (or updates) an appointment to a user subscription. The user slots are updated accordingly.
Slots are taken if a new appointment is given, or an update is given with an appointment bigger than the
existing one.
Slots are given back if an update is given but the new appointment is smaller than the existing one.
Args:
user_pk(:obj:`str`): the public key that identifies the user (33-bytes hex str).
n (:obj:`int`): the number of slots to fill.
user_id (:obj:`str`): the public key that identifies the user (33-bytes hex str).
uuid (:obj:`str`): the appointment uuid.
appointment (:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>`): the summary of the new
appointment the user is requesting.
Returns:
:obj:`int`: the number of remaining appointment slots.
Raises:
:obj:`NotEnoughSlots`: if the user subscription does not have enough slots.
:obj:`NotEnoughSlots`: If the user does not have enough slots to fill.
"""
# DISCUSS: we may want to return a different exception if the user does not exist
if user_pk in self.registered_users and n <= self.registered_users.get(user_pk).get("available_slots"):
self.registered_users[user_pk]["available_slots"] -= n
self.user_db.store_user(user_pk, self.registered_users[user_pk])
self.lock.acquire()
# For updates the difference between the existing appointment and the update is computed.
if uuid in self.registered_users[user_id].appointments:
used_slots = self.registered_users[user_id].appointments[uuid]
else:
raise NotEnoughSlots(user_pk, n)
# For regular appointments 1 slot is reserved per ENCRYPTED_BLOB_MAX_SIZE_HEX block.
used_slots = 0
def free_slots(self, user_pk, n):
required_slots = ceil(len(appointment.encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
if required_slots - used_slots <= self.registered_users.get(user_id).available_slots:
# Filling / freeing slots depending on whether this is an update or not, and if it is bigger or smaller than
# the old appointment.
self.registered_users.get(user_id).appointments[uuid] = required_slots
self.registered_users.get(user_id).available_slots -= required_slots - used_slots
else:
self.lock.release()
raise NotEnoughSlots()
self.lock.release()
return self.registered_users.get(user_id).available_slots
def get_expired_appointments(self, block_height):
"""
Frees some slots of a user subscription.
Gets a list of appointments that expire at a given block height.
Args:
user_pk(:obj:`str`): the public key that identifies the user (33-bytes hex str).
n (:obj:`int`): the number of slots to free.
block_height (:obj:`int`): the block height to be checked.
Returns:
:obj:`list`: a list of appointment uuids that will expire at ``block_height``.
"""
# DISCUSS: if the user does not exist we may want to log or return an exception.
if user_pk in self.registered_users:
self.registered_users[user_pk]["available_slots"] += n
self.user_db.store_user(user_pk, self.registered_users[user_pk])
expired_appointments = []
# Avoiding dictionary changed size during iteration
for user_id in list(self.registered_users.keys()):
if block_height == self.registered_users[user_id].subscription_expiry + self.expiry_delta:
expired_appointments.extend(self.registered_users[user_id].appointments)
return expired_appointments
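
A worked example of the slot accounting in ``add_update_appointment`` with dummy blob sizes: one slot is reserved per ``ENCRYPTED_BLOB_MAX_SIZE_HEX`` characters, and updates only settle the difference with what the appointment already uses:
```python
from math import ceil

ENCRYPTED_BLOB_MAX_SIZE_HEX = 2 * 2048  # from common.constants above

new_blob_slots = ceil(5000 / ENCRYPTED_BLOB_MAX_SIZE_HEX)     # 2 slots for a fresh 5000-char blob
update_blob_slots = ceil(3000 / ENCRYPTED_BLOB_MAX_SIZE_HEX)  # 1 slot for a 3000-char replacement
slots_delta = update_blob_slots - new_blob_slots              # -1: one slot is given back to the user
```
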

View File

@@ -3,9 +3,9 @@ import re
from common.logger import Logger
from common.tools import is_locator
from common.constants import LOCATOR_LEN_HEX
from common.appointment import Appointment
from teos import errors, LOG_PREFIX
from teos.extended_appointment import ExtendedAppointment
logger = Logger(actor="Inspector", log_name_prefix=LOG_PREFIX)
@@ -47,9 +47,9 @@ class Inspector:
Args:
appointment_data (:obj:`dict`): a dictionary containing the appointment data.
Returns:
:obj:`Appointment <teos.appointment.Appointment>`: An appointment initialized with the provided data.
:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>`: An appointment initialized with
the provided data.
Raises:
:obj:`InspectionFailed`: if any of the fields is wrong.
@@ -65,12 +65,16 @@ class Inspector:
raise InspectionFailed(errors.UNKNOWN_JSON_RPC_EXCEPTION, "unexpected error occurred")
self.check_locator(appointment_data.get("locator"))
self.check_start_time(appointment_data.get("start_time"), block_height)
self.check_end_time(appointment_data.get("end_time"), appointment_data.get("start_time"), block_height)
self.check_to_self_delay(appointment_data.get("to_self_delay"))
self.check_blob(appointment_data.get("encrypted_blob"))
return Appointment.from_dict(appointment_data)
# Set user_id to None since we still don't know it, it'll be set by the API after querying the gatekeeper
return ExtendedAppointment(
appointment_data.get("locator"),
appointment_data.get("to_self_delay"),
appointment_data.get("encrypted_blob"),
user_id=None,
)
@staticmethod
def check_locator(locator):
@@ -100,87 +104,6 @@ class Inspector:
elif not is_locator(locator):
raise InspectionFailed(errors.APPOINTMENT_WRONG_FIELD_FORMAT, "wrong locator format ({})".format(locator))
@staticmethod
def check_start_time(start_time, block_height):
"""
Checks if the provided ``start_time`` is correct.
Start times must be ahead of the current best chain tip.
Args:
start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches.
block_height (:obj:`int`): the chain height.
Raises:
:obj:`InspectionFailed`: if any of the fields is wrong.
"""
if start_time is None:
raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty start_time received")
elif type(start_time) != int:
raise InspectionFailed(
errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong start_time data type ({})".format(type(start_time))
)
elif start_time < block_height:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "start_time is in the past")
elif start_time == block_height:
raise InspectionFailed(
errors.APPOINTMENT_FIELD_TOO_SMALL,
"start_time is too close to current height. Accepted times are: [current_height+1, current_height+6]",
)
elif start_time > block_height + 6:
raise InspectionFailed(
errors.APPOINTMENT_FIELD_TOO_BIG,
"start_time is too far in the future. Accepted start times are up to 6 blocks in the future",
)
@staticmethod
def check_end_time(end_time, start_time, block_height):
"""
Checks if the provided ``end_time`` is correct.
End times must be ahead of both the ``start_time`` and the current best chain tip.
Args:
end_time (:obj:`int`): the block height at which the tower is requested to stop watching for breaches.
start_time (:obj:`int`): the block height at which the tower is requested to start watching for breaches.
block_height (:obj:`int`): the chain height.
Raises:
:obj:`InspectionFailed`: if any of the fields is wrong.
"""
# TODO: What's too close to the current height is not properly defined. Right now any appointment that ends in
# the future will be accepted (even if it's only one block away).
if end_time is None:
raise InspectionFailed(errors.APPOINTMENT_EMPTY_FIELD, "empty end_time received")
elif type(end_time) != int:
raise InspectionFailed(
errors.APPOINTMENT_WRONG_FIELD_TYPE, "wrong end_time data type ({})".format(type(end_time))
)
elif end_time > block_height + BLOCKS_IN_A_MONTH: # 4320 = roughly a month in blocks
raise InspectionFailed(
errors.APPOINTMENT_FIELD_TOO_BIG, "end_time should be within the next month (<= current_height + 4320)"
)
elif start_time > end_time:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is smaller than start_time")
elif start_time == end_time:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is equal to start_time")
elif block_height > end_time:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is in the past")
elif block_height == end_time:
raise InspectionFailed(errors.APPOINTMENT_FIELD_TOO_SMALL, "end_time is too close to current height")
def check_to_self_delay(self, to_self_delay):
"""
Checks if the provided ``to_self_delay`` is correct.
@@ -217,7 +140,6 @@ class Inspector:
),
)
# ToDo: #6-define-checks-encrypted-blob
@staticmethod
def check_blob(encrypted_blob):
"""

View File

@@ -2,9 +2,11 @@ from queue import Queue
from threading import Thread
from teos import LOG_PREFIX
from common.logger import Logger
from teos.cleaner import Cleaner
from common.logger import Logger
from common.constants import IRREVOCABLY_RESOLVED
CONFIRMATIONS_BEFORE_RETRY = 6
MIN_CONFIRMATIONS = 6
@@ -26,16 +28,15 @@ class TransactionTracker:
dispute_txid (:obj:`str`): the id of the transaction that created the channel breach and triggered the penalty.
penalty_txid (:obj:`str`): the id of the transaction that was encrypted under ``dispute_txid``.
penalty_rawtx (:obj:`str`): the raw transaction that was broadcast as a consequence of the channel breach.
appointment_end (:obj:`int`): the block at which the tower will stop monitoring the blockchain for this
appointment.
user_id(:obj:`str`): the public key that identifies the user (33-bytes hex str).
"""
def __init__(self, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end):
def __init__(self, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id):
self.locator = locator
self.dispute_txid = dispute_txid
self.penalty_txid = penalty_txid
self.penalty_rawtx = penalty_rawtx
self.appointment_end = appointment_end
self.user_id = user_id
@classmethod
def from_dict(cls, tx_tracker_data):
@@ -60,13 +61,13 @@ class TransactionTracker:
dispute_txid = tx_tracker_data.get("dispute_txid")
penalty_txid = tx_tracker_data.get("penalty_txid")
penalty_rawtx = tx_tracker_data.get("penalty_rawtx")
appointment_end = tx_tracker_data.get("appointment_end")
user_id = tx_tracker_data.get("user_id")
if any(v is None for v in [locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end]):
if any(v is None for v in [locator, dispute_txid, penalty_txid, penalty_rawtx, user_id]):
raise ValueError("Wrong transaction tracker data, some fields are missing")
else:
tx_tracker = cls(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
tx_tracker = cls(locator, dispute_txid, penalty_txid, penalty_rawtx, user_id)
return tx_tracker
@@ -83,11 +84,21 @@ class TransactionTracker:
"dispute_txid": self.dispute_txid,
"penalty_txid": self.penalty_txid,
"penalty_rawtx": self.penalty_rawtx,
"appointment_end": self.appointment_end,
"user_id": self.user_id,
}
return tx_tracker
def get_summary(self):
"""
Returns the summary of a tracker, consisting of the locator, the user_id and the penalty_txid.
Returns:
:obj:`dict`: the tracker summary.
"""
return {"locator": self.locator, "user_id": self.user_id, "penalty_txid": self.penalty_txid}
class Responder:
"""
@@ -104,7 +115,7 @@ class Responder:
Attributes:
trackers (:obj:`dict`): A dictionary containing the minimum information about the :obj:`TransactionTracker`
required by the :obj:`Responder` (``penalty_txid``, ``locator`` and ``end_time``).
required by the :obj:`Responder` (``penalty_txid``, ``locator`` and ``user_id``).
Each entry is identified by a ``uuid``.
tx_tracker_map (:obj:`dict`): A ``penalty_txid:uuid`` map used to allow the :obj:`Responder` to deal with
several trackers triggered by the same ``penalty_txid``.
@@ -115,19 +126,22 @@ class Responder:
is populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
to interact with the database.
gatekeeper (:obj:`Gatekeeper <teos.gatekeeper.Gatekeeper>`): a `Gatekeeper` instance in charge of controlling
the user access and subscription expiry.
carrier (:obj:`Carrier <teos.carrier.Carrier>`): a ``Carrier`` instance to send transactions to bitcoind.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get data from bitcoind.
last_known_block (:obj:`str`): the last block known by the ``Responder``.
"""
def __init__(self, db_manager, carrier, block_processor):
def __init__(self, db_manager, gatekeeper, carrier, block_processor):
self.trackers = dict()
self.tx_tracker_map = dict()
self.unconfirmed_txs = []
self.missed_confirmations = dict()
self.block_queue = Queue()
self.db_manager = db_manager
self.gatekeeper = gatekeeper
self.carrier = carrier
self.block_processor = block_processor
self.last_known_block = db_manager.load_last_block_hash_responder()
@@ -169,7 +183,7 @@ class Responder:
return synchronized
def handle_breach(self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, block_hash):
def handle_breach(self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, block_hash):
"""
Requests the :obj:`Responder` to handle a channel breach. This is the entry point of the :obj:`Responder`.
@@ -179,8 +193,7 @@ class Responder:
dispute_txid (:obj:`str`): the id of the transaction that created the channel breach.
penalty_txid (:obj:`str`): the id of the decrypted transaction included in the appointment.
penalty_rawtx (:obj:`str`): the raw transaction to be broadcast in response of the breach.
appointment_end (:obj:`int`): the block height at which the :obj:`Responder` will stop monitoring for this
penalty transaction.
user_id (:obj:`str`): the public key that identifies the user (33-byte hex str).
block_hash (:obj:`str`): the block hash at which the breach was seen (used to see if we are on sync).
Returns:
@@ -191,9 +204,7 @@ class Responder:
receipt = self.carrier.send_transaction(penalty_rawtx, penalty_txid)
if receipt.delivered:
self.add_tracker(
uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, receipt.confirmations
)
self.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, receipt.confirmations)
else:
# TODO: Add the missing reasons (e.g. RPC_VERIFY_REJECTED)
@@ -204,7 +215,7 @@ class Responder:
return receipt
def add_tracker(self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations=0):
def add_tracker(self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, confirmations=0):
"""
Creates a :obj:`TransactionTracker` after successfully broadcasting a ``penalty_tx``.
@@ -217,20 +228,15 @@ class Responder:
dispute_txid (:obj:`str`): the id of the transaction that created the channel breach.
penalty_txid (:obj:`str`): the id of the decrypted transaction included in the appointment.
penalty_rawtx (:obj:`str`): the raw transaction to be broadcast.
appointment_end (:obj:`int`): the block height at which the :obj:`Responder` will stop monitoring for the
tracker.
user_id (:obj:`str`): the public key that identifies the user (33-byte hex str).
confirmations (:obj:`int`): the confirmation count of the ``penalty_tx``. In normal conditions it will be
zero, but if the transaction is already on the blockchain this won't be the case.
"""
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, user_id)
# We only store the penalty_txid, locator and appointment_end in memory. The rest is dumped into the db.
self.trackers[uuid] = {
"penalty_txid": tracker.penalty_txid,
"locator": locator,
"appointment_end": appointment_end,
}
# We only store the penalty_txid, locator and user_id in memory. The rest is dumped into the db.
self.trackers[uuid] = tracker.get_summary()
if penalty_txid in self.tx_tracker_map:
self.tx_tracker_map[penalty_txid].append(uuid)
@@ -244,9 +250,7 @@ class Responder:
self.db_manager.store_responder_tracker(uuid, tracker.to_dict())
logger.info(
"New tracker added", dispute_txid=dispute_txid, penalty_txid=penalty_txid, appointment_end=appointment_end
)
logger.info("New tracker added", dispute_txid=dispute_txid, penalty_txid=penalty_txid, user_id=user_id)
def do_watch(self):
"""
@@ -269,17 +273,28 @@ class Responder:
if len(self.trackers) > 0 and block is not None:
txids = block.get("tx")
completed_trackers = self.get_completed_trackers()
expired_trackers = self.get_expired_trackers(block.get("height"))
trackers_to_delete_gatekeeper = {
uuid: self.trackers[uuid].get("user_id") for uuid in completed_trackers + expired_trackers
}
if self.last_known_block == block.get("previousblockhash"):
self.check_confirmations(txids)
height = block.get("height")
completed_trackers = self.get_completed_trackers(height)
Cleaner.delete_completed_trackers(
completed_trackers, height, self.trackers, self.tx_tracker_map, self.db_manager
Cleaner.delete_trackers(
completed_trackers, block.get("height"), self.trackers, self.tx_tracker_map, self.db_manager
)
Cleaner.delete_trackers(
expired_trackers,
block.get("height"),
self.trackers,
self.tx_tracker_map,
self.db_manager,
expired=True,
)
Cleaner.delete_gatekeeper_appointments(self.gatekeeper, trackers_to_delete_gatekeeper)
txs_to_rebroadcast = self.get_txs_to_rebroadcast()
self.rebroadcast(txs_to_rebroadcast)
self.rebroadcast(self.get_txs_to_rebroadcast())
# NOTCOVERED
else:
@@ -295,7 +310,7 @@ class Responder:
# Clear the receipts issued in this block
self.carrier.issued_receipts = {}
if len(self.trackers) != 0:
if len(self.trackers) == 0:
logger.info("No more pending trackers")
# Register the last processed block for the responder
@@ -326,7 +341,6 @@ class Responder:
for tx in self.unconfirmed_txs:
if tx in self.missed_confirmations:
self.missed_confirmations[tx] += 1
else:
self.missed_confirmations[tx] = 1
@@ -349,26 +363,24 @@ class Responder:
return txs_to_rebroadcast
def get_completed_trackers(self, height):
def get_completed_trackers(self):
"""
Gets the trackers that has already been fulfilled based on a given height (``end_time`` was reached with a
minimum confirmation count).
Args:
height (:obj:`int`): the height of the last received block.
Gets the trackers that have already been fulfilled, that is, those whose justice transaction has been
irrevocably resolved.
Returns:
:obj:`dict`: a dict (``uuid:confirmations``) of the completed trackers.
:obj:`list`: a list with the uuids of the completed trackers.
"""
completed_trackers = {}
completed_trackers = []
# FIXME: This is here for duplicated penalties, we should be able to get rid of it once we prevent duplicates in
# the responder.
checked_txs = {}
for uuid, tracker_data in self.trackers.items():
appointment_end = tracker_data.get("appointment_end")
penalty_txid = tracker_data.get("penalty_txid")
if appointment_end <= height and penalty_txid not in self.unconfirmed_txs:
# Iterate over a copy of the keys to avoid "dictionary changed size during iteration" errors
for uuid in list(self.trackers.keys()):
penalty_txid = self.trackers[uuid].get("penalty_txid")
if penalty_txid not in self.unconfirmed_txs:
if penalty_txid not in checked_txs:
tx = self.carrier.get_transaction(penalty_txid)
else:
@@ -378,16 +390,37 @@ class Responder:
confirmations = tx.get("confirmations")
checked_txs[penalty_txid] = tx
if confirmations is not None and confirmations >= MIN_CONFIRMATIONS:
# The end of the appointment has been reached
completed_trackers[uuid] = confirmations
if confirmations is not None and confirmations >= IRREVOCABLY_RESOLVED:
completed_trackers.append(uuid)
return completed_trackers
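As a quick illustration of the completion rule above, the following self-contained sketch mimics the check with a stubbed confirmation lookup (the value of `IRREVOCABLY_RESOLVED` here is illustrative; the real constant is defined elsewhere in the tower):
```python
IRREVOCABLY_RESOLVED = 100  # illustrative value for this sketch

trackers = {
    "uuid1": {"penalty_txid": "aa" * 32},
    "uuid2": {"penalty_txid": "bb" * 32},
}
unconfirmed_txs = ["bb" * 32]        # txs the Responder has not seen confirm yet
confirmations = {"aa" * 32: 120}     # stub for carrier.get_transaction().get("confirmations")

completed = [
    uuid
    for uuid, data in trackers.items()
    if data["penalty_txid"] not in unconfirmed_txs
    and confirmations.get(data["penalty_txid"], 0) >= IRREVOCABLY_RESOLVED
]
assert completed == ["uuid1"]
```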
def get_expired_trackers(self, height):
"""
Gets the trackers that have expired due to the user's subscription expiring.
Only those trackers whose penalty transaction is not going through (probably because of low fees) are returned;
the rest will eventually be completed once they are irrevocably resolved.
Args:
height (:obj:`int`): the height of the last received block.
Returns:
:obj:`list`: a list with the uuids of the expired trackers.
"""
expired_trackers = [
uuid
for uuid in self.gatekeeper.get_expired_appointments(height)
if self.trackers[uuid].get("penalty_txid") in self.unconfirmed_txs
]
return expired_trackers
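The split between completed and expired trackers can be illustrated with a small self-contained sketch (the gatekeeper's expiry list is stubbed; in the real code it comes from `gatekeeper.get_expired_appointments(height)`):
```python
# Trackers whose penalty tx is confirming are never expired here; they will be
# removed by get_completed_trackers once irrevocably resolved.
expired_by_gatekeeper = ["uuid1", "uuid2"]    # stubbed subscription expiry data
trackers = {
    "uuid1": {"penalty_txid": "aa" * 32},     # confirming -> keep
    "uuid2": {"penalty_txid": "bb" * 32},     # stuck/unconfirmed -> expire
}
unconfirmed_txs = ["bb" * 32]

expired_trackers = [
    uuid for uuid in expired_by_gatekeeper
    if trackers[uuid]["penalty_txid"] in unconfirmed_txs
]
assert expired_trackers == ["uuid2"]
```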
def rebroadcast(self, txs_to_rebroadcast):
"""
Rebroadcasts a ``penalty_tx`` that has missed too many confirmations. In the current approach this would loop
forever if the transaction keeps not getting it.
Rebroadcasts a ``penalty_tx`` that has missed too many confirmations. In the current approach this will loop
until the tracker expires if the penalty transaction keeps getting rejected due to fees.
Potentially, the fees could be bumped here if the transaction has some tower-dedicated outputs (or allows it
through ``ANYONECANPAY`` or something similar).
@@ -436,7 +469,8 @@ class Responder:
"""
for uuid in self.trackers.keys():
# Iterate over a copy of the keys to avoid "dictionary changed size during iteration" errors
for uuid in list(self.trackers.keys()):
tracker = TransactionTracker.from_dict(self.db_manager.load_responder_tracker(uuid))
# First we check if the dispute transaction is known (exists either in mempool or blockchain)
@@ -465,7 +499,7 @@ class Responder:
tracker.dispute_txid,
tracker.penalty_txid,
tracker.penalty_rawtx,
tracker.appointment_end,
tracker.user_id,
block_hash,
)

View File

@@ -70,16 +70,19 @@ def main(command_line_conf):
block_processor = BlockProcessor(bitcoind_connect_params)
carrier = Carrier(bitcoind_connect_params)
responder = Responder(db_manager, carrier, block_processor)
watcher = Watcher(
db_manager,
gatekeeper = Gatekeeper(
UsersDBM(config.get("USERS_DB_PATH")),
block_processor,
responder,
secret_key_der,
config.get("MAX_APPOINTMENTS"),
config.get("DEFAULT_SLOTS"),
config.get("DEFAULT_SUBSCRIPTION_DURATION"),
config.get("EXPIRY_DELTA"),
)
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
watcher = Watcher(
db_manager, gatekeeper, block_processor, responder, secret_key_der, config.get("MAX_APPOINTMENTS")
)
# Create the chain monitor and start monitoring the chain
chain_monitor = ChainMonitor(
watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
@@ -151,9 +154,8 @@ def main(command_line_conf):
# Fire the API and the ChainMonitor
# FIXME: 92-block-data-during-bootstrap-db
chain_monitor.monitor_chain()
gatekeeper = Gatekeeper(UsersDBM(config.get("USERS_DB_PATH")), config.get("DEFAULT_SLOTS"))
inspector = Inspector(block_processor, config.get("MIN_TO_SELF_DELAY"))
API(config.get("API_BIND"), config.get("API_PORT"), inspector, watcher, gatekeeper).start()
API(config.get("API_BIND"), config.get("API_PORT"), inspector, watcher).start()
except Exception as e:
logger.error("An error occurred: {}. Shutting down".format(e))
exit(1)

View File

@@ -8,7 +8,6 @@ Tools is a module with general methods that can used by different entities in th
"""
# NOTCOVERED
def bitcoin_cli(btc_connect_params):
"""
An ``http`` connection with ``bitcoind`` using the ``json-rpc`` interface.

View File

@@ -37,42 +37,42 @@ class UsersDBM(DBManager):
raise e
def store_user(self, user_pk, user_data):
def store_user(self, user_id, user_data):
"""
Stores a user record in the database. ``user_id`` is used as the identifier.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
user_id (:obj:`str`): a 33-byte hex-encoded string identifying the user.
user_data (:obj:`dict`): the user associated data, as a dictionary.
Returns:
:obj:`bool`: True if the user was stored in the database, False otherwise.
"""
if is_compressed_pk(user_pk):
if is_compressed_pk(user_id):
try:
self.create_entry(user_pk, json.dumps(user_data))
logger.info("Adding user to Gatekeeper's db", user_pk=user_pk)
self.create_entry(user_id, json.dumps(user_data))
logger.info("Adding user to Gatekeeper's db", user_id=user_id)
return True
except json.JSONDecodeError:
logger.info("Could't add user to db. Wrong user data format", user_pk=user_pk, user_data=user_data)
logger.info("Could't add user to db. Wrong user data format", user_id=user_id, user_data=user_data)
return False
except TypeError:
logger.info("Could't add user to db", user_pk=user_pk, user_data=user_data)
logger.info("Could't add user to db", user_id=user_id, user_data=user_data)
return False
else:
logger.info("Could't add user to db. Wrong pk format", user_pk=user_pk, user_data=user_data)
logger.info("Could't add user to db. Wrong pk format", user_id=user_id, user_data=user_data)
return False
def load_user(self, user_pk):
def load_user(self, user_id):
"""
Loads a user record from the database using the ``user_id`` as the identifier.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
user_id (:obj:`str`): a 33-byte hex-encoded string identifying the user.
Returns:
:obj:`dict`: A dictionary containing the user data if the ``key`` is found.
@@ -81,31 +81,31 @@ class UsersDBM(DBManager):
"""
try:
data = self.load_entry(user_pk)
data = self.load_entry(user_id)
data = json.loads(data)
except (TypeError, json.decoder.JSONDecodeError):
data = None
return data
def delete_user(self, user_pk):
def delete_user(self, user_id):
"""
Deletes a user record from the database.
Args:
user_pk (:obj:`str`): a 33-byte hex-encoded string identifying the user.
user_id (:obj:`str`): a 33-byte hex-encoded string identifying the user.
Returns:
:obj:`bool`: True if the user was deleted from the database or it was non-existent, False otherwise.
"""
try:
self.delete_entry(user_pk)
logger.info("Deleting user from Gatekeeper's db", uuid=user_pk)
self.delete_entry(user_id)
logger.info("Deleting user from Gatekeeper's db", uuid=user_id)
return True
except TypeError:
logger.info("Cannot delete user from db, user key has wrong type", uuid=user_pk)
logger.info("Cannot delete user from db, user key has wrong type", uuid=user_id)
return False
def load_all_users(self):
@@ -122,7 +122,7 @@ class UsersDBM(DBManager):
for k, v in self.db.iterator():
# Get the user_id and the associated user data from the db
user_pk = k.decode("utf-8")
data[user_pk] = json.loads(v)
user_id = k.decode("utf-8")
data[user_id] = json.loads(v)
return data

View File

@@ -3,22 +3,27 @@ from threading import Thread
from common.logger import Logger
from common.tools import compute_locator
from common.appointment import Appointment
from common.exceptions import BasicException
from common.exceptions import EncryptionError
from common.cryptographer import Cryptographer, hash_160
from common.exceptions import InvalidParameter, SignatureError
from teos import LOG_PREFIX
from teos.cleaner import Cleaner
from teos.extended_appointment import ExtendedAppointment
logger = Logger(actor="Watcher", log_name_prefix=LOG_PREFIX)
class AppointmentLimitReached(BasicException):
"""Raised when the tower maximum appointment count has been reached"""
class Watcher:
"""
The :class:`Watcher` is in charge of watching for channel breaches for the appointments accepted by the tower.
The :class:`Watcher` keeps track of the accepted appointments in ``appointments`` and, for new received block,
The :class:`Watcher` keeps track of the accepted appointments in ``appointments`` and, for each newly received block,
checks if any breach has happened by comparing the txids with the appointment locators. If a breach is seen, the
``encrypted_blob`` of the corresponding appointment is decrypted and the data is passed to the
:obj:`Responder <teos.responder.Responder>`.
@@ -36,24 +41,24 @@ class Watcher:
responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance.
sk_der (:obj:`bytes`): a DER encoded private key used to sign appointment receipts (signaling acceptance).
max_appointments (:obj:`int`): the maximum amount of appointments accepted by the ``Watcher`` at the same time.
expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around.
Attributes:
appointments (:obj:`dict`): a dictionary containing a summary of the appointments (:obj:`Appointment
<teos.appointment.Appointment>` instances) accepted by the tower (``locator``, ``end_time``, and ``size``).
It's populated trough ``add_appointment``.
appointments (:obj:`dict`): a dictionary containing a summary of the appointments (:obj:`ExtendedAppointment
<teos.extended_appointment.ExtendedAppointment>` instances) accepted by the tower (``locator`` and
``user_id``). It's populated through ``add_appointment``.
locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map used to allow the :obj:`Watcher` to deal with several
appointments with the same ``locator``.
block_queue (:obj:`Queue`): A queue used by the :obj:`Watcher` to receive block hashes from ``bitcoind``. It is
populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): a ``AppointmentsDBM`` instance
to interact with the database.
gatekeeper (:obj:`Gatekeeper <teos.gatekeeper.Gatekeeper>`): a `Gatekeeper` instance in charge of controlling
user access and subscription expiry.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a ``BlockProcessor`` instance to
get blocks from bitcoind.
responder (:obj:`Responder <teos.responder.Responder>`): a ``Responder`` instance.
signing_key (:mod:`PrivateKey`): a private key used to sign accepted appointments.
max_appointments (:obj:`int`): the maximum amount of appointments accepted by the ``Watcher`` at the same time.
expiry_delta (:obj:`int`): the additional time the ``Watcher`` will keep an expired appointment around.
last_known_block (:obj:`str`): the last block known by the ``Watcher``.
Raises:
@@ -61,15 +66,15 @@ class Watcher:
"""
def __init__(self, db_manager, block_processor, responder, sk_der, max_appointments, expiry_delta):
def __init__(self, db_manager, gatekeeper, block_processor, responder, sk_der, max_appointments):
self.appointments = dict()
self.locator_uuid_map = dict()
self.block_queue = Queue()
self.db_manager = db_manager
self.gatekeeper = gatekeeper
self.block_processor = block_processor
self.responder = responder
self.max_appointments = max_appointments
self.expiry_delta = expiry_delta
self.signing_key = Cryptographer.load_private_key_der(sk_der)
self.last_known_block = db_manager.load_last_block_hash_watcher()
@@ -81,21 +86,7 @@ class Watcher:
return watcher_thread
def get_appointment_summary(self, uuid):
"""
Returns the summary of an appointment. The summary consists of the data kept in memory:
{locator, end_time, and size}
Args:
uuid (:obj:`str`): a 16-byte hex string identifying the appointment.
Returns:
:obj:`dict` or :obj:`None`: a dictionary with the appointment summary, or ``None`` if the appointment is not
found.
"""
return self.appointments.get(uuid)
def add_appointment(self, appointment, user_pk):
def add_appointment(self, appointment, signature):
"""
Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached.
@@ -111,43 +102,49 @@ class Watcher:
identified by ``uuid`` and stored in ``appointments`` and ``locator_uuid_map``.
Args:
appointment (:obj:`Appointment <teos.appointment.Appointment>`): the appointment to be added to the
:obj:`Watcher`.
user_pk(:obj:`str`): the public key that identifies the user who sent the appointment (33-bytes hex str).
appointment (:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>`): the appointment to
be added to the :obj:`Watcher`.
signature (:obj:`str`): the user's appointment signature (hex-encoded).
Returns:
:obj:`tuple`: A tuple signaling if the appointment has been added or not (based on ``max_appointments``).
The structure looks as follows:
:obj:`dict`: The tower response as a dict, containing: locator, signature, available_slots and
subscription_expiry.
- ``(True, signature)`` if the appointment has been accepted.
- ``(False, None)`` otherwise.
Raises:
:obj:`AppointmentLimitReached`: If the tower cannot hold more appointments (cap reached).
:obj:`AuthenticationFailure <teos.gatekeeper.AuthenticationFailure>`: If the user cannot be authenticated.
:obj:`NotEnoughSlots <teos.gatekeeper.NotEnoughSlots>`: If the user does not have enough available slots,
in which case the appointment is rejected.
"""
if len(self.appointments) < self.max_appointments:
if len(self.appointments) >= self.max_appointments:
message = "Maximum appointments reached, appointment rejected"
logger.info(message, locator=appointment.locator)
raise AppointmentLimitReached(message)
# The uuids are generated as the RIPMED160(locator||user_pubkey), that way the tower does not need to know
# anything about the user from this point on (no need to store user_pk in the database).
user_id = self.gatekeeper.authenticate_user(appointment.serialize(), signature)
# The user_id needs to be added to the ExtendedAppointment once the user has been authenticated
appointment.user_id = user_id
# The uuids are generated as RIPEMD160(locator||user_pubkey).
# If an appointment is requested by the user, the uuid can be recomputed and queried straightaway (no maps).
uuid = hash_160("{}{}".format(appointment.locator, user_pk))
self.appointments[uuid] = {
"locator": appointment.locator,
"end_time": appointment.end_time,
"size": len(appointment.encrypted_blob),
}
uuid = hash_160("{}{}".format(appointment.locator, user_id))
# Add the appointment to the Gatekeeper
available_slots = self.gatekeeper.add_update_appointment(user_id, uuid, appointment)
self.appointments[uuid] = appointment.get_summary()
if appointment.locator in self.locator_uuid_map:
# If the uuid is already in the map it means this is an update.
if uuid not in self.locator_uuid_map[appointment.locator]:
self.locator_uuid_map[appointment.locator].append(uuid)
else:
# Otherwise two users have sent an appointment with the same locator, so we need to store both.
self.locator_uuid_map[appointment.locator] = [uuid]
self.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
self.db_manager.create_append_locator_map(appointment.locator, uuid)
appointment_added = True
try:
signature = Cryptographer.sign(appointment.serialize(), self.signing_key)
@@ -158,13 +155,12 @@ class Watcher:
logger.info("New appointment accepted", locator=appointment.locator)
else:
appointment_added = False
signature = None
logger.info("Maximum appointments reached, appointment rejected", locator=appointment.locator)
return appointment_added, signature
return {
"locator": appointment.locator,
"signature": signature,
"available_slots": available_slots,
"subscription_expiry": self.gatekeeper.registered_users[user_id].subscription_expiry,
}
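The deterministic uuid is what allows a user to query an appointment back without the tower keeping any per-user map: hashing the same `locator||user_id` always yields the same key. A minimal sketch of that property follows (SHA-256 is used here as a stand-in for the tower's `hash_160` helper, so the digests will not match the real implementation):
```python
import hashlib


def appointment_uuid(locator, user_id):
    # Stand-in for hash_160(locator + user_id); any deterministic digest shows the
    # property: the uuid can be recomputed from data both parties already know.
    return hashlib.sha256((locator + user_id).encode()).hexdigest()[:40]


locator = "f0" * 16           # 16-byte locator, hex encoded
user_id = "02" + "ab" * 32    # 33-byte compressed public key, hex encoded

stored = appointment_uuid(locator, user_id)    # computed when the appointment is accepted
queried = appointment_uuid(locator, user_id)   # recomputed when the user asks for it back
assert stored == queried
```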
def do_watch(self):
"""
@@ -187,17 +183,20 @@ class Watcher:
if len(self.appointments) > 0 and block is not None:
txids = block.get("tx")
expired_appointments = [
uuid
for uuid, appointment_data in self.appointments.items()
if block["height"] > appointment_data.get("end_time") + self.expiry_delta
]
expired_appointments = self.gatekeeper.get_expired_appointments(block["height"])
# Make sure we only try to delete what is on the Watcher (some appointments may have been triggered)
expired_appointments = list(set(expired_appointments).intersection(self.appointments.keys()))
# Keep track of the expired appointments before deleting them from memory
appointments_to_delete_gatekeeper = {
uuid: self.appointments[uuid].get("user_id") for uuid in expired_appointments
}
Cleaner.delete_expired_appointments(
expired_appointments, self.appointments, self.locator_uuid_map, self.db_manager
)
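A self-contained sketch of the bookkeeping above: expired uuids reported by the gatekeeper are intersected with what the Watcher still holds (triggered appointments have already moved to the Responder), and the uuid-to-user_id map is built before deletion so the Gatekeeper records can be cleaned afterwards.
```python
watcher_appointments = {
    "uuid1": {"locator": "aa" * 16, "user_id": "02" + "11" * 32},
    "uuid2": {"locator": "bb" * 16, "user_id": "02" + "22" * 32},
}
# uuid3 was already triggered and handed to the Responder, so the Watcher no longer holds it.
expired_per_gatekeeper = ["uuid2", "uuid3"]

expired = list(set(expired_per_gatekeeper).intersection(watcher_appointments.keys()))
to_delete_from_gatekeeper = {uuid: watcher_appointments[uuid]["user_id"] for uuid in expired}

assert expired == ["uuid2"]
assert to_delete_from_gatekeeper == {"uuid2": "02" + "22" * 32}
```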
valid_breaches, invalid_breaches = self.filter_valid_breaches(self.get_breaches(txids))
valid_breaches, invalid_breaches = self.filter_breaches(self.get_breaches(txids))
triggered_flags = []
appointments_to_delete = []
@@ -216,7 +215,7 @@ class Watcher:
breach["dispute_txid"],
breach["penalty_txid"],
breach["penalty_rawtx"],
self.appointments[uuid].get("end_time"),
self.appointments[uuid].get("user_id"),
block_hash,
)
@@ -232,10 +231,18 @@ class Watcher:
appointments_to_delete.extend(invalid_breaches)
self.db_manager.batch_create_triggered_appointment_flag(triggered_flags)
# Update the dictionary with the completed appointments
appointments_to_delete_gatekeeper.update(
{uuid: self.appointments[uuid].get("user_id") for uuid in appointments_to_delete}
)
Cleaner.delete_completed_appointments(
appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager
)
# Remove expired and completed appointments from the Gatekeeper
Cleaner.delete_gatekeeper_appointments(self.gatekeeper, appointments_to_delete_gatekeeper)
if len(self.appointments) != 0:
logger.info("No more pending appointments")
@@ -270,9 +277,9 @@ class Watcher:
return breaches
def filter_valid_breaches(self, breaches):
def filter_breaches(self, breaches):
"""
Filters what of the found breaches contain valid transaction data.
Filters the valid from the invalid channel breaches.
The :obj:`Watcher` cannot tell whether a given ``encrypted_blob`` contains a valid transaction until a breach is seen.
Blobs that contain arbitrary data are dropped and not sent to the :obj:`Responder <teos.responder.Responder>`.
@@ -295,7 +302,7 @@ class Watcher:
for locator, dispute_txid in breaches.items():
for uuid in self.locator_uuid_map[locator]:
appointment = Appointment.from_dict(self.db_manager.load_watcher_appointment(uuid))
appointment = ExtendedAppointment.from_dict(self.db_manager.load_watcher_appointment(uuid))
if appointment.encrypted_blob in decrypted_blobs:
penalty_tx, penalty_rawtx = decrypted_blobs[appointment.encrypted_blob]

View File

@@ -1,5 +1,6 @@
import struct
import binascii
import pytest
from pytest import fixture
from common.appointment import Appointment
@@ -29,20 +30,12 @@ def appointment_data():
def test_init_appointment(appointment_data):
# The appointment has no checks whatsoever, since the inspector is the one taking care of that, and the only one
# creating appointments.
# DISCUSS: whether this makes sense by design or checks should be ported from the inspector to the appointment
# 35-appointment-checks
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
appointment_data["locator"], appointment_data["to_self_delay"], appointment_data["encrypted_blob"]
)
assert (
appointment_data["locator"] == appointment.locator
and appointment_data["start_time"] == appointment.start_time
and appointment_data["end_time"] == appointment.end_time
and appointment_data["to_self_delay"] == appointment.to_self_delay
and appointment_data["encrypted_blob"] == appointment.encrypted_blob
)
@@ -50,19 +43,13 @@ def test_init_appointment(appointment_data):
def test_to_dict(appointment_data):
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
appointment_data["locator"], appointment_data["to_self_delay"], appointment_data["encrypted_blob"]
)
dict_appointment = appointment.to_dict()
assert (
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["to_self_delay"] == dict_appointment["to_self_delay"]
and appointment_data["encrypted_blob"] == dict_appointment["encrypted_blob"]
)
@@ -78,13 +65,9 @@ def test_from_dict(appointment_data):
prev_val = appointment_data[key]
appointment_data[key] = None
try:
with pytest.raises(ValueError, match="Wrong appointment data"):
Appointment.from_dict(appointment_data)
assert False
except ValueError:
appointment_data[key] = prev_val
assert True
def test_serialize(appointment_data):
@@ -99,13 +82,9 @@ def test_serialize(appointment_data):
assert isinstance(serialized_appointment, bytes)
locator = serialized_appointment[:16]
start_time = serialized_appointment[16:20]
end_time = serialized_appointment[20:24]
to_self_delay = serialized_appointment[24:28]
encrypted_blob = serialized_appointment[28:]
to_self_delay = serialized_appointment[16:20]
encrypted_blob = serialized_appointment[20:]
assert binascii.hexlify(locator).decode() == appointment.locator
assert struct.unpack(">I", start_time)[0] == appointment.start_time
assert struct.unpack(">I", end_time)[0] == appointment.end_time
assert struct.unpack(">I", to_self_delay)[0] == appointment.to_self_delay
assert binascii.hexlify(encrypted_blob).decode() == appointment.encrypted_blob

View File

@@ -11,7 +11,6 @@ from common.config_loader import ConfigLoader
getcontext().prec = 10
END_TIME_DELTA = 10
@pytest.fixture(scope="session")
@@ -123,16 +122,8 @@ def create_penalty_tx(bitcoin_cli, decoded_commitment_tx, destination=None):
return signed_penalty_tx.get("hex")
def build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx):
current_height = bitcoin_cli.getblockcount()
appointment_data = {
"tx": penalty_tx,
"tx_id": commitment_tx_id,
"start_time": current_height + 1,
"end_time": current_height + 1 + END_TIME_DELTA,
"to_self_delay": 20,
}
def build_appointment_data(commitment_tx_id, penalty_tx):
appointment_data = {"tx": penalty_tx, "tx_id": commitment_tx_id, "to_self_delay": 20}
return appointment_data

View File

@@ -11,9 +11,13 @@ from cli import teos_cli, DATA_DIR, DEFAULT_CONF, CONF_FILE_NAME
from common.tools import compute_locator
from common.appointment import Appointment
from common.cryptographer import Cryptographer
from teos import DEFAULT_CONF as TEOS_CONF
from teos import DATA_DIR as TEOS_DATA_DIR
from teos import CONF_FILE_NAME as TEOS_CONF_FILE_NAME
from teos.utils.auth_proxy import JSONRPCException
from test.teos.e2e.conftest import (
END_TIME_DELTA,
build_appointment_data,
get_random_value_hex,
create_penalty_tx,
@@ -23,6 +27,8 @@ from test.teos.e2e.conftest import (
)
cli_config = get_config(DATA_DIR, CONF_FILE_NAME, DEFAULT_CONF)
teos_config = get_config(TEOS_DATA_DIR, TEOS_CONF_FILE_NAME, TEOS_CONF)
teos_base_endpoint = "http://{}:{}".format(cli_config.get("API_CONNECT"), cli_config.get("API_PORT"))
teos_add_appointment_endpoint = "{}/add_appointment".format(teos_base_endpoint)
@@ -32,9 +38,7 @@ teos_get_all_appointments_endpoint = "{}/get_all_appointments".format(teos_base_
# Run teosd
teosd_process = run_teosd()
teos_pk, cli_sk, compressed_cli_pk = teos_cli.load_keys(
cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY")
)
teos_id, user_sk, user_id = teos_cli.load_keys(cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY"))
def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
@@ -43,13 +47,13 @@ def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
bitcoin_cli.generatetoaddress(1, addr)
def get_appointment_info(locator, sk=cli_sk):
def get_appointment_info(locator, sk=user_sk):
sleep(1) # Let's add a bit of delay so the state can be updated
return teos_cli.get_appointment(locator, sk, teos_pk, teos_base_endpoint)
return teos_cli.get_appointment(locator, sk, teos_id, teos_base_endpoint)
def add_appointment(appointment_data, sk=cli_sk):
return teos_cli.add_appointment(appointment_data, sk, teos_pk, teos_base_endpoint)
def add_appointment(appointment_data, sk=user_sk):
return teos_cli.add_appointment(appointment_data, sk, teos_id, teos_base_endpoint)
def get_all_appointments():
@@ -63,7 +67,7 @@ def test_commands_non_registered(bitcoin_cli):
# Add appointment
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
with pytest.raises(TowerResponseError):
assert add_appointment(appointment_data)
@@ -75,12 +79,12 @@ def test_commands_non_registered(bitcoin_cli):
def test_commands_registered(bitcoin_cli):
# Test registering and trying again
teos_cli.register(compressed_cli_pk, teos_base_endpoint)
teos_cli.register(user_id, teos_base_endpoint)
# Add appointment
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
appointment, available_slots = add_appointment(appointment_data)
assert isinstance(appointment, Appointment) and isinstance(available_slots, str)
@@ -93,14 +97,15 @@ def test_commands_registered(bitcoin_cli):
def test_appointment_life_cycle(bitcoin_cli):
# First of all we need to register
teos_cli.register(compressed_cli_pk, teos_base_endpoint)
response = teos_cli.register(user_id, teos_base_endpoint)
available_slots = response.get("available_slots")
# After that we can build an appointment and send it to the tower
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
appointment, available_slots = add_appointment(appointment_data)
appointment, signature = add_appointment(appointment_data)
# Get the information from the tower to check that it matches
appointment_info = get_appointment_info(locator)
@@ -137,14 +142,18 @@ def test_appointment_life_cycle(bitcoin_cli):
# If the transaction is not found.
assert False
# Now let's mine some blocks so the appointment reaches its end.
for _ in range(END_TIME_DELTA):
bitcoin_cli.generatetoaddress(1, new_addr)
# Now let's mine some blocks so the appointment reaches its end. We need 100 + EXPIRY_DELTA - 1
bitcoin_cli.generatetoaddress(100 + teos_config.get("EXPIRY_DELTA") - 1, new_addr)
# The appointment is no longer in the tower
with pytest.raises(TowerResponseError):
get_appointment_info(locator)
# Check that the appointment is not in the Gatekeeper by checking the available slots (they should have increased by 1)
# We can do so by topping up the subscription (FIXME: find a better way to check this).
response = teos_cli.register(user_id, teos_base_endpoint)
assert response.get("available_slots") == available_slots + teos_config.get("DEFAULT_SLOTS") + 1
def test_multiple_appointments_life_cycle(bitcoin_cli):
# Tests that get_all_appointments returns all the appointments the tower is storing at various stages in the
@@ -156,7 +165,7 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
# Create five appointments.
for commitment_tx, penalty_tx in zip(commitment_txs, penalty_txs):
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
appointment = {
@@ -190,9 +199,8 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
assert set(responder_locators) == set(breached_appointments)
new_addr = bitcoin_cli.getnewaddress()
# Now let's mine some blocks so the appointment reaches its end.
for _ in range(END_TIME_DELTA):
bitcoin_cli.generatetoaddress(1, new_addr)
# Now let's mine some blocks so the appointment reaches its end. We need 100 + EXPIRY_DELTA - 1
bitcoin_cli.generatetoaddress(100 + teos_config.get("EXPIRY_DELTA") - 1, new_addr)
# The appointment is no longer in the tower
with pytest.raises(TowerResponseError):
@@ -210,7 +218,7 @@ def test_appointment_malformed_penalty(bitcoin_cli):
mod_penalty_tx = mod_penalty_tx.copy(tx_ins=[tx_in])
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex())
appointment_data = build_appointment_data(commitment_tx_id, mod_penalty_tx.hex())
locator = compute_locator(commitment_tx_id)
appointment, _ = add_appointment(appointment_data)
@@ -236,7 +244,7 @@ def test_appointment_wrong_decryption_key(bitcoin_cli):
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
# The appointment data is built using a random 32-byte value.
appointment_data = build_appointment_data(bitcoin_cli, get_random_value_hex(32), penalty_tx)
appointment_data = build_appointment_data(get_random_value_hex(32), penalty_tx)
# We cannot use teos_cli.add_appointment here since it computes the locator internally, so let's do it manually.
# We will encrypt the blob using the random value and derive the locator from the commitment tx.
@@ -244,7 +252,7 @@ def test_appointment_wrong_decryption_key(bitcoin_cli):
appointment_data["encrypted_blob"] = Cryptographer.encrypt(penalty_tx, get_random_value_hex(32))
appointment = Appointment.from_dict(appointment_data)
signature = Cryptographer.sign(appointment.serialize(), cli_sk)
signature = Cryptographer.sign(appointment.serialize(), user_sk)
data = {"appointment": appointment.to_dict(), "signature": signature}
# Send appointment to the server.
@@ -254,7 +262,7 @@ def test_appointment_wrong_decryption_key(bitcoin_cli):
# Check that the server has accepted the appointment
signature = response_json.get("signature")
rpk = Cryptographer.recover_pk(appointment.serialize(), signature)
assert teos_pk == Cryptographer.get_compressed_pk(rpk)
assert teos_id == Cryptographer.get_compressed_pk(rpk)
assert response_json.get("locator") == appointment.locator
# Trigger the appointment
@@ -273,7 +281,7 @@ def test_two_identical_appointments(bitcoin_cli):
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
# Send the appointment twice
@@ -301,22 +309,22 @@ def test_two_identical_appointments(bitcoin_cli):
# commitment_tx, penalty_tx = create_txs(bitcoin_cli)
# commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
#
# appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
# appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
# locator = compute_locator(commitment_tx_id)
#
# # tmp keys from a different user
# tmp_sk = PrivateKey()
# tmp_compressed_pk = hexlify(tmp_sk.public_key.format(compressed=True)).decode("utf-8")
# teos_cli.register(tmp_compressed_pk, teos_base_endpoint)
# tmp_user_sk = PrivateKey()
# tmp_user_id = hexlify(tmp_user_sk.public_key.format(compressed=True)).decode("utf-8")
# teos_cli.register(tmp_user_id, teos_base_endpoint)
#
# # Send the appointment twice
# assert add_appointment(appointment_data) is True
# assert add_appointment(appointment_data, sk=tmp_sk) is True
# assert add_appointment(appointment_data, sk=tmp_user_sk) is True
#
# # Check that we can get it from both users
# appointment_info = get_appointment_info(locator)
# assert appointment_info.get("status") == "being_watched"
# appointment_info = get_appointment_info(locator, sk=tmp_sk)
# appointment_info = get_appointment_info(locator, sk=tmp_user_sk)
# assert appointment_info.get("status") == "being_watched"
#
# # Broadcast the commitment transaction and mine a block
@@ -326,7 +334,7 @@ def test_two_identical_appointments(bitcoin_cli):
# # The last appointment should have made it to the Responder
# sleep(1)
# appointment_info = get_appointment_info(locator)
# appointment_dup_info = get_appointment_info(locator, sk=tmp_sk)
# appointment_dup_info = get_appointment_info(locator, sk=tmp_user_sk)
#
# # One of the two request must be None, while the other must be valid
# assert (appointment_info is None and appointment_dup_info is not None) or (
@@ -349,17 +357,17 @@ def test_two_appointment_same_locator_different_penalty_different_users(bitcoin_
new_addr = bitcoin_cli.getnewaddress()
penalty_tx2 = create_penalty_tx(bitcoin_cli, decoded_commitment_tx, new_addr)
appointment1_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx1)
appointment2_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx2)
appointment1_data = build_appointment_data(commitment_tx_id, penalty_tx1)
appointment2_data = build_appointment_data(commitment_tx_id, penalty_tx2)
locator = compute_locator(commitment_tx_id)
# tmp keys for a different user
tmp_sk = PrivateKey()
tmp_compressed_pk = hexlify(tmp_sk.public_key.format(compressed=True)).decode("utf-8")
teos_cli.register(tmp_compressed_pk, teos_base_endpoint)
tmp_user_sk = PrivateKey()
tmp_user_id = hexlify(tmp_user_sk.public_key.format(compressed=True)).decode("utf-8")
teos_cli.register(tmp_user_id, teos_base_endpoint)
appointment, _ = add_appointment(appointment1_data)
appointment_2, _ = add_appointment(appointment2_data, sk=tmp_sk)
appointment_2, _ = add_appointment(appointment2_data, sk=tmp_user_sk)
# Broadcast the commitment transaction and mine a block
new_addr = bitcoin_cli.getnewaddress()
@@ -370,7 +378,7 @@ def test_two_appointment_same_locator_different_penalty_different_users(bitcoin_
appointment_info = None
with pytest.raises(TowerResponseError):
appointment_info = get_appointment_info(locator)
appointment2_info = get_appointment_info(locator, sk=tmp_sk)
appointment2_info = get_appointment_info(locator, sk=tmp_user_sk)
if appointment_info is None:
appointment_info = appointment2_info
@@ -388,7 +396,7 @@ def test_appointment_shutdown_teos_trigger_back_online(bitcoin_cli):
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
appointment, _ = add_appointment(appointment_data)
@@ -421,7 +429,7 @@ def test_appointment_shutdown_teos_trigger_while_offline(bitcoin_cli):
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
appointment, _ = add_appointment(appointment_data)

View File

@@ -12,15 +12,14 @@ from bitcoind_mock.transaction import create_dummy_transaction
from teos import DEFAULT_CONF
from teos.carrier import Carrier
from teos.tools import bitcoin_cli
from teos.users_dbm import UsersDBM
from teos.gatekeeper import Gatekeeper
from teos.responder import TransactionTracker
from teos.block_processor import BlockProcessor
from teos.appointments_dbm import AppointmentsDBM
from teos.extended_appointment import ExtendedAppointment
from common.tools import compute_locator
from common.appointment import Appointment
from common.constants import LOCATOR_LEN_HEX
from common.config_loader import ConfigLoader
from common.cryptographer import Cryptographer
@@ -81,8 +80,14 @@ def block_processor():
@pytest.fixture(scope="module")
def gatekeeper(user_db_manager):
return Gatekeeper(user_db_manager, get_config().get("DEFAULT_SLOTS"))
def gatekeeper(user_db_manager, block_processor):
return Gatekeeper(
user_db_manager,
block_processor,
get_config().get("DEFAULT_SLOTS"),
get_config().get("DEFAULT_SUBSCRIPTION_DURATION"),
get_config().get("EXPIRY_DELTA"),
)
def generate_keypair():
@@ -98,11 +103,21 @@ def get_random_value_hex(nbytes):
return prv_hex.zfill(2 * nbytes)
def generate_block():
def generate_block_w_delay():
requests.post(url="http://{}:{}/generate".format(BTC_RPC_HOST, BTC_RPC_PORT), timeout=5)
sleep(0.5)
def generate_blocks_w_delay(n):
for _ in range(n):
generate_block()
sleep(0.2)
def generate_block():
requests.post(url="http://{}:{}/generate".format(BTC_RPC_HOST, BTC_RPC_PORT), timeout=5)
def generate_blocks(n):
for _ in range(n):
generate_block()
@@ -113,38 +128,23 @@ def fork(block_hash):
requests.post(fork_endpoint, json={"parent": block_hash})
def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
if real_height:
current_height = bitcoin_cli(bitcoind_connect_params).getblockcount()
else:
current_height = 10
def generate_dummy_appointment():
dispute_tx = create_dummy_transaction()
dispute_txid = dispute_tx.tx_id.hex()
penalty_tx = create_dummy_transaction(dispute_txid)
dummy_appointment_data = {
"tx": penalty_tx.hex(),
"tx_id": dispute_txid,
"start_time": current_height + start_time_offset,
"end_time": current_height + end_time_offset,
"to_self_delay": 20,
}
locator = compute_locator(dispute_txid)
dummy_appointment_data = {"tx": penalty_tx.hex(), "tx_id": dispute_txid, "to_self_delay": 20}
encrypted_blob = Cryptographer.encrypt(dummy_appointment_data.get("tx"), dummy_appointment_data.get("tx_id"))
appointment_data = {
"locator": locator,
"start_time": dummy_appointment_data.get("start_time"),
"end_time": dummy_appointment_data.get("end_time"),
"to_self_delay": dummy_appointment_data.get("to_self_delay"),
"encrypted_blob": encrypted_blob,
"user_id": get_random_value_hex(16),
}
return Appointment.from_dict(appointment_data), dispute_tx.hex()
return ExtendedAppointment.from_dict(appointment_data), dispute_tx.hex()
def generate_dummy_tracker():
@@ -158,7 +158,7 @@ def generate_dummy_tracker():
dispute_txid=dispute_txid,
penalty_txid=penalty_txid,
penalty_rawtx=penalty_rawtx,
appointment_end=100,
user_id=get_random_value_hex(16),
)
return TransactionTracker.from_dict(tracker_data)

View File

@@ -6,6 +6,7 @@ from teos.api import API
import teos.errors as errors
from teos.watcher import Watcher
from teos.inspector import Inspector
from teos.gatekeeper import UserInfo
from teos.appointments_dbm import AppointmentsDBM
from teos.responder import Responder, TransactionTracker
@@ -39,8 +40,8 @@ appointments = {}
locator_dispute_tx_map = {}
client_sk, client_pk = generate_keypair()
compressed_client_pk = hexlify(client_pk.format(compressed=True)).decode("utf-8")
user_sk, user_pk = generate_keypair()
user_id = hexlify(user_pk.format(compressed=True)).decode("utf-8")
@pytest.fixture()
@@ -58,10 +59,10 @@ def get_all_db_manager():
def api(db_manager, carrier, block_processor, gatekeeper, run_bitcoind):
sk, pk = generate_keypair()
responder = Responder(db_manager, carrier, block_processor)
watcher = Watcher(db_manager, block_processor, responder, sk.to_der(), MAX_APPOINTMENTS, config.get("EXPIRY_DELTA"))
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
watcher = Watcher(db_manager, gatekeeper, block_processor, responder, sk.to_der(), MAX_APPOINTMENTS)
inspector = Inspector(block_processor, config.get("MIN_TO_SELF_DELAY"))
api = API(config.get("API_HOST"), config.get("API_PORT"), inspector, watcher, gatekeeper)
api = API(config.get("API_HOST"), config.get("API_PORT"), inspector, watcher)
return api
@@ -85,47 +86,52 @@ def appointment():
return appointment
def add_appointment(client, appointment_data, user_pk):
def add_appointment(client, appointment_data, user_id):
r = client.post(add_appointment_endpoint, json=appointment_data)
if r.status_code == HTTP_OK:
locator = appointment_data.get("appointment").get("locator")
uuid = hash_160("{}{}".format(locator, user_pk))
uuid = hash_160("{}{}".format(locator, user_id))
appointments[uuid] = appointment_data["appointment"]
return r
def test_register(client):
data = {"public_key": compressed_client_pk}
def test_register(client, api):
current_height = api.watcher.block_processor.get_block_count()
data = {"public_key": user_id}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_OK
assert r.json.get("public_key") == compressed_client_pk
assert r.json.get("public_key") == user_id
assert r.json.get("available_slots") == config.get("DEFAULT_SLOTS")
assert r.json.get("subscription_expiry") == current_height + config.get("DEFAULT_SUBSCRIPTION_DURATION")
def test_register_top_up(client):
# Calling register more than once will give us DEFAULT_SLOTS * number_of_calls slots
def test_register_top_up(client, api):
# Calling register more than once will give us DEFAULT_SLOTS * number_of_calls slots.
# It will also refresh the expiry.
temp_sk, tmp_pk = generate_keypair()
tmp_pk_hex = hexlify(tmp_pk.format(compressed=True)).decode("utf-8")
tmp_user_id = hexlify(tmp_pk.format(compressed=True)).decode("utf-8")
current_height = api.watcher.block_processor.get_block_count()
data = {"public_key": tmp_pk_hex}
data = {"public_key": tmp_user_id}
for i in range(10):
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_OK
assert r.json.get("public_key") == tmp_pk_hex
assert r.json.get("public_key") == tmp_user_id
assert r.json.get("available_slots") == config.get("DEFAULT_SLOTS") * (i + 1)
assert r.json.get("subscription_expiry") == current_height + config.get("DEFAULT_SUBSCRIPTION_DURATION")
def test_register_no_client_pk(client):
data = {"public_key": compressed_client_pk + compressed_client_pk}
data = {}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_BAD_REQUEST
def test_register_wrong_client_pk(client):
data = {}
data = {"public_key": user_id + user_id}
r = client.post(register_endpoint, json=data)
assert r.status_code == HTTP_BAD_REQUEST
@@ -141,52 +147,50 @@ def test_register_json_no_inner_dict(client):
def test_add_appointment(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Simulate the user registration (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# Properly formatted appointment
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == 0
def test_add_appointment_no_json(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Simulate the user registration (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# Properly formatted appointment
# No JSON data
r = client.post(add_appointment_endpoint, data="random_message")
assert r.status_code == HTTP_BAD_REQUEST
assert "Request is not json encoded" in r.json.get("error")
def test_add_appointment_json_no_inner_dict(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
# Simulate the user registration (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# Properly formatted appointment
# JSON data with no inner dict (invalid data format)
r = client.post(add_appointment_endpoint, json="random_message")
assert r.status_code == HTTP_BAD_REQUEST
assert "Invalid request content" in r.json.get("error")
def test_add_appointment_wrong(api, client, appointment):
# Simulate the user registration
api.gatekeeper.registered_users[compressed_client_pk] = 1
# Simulate the user registration (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# Incorrect appointment
# Incorrect appointment (properly formatted, wrong data)
appointment.to_self_delay = 0
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_FIELD_TOO_SMALL) in r.json.get("error")
def test_add_appointment_not_registered(api, client, appointment):
# Properly formatted appointment
# Properly formatted appointment, user is not registered
tmp_sk, tmp_pk = generate_keypair()
tmp_compressed_pk = hexlify(tmp_pk.format(compressed=True)).decode("utf-8")
@@ -199,45 +203,39 @@ def test_add_appointment_not_registered(api, client, appointment):
def test_add_appointment_registered_no_free_slots(api, client, appointment):
# Empty the user slots
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 0}
# Empty the user slots (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=0, subscription_expiry=0)
# Properly formatted appointment
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
# Properly formatted appointment, user has no available slots
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS) in r.json.get("error")
def test_add_appointment_registered_not_enough_free_slots(api, client, appointment):
# Give some slots to the user
api.gatekeeper.registered_users[compressed_client_pk] = 1
# Give some slots to the user (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# Properly formatted appointment
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
# Properly formatted appointment, user does not have enough slots
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Let's create a big blob
appointment.encrypted_blob = TWO_SLOTS_BLOTS
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST
assert "Error {}:".format(errors.APPOINTMENT_INVALID_SIGNATURE_OR_INSUFFICIENT_SLOTS) in r.json.get("error")
def test_add_appointment_multiple_times_same_user(api, client, appointment, n=MULTIPLE_APPOINTMENTS):
# Multiple appointments with the same locator should be valid and counted as updates
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
# Multiple appointments with the same locator should be valid and count as updates
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Simulate registering enough slots
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": n}
# Simulate registering enough slots (end time does not matter here)
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=n, subscription_expiry=0)
for _ in range(n):
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == n - 1
@@ -246,6 +244,7 @@ def test_add_appointment_multiple_times_same_user(api, client, appointment, n=MU
def test_add_appointment_multiple_times_different_users(api, client, appointment, n=MULTIPLE_APPOINTMENTS):
# If the same appointment comes from different users, all are kept
# Create user keys and appointment signatures
user_keys = [generate_keypair() for _ in range(n)]
signatures = [Cryptographer.sign(appointment.serialize(), key[0]) for key in user_keys]
@@ -254,7 +253,7 @@ def test_add_appointment_multiple_times_different_users(api, client, appointment
# Add one slot per public key
for pair in user_keys:
tmp_compressed_pk = hexlify(pair[1].format(compressed=True)).decode("utf-8")
api.gatekeeper.registered_users[tmp_compressed_pk] = {"available_slots": 2}
api.watcher.gatekeeper.registered_users[tmp_compressed_pk] = UserInfo(available_slots=2, subscription_expiry=0)
# Send the appointments
for compressed_pk, signature in zip(compressed_pks, signatures):
@@ -268,77 +267,61 @@ def test_add_appointment_multiple_times_different_users(api, client, appointment
def test_add_appointment_update_same_size(api, client, appointment):
# Update an appointment by one of the same size and check that no additional slots are filled
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 1}
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
# # Since we will replace the appointment, we won't added to appointments
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
# The user has no additional slots, but it should be able to update
# Let's just reverse the encrypted blob for example
appointment.encrypted_blob = appointment.encrypted_blob[::-1]
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
def test_add_appointment_update_bigger(api, client, appointment):
# Update an appointment by a bigger one, and check that additional slots are filled
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 2}
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=2, subscription_expiry=0)
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
# The user has one slot, so it should be able to update as long as it only takes 1 additional slot
appointment.encrypted_blob = TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
# Check that it'll fail if not enough slots are available
# Double the size from before
appointment.encrypted_blob = TWO_SLOTS_BLOTS + TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST
def test_add_appointment_update_smaller(api, client, appointment):
# Update an appointment by a smaller one, and check that slots are freed
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 2}
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=2, subscription_expiry=0)
# This should take 2 slots
appointment.encrypted_blob = TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
# Let's update with one just small enough
appointment.encrypted_blob = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX - 2)
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
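The update tests above rely on the slot count scaling with the size of the encrypted blob: one slot per `ENCRYPTED_BLOB_MAX_SIZE_HEX` characters, rounded up. A minimal sketch of that accounting, using an assumed constant value and a standalone helper rather than the Gatekeeper's actual method:

```python
from math import ceil

# Illustrative value only; the real constant lives in common.constants
ENCRYPTED_BLOB_MAX_SIZE_HEX = 4096


def required_slots(encrypted_blob):
    """Returns how many slots an appointment with this blob would consume (sketch)."""
    return ceil(len(encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX)


# A blob just under the limit takes 1 slot, one character over the limit takes 2
assert required_slots("A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX - 2)) == 1
assert required_slots("A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX + 1)) == 2
```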
def test_add_too_many_appointment(api, client):
# Give slots to the user
api.gatekeeper.registered_users[compressed_client_pk] = {"available_slots": 200}
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=200, subscription_expiry=0)
free_appointment_slots = MAX_APPOINTMENTS - len(api.watcher.appointments)
@@ -346,10 +329,8 @@ def test_add_too_many_appointment(api, client):
appointment, dispute_tx = generate_dummy_appointment()
locator_dispute_tx_map[appointment.locator] = dispute_tx
appointment_signature = Cryptographer.sign(appointment.serialize(), client_sk)
r = add_appointment(
client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, compressed_client_pk
)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
if i < free_appointment_slots:
assert r.status_code == HTTP_OK
@@ -360,14 +341,16 @@ def test_add_too_many_appointment(api, client):
def test_get_appointment_no_json(api, client, appointment):
r = client.post(add_appointment_endpoint, data="random_message")
assert r.status_code == HTTP_BAD_REQUEST
assert "Request is not json encoded" in r.json.get("error")
def test_get_appointment_json_no_inner_dict(api, client, appointment):
r = client.post(add_appointment_endpoint, json="random_message")
assert r.status_code == HTTP_BAD_REQUEST
assert "Invalid request content" in r.json.get("error")
def test_request_random_appointment_registered_user(client, user_sk=client_sk):
def test_get_random_appointment_registered_user(client, user_sk=user_sk):
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
message = "get appointment {}".format(locator)
signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
@@ -381,60 +364,62 @@ def test_request_random_appointment_registered_user(client, user_sk=client_sk):
assert received_appointment.get("status") == "not_found"
def test_request_appointment_not_registered_user(client):
def test_get_appointment_not_registered_user(client):
# Not registered users have no associated appointments, so this should fail
tmp_sk, tmp_pk = generate_keypair()
# The tower is designed so that a non-existent appointment and a request from a non-registered user return the same error to
# prevent probing.
test_request_random_appointment_registered_user(client, tmp_sk)
test_get_random_appointment_registered_user(client, tmp_sk)
def test_request_appointment_in_watcher(api, client, appointment):
def test_get_appointment_in_watcher(api, client, appointment):
# Mock the appointment in the Watcher
uuid = hash_160("{}{}".format(appointment.locator, compressed_client_pk))
uuid = hash_160("{}{}".format(appointment.locator, user_id))
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
# Next we can request it
message = "get appointment {}".format(appointment.locator)
signature = Cryptographer.sign(message.encode("utf-8"), client_sk)
signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
data = {"locator": appointment.locator, "signature": signature}
r = client.post(get_appointment_endpoint, json=data)
assert r.status_code == HTTP_OK
# Check that the appointment is on the watcher
# Check that the appointment is on the Watcher
assert r.json.get("status") == "being_watched"
# Check that the sent appointment matches the received one
appointment_dict = appointment.to_dict()
appointment_dict.pop("user_id")
assert r.json.get("locator") == appointment.locator
assert appointment.to_dict() == r.json.get("appointment")
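The mock above keys the stored appointment by hash_160 over the concatenation of locator and user_id, so each (appointment, user) pair gets its own uuid. A hypothetical stand-in for that derivation (sha256 is used here purely for illustration; the tower has its own hash_160 helper):

```python
import hashlib


def appointment_uuid_sketch(locator, user_id):
    """Derives a deterministic appointment id from (locator, user_id).

    Stand-in only: the point is that the uuid is a hash over the concatenation
    of both values, so the same locator sent by two users maps to two uuids.
    """
    return hashlib.sha256("{}{}".format(locator, user_id).encode("utf-8")).hexdigest()[:40]


print(appointment_uuid_sketch("aa" * 16, "02" + "bb" * 32))
```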
def test_request_appointment_in_responder(api, client, appointment):
def test_get_appointment_in_responder(api, client, appointment):
# Mock the appointment in the Responder
tracker_data = {
"locator": appointment.locator,
"dispute_txid": get_random_value_hex(32),
"penalty_txid": get_random_value_hex(32),
"penalty_rawtx": get_random_value_hex(250),
"appointment_end": appointment.end_time,
"user_id": get_random_value_hex(16),
}
tx_tracker = TransactionTracker.from_dict(tracker_data)
uuid = hash_160("{}{}".format(appointment.locator, compressed_client_pk))
uuid = hash_160("{}{}".format(appointment.locator, user_id))
api.watcher.db_manager.create_triggered_appointment_flag(uuid)
api.watcher.responder.db_manager.store_responder_tracker(uuid, tx_tracker.to_dict())
# Request back the data
message = "get appointment {}".format(appointment.locator)
signature = Cryptographer.sign(message.encode("utf-8"), client_sk)
signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
data = {"locator": appointment.locator, "signature": signature}
# Next we can request it
r = client.post(get_appointment_endpoint, json=data)
assert r.status_code == HTTP_OK
# Check that the appointment is on the watcher
# Check that the appointment is on the Responder
assert r.json.get("status") == "dispute_responded"
# Check that the sent appointment matches the received one
@@ -442,10 +427,9 @@ def test_request_appointment_in_responder(api, client, appointment):
assert tx_tracker.dispute_txid == r.json.get("appointment").get("dispute_txid")
assert tx_tracker.penalty_txid == r.json.get("appointment").get("penalty_txid")
assert tx_tracker.penalty_rawtx == r.json.get("appointment").get("penalty_rawtx")
assert tx_tracker.appointment_end == r.json.get("appointment").get("appointment_end")
def test_get_all_appointments_watcher(api, client, get_all_db_manager, appointment):
def test_get_all_appointments_watcher(api, client, get_all_db_manager):
# Let's reset the dbs so we can test this clean
api.watcher.db_manager = get_all_db_manager
api.watcher.responder.db_manager = get_all_db_manager
@@ -459,6 +443,7 @@ def test_get_all_appointments_watcher(api, client, get_all_db_manager, appointme
non_triggered_appointments = {}
for _ in range(10):
uuid = get_random_value_hex(16)
appointment, _ = generate_dummy_appointment()
appointment.locator = get_random_value_hex(16)
non_triggered_appointments[uuid] = appointment.to_dict()
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
@@ -466,12 +451,13 @@ def test_get_all_appointments_watcher(api, client, get_all_db_manager, appointme
triggered_appointments = {}
for _ in range(10):
uuid = get_random_value_hex(16)
appointment, _ = generate_dummy_appointment()
appointment.locator = get_random_value_hex(16)
triggered_appointments[uuid] = appointment.to_dict()
api.watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
api.watcher.db_manager.create_triggered_appointment_flag(uuid)
# We should only get check the non-triggered appointments
# We should only get the non-triggered appointments
r = client.get(get_all_appointment_endpoint)
assert r.status_code == HTTP_OK
@@ -501,7 +487,7 @@ def test_get_all_appointments_responder(api, client, get_all_db_manager):
"dispute_txid": get_random_value_hex(32),
"penalty_txid": get_random_value_hex(32),
"penalty_rawtx": get_random_value_hex(250),
"appointment_end": 20,
"user_id": get_random_value_hex(16),
}
tracker = TransactionTracker.from_dict(tracker_data)
tx_trackers[uuid] = tracker.to_dict()

View File

@@ -19,7 +19,7 @@ from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appoint
@pytest.fixture(scope="module")
def watcher_appointments():
return {uuid4().hex: generate_dummy_appointment(real_height=False)[0] for _ in range(10)}
return {uuid4().hex: generate_dummy_appointment()[0] for _ in range(10)}
@pytest.fixture(scope="module")
@@ -215,7 +215,7 @@ def test_store_load_triggered_appointment(db_manager):
assert db_watcher_appointments == db_watcher_appointments_with_triggered
# Create an appointment flagged as triggered
triggered_appointment, _ = generate_dummy_appointment(real_height=False)
triggered_appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
assert db_manager.store_watcher_appointment(uuid, triggered_appointment.to_dict()) is True
db_manager.create_triggered_appointment_flag(uuid)

View File

@@ -4,6 +4,7 @@ from queue import Queue
from teos.builder import Builder
from teos.watcher import Watcher
from teos.tools import bitcoin_cli
from teos.responder import Responder
from test.teos.unit.conftest import (
@@ -11,7 +12,6 @@ from test.teos.unit.conftest import (
generate_dummy_appointment,
generate_dummy_tracker,
generate_block,
bitcoin_cli,
get_config,
bitcoind_connect_params,
generate_keypair,
@@ -25,7 +25,7 @@ def test_build_appointments():
# Create some appointment data
for i in range(10):
appointment, _ = generate_dummy_appointment(real_height=False)
appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
appointments_data[uuid] = appointment.to_dict()
@@ -33,7 +33,7 @@ def test_build_appointments():
# Add some additional appointments that share the same locator to test all the builder's cases
if i % 2 == 0:
locator = appointment.locator
appointment, _ = generate_dummy_appointment(real_height=False)
appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
appointment.locator = locator
@@ -46,8 +46,7 @@ def test_build_appointments():
for uuid, appointment in appointments.items():
assert uuid in appointments_data.keys()
assert appointments_data[uuid].get("locator") == appointment.get("locator")
assert appointments_data[uuid].get("end_time") == appointment.get("end_time")
assert len(appointments_data[uuid].get("encrypted_blob")) == appointment.get("size")
assert appointments_data[uuid].get("user_id") == appointment.get("user_id")
assert uuid in locator_uuid_map[appointment.get("locator")]
@@ -76,7 +75,7 @@ def test_build_trackers():
assert tracker.get("penalty_txid") == trackers_data[uuid].get("penalty_txid")
assert tracker.get("locator") == trackers_data[uuid].get("locator")
assert tracker.get("appointment_end") == trackers_data[uuid].get("appointment_end")
assert tracker.get("user_id") == trackers_data[uuid].get("user_id")
assert uuid in tx_tracker_map[tracker.get("penalty_txid")]
@@ -95,14 +94,14 @@ def test_populate_block_queue():
assert len(blocks) == 0
def test_update_states_empty_list(db_manager, carrier, block_processor):
def test_update_states_empty_list(db_manager, gatekeeper, carrier, block_processor):
w = Watcher(
db_manager=db_manager,
gatekeeper=gatekeeper,
block_processor=block_processor,
responder=Responder(db_manager, carrier, block_processor),
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
expiry_delta=config.get("EXPIRY_DELTA"),
)
missed_blocks_watcher = []
@@ -116,14 +115,14 @@ def test_update_states_empty_list(db_manager, carrier, block_processor):
Builder.update_states(w, missed_blocks_responder, missed_blocks_watcher)
def test_update_states_responder_misses_more(run_bitcoind, db_manager, carrier, block_processor):
def test_update_states_responder_misses_more(run_bitcoind, db_manager, gatekeeper, carrier, block_processor):
w = Watcher(
db_manager=db_manager,
gatekeeper=gatekeeper,
block_processor=block_processor,
responder=Responder(db_manager, carrier, block_processor),
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
expiry_delta=config.get("EXPIRY_DELTA"),
)
blocks = []
@@ -140,15 +139,15 @@ def test_update_states_responder_misses_more(run_bitcoind, db_manager, carrier,
assert w.responder.last_known_block == blocks[-1]
def test_update_states_watcher_misses_more(db_manager, carrier, block_processor):
def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier, block_processor):
# Same as before, but data is now in the Responder
w = Watcher(
db_manager=db_manager,
gatekeeper=gatekeeper,
block_processor=block_processor,
responder=Responder(db_manager, carrier, block_processor),
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
expiry_delta=config.get("EXPIRY_DELTA"),
)
blocks = []

View File

@@ -1,8 +1,9 @@
import random
from uuid import uuid4
from teos.responder import TransactionTracker
from teos.cleaner import Cleaner
from teos.gatekeeper import UserInfo
from teos.responder import TransactionTracker
from common.appointment import Appointment
from test.teos.unit.conftest import get_random_value_hex
@@ -23,7 +24,7 @@ def set_up_appointments(db_manager, total_appointments):
uuid = uuid4().hex
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
appointment = Appointment(locator, None, None, None, None)
appointment = Appointment(locator, None, None)
appointments[uuid] = {"locator": appointment.locator}
locator_uuid_map[locator] = [uuid]
@@ -156,7 +157,8 @@ def test_flag_triggered_appointments(db_manager):
assert set(triggered_appointments).issubset(db_appointments)
def test_delete_completed_trackers_db_match(db_manager):
def test_delete_trackers_db_match(db_manager):
# Completed and expired trackers are deleted using the same method. The only difference is the logging message
height = 0
for _ in range(ITERATIONS):
@@ -165,12 +167,12 @@ def test_delete_completed_trackers_db_match(db_manager):
completed_trackers = {tracker: 6 for tracker in selected_trackers}
Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
assert not set(completed_trackers).issubset(trackers.keys())
def test_delete_completed_trackers_no_db_match(db_manager):
def test_delete_trackers_no_db_match(db_manager):
height = 0
for _ in range(ITERATIONS):
@@ -203,5 +205,38 @@ def test_delete_completed_trackers_no_db_match(db_manager):
completed_trackers = {tracker: 6 for tracker in selected_trackers}
# We should be able to delete the correct ones and not fail in the others
Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
assert not set(completed_trackers).issubset(trackers.keys())
def test_delete_gatekeeper_appointments(gatekeeper):
# delete_gatekeeper_appointments should delete the appointments from the user as long as both the user and the appointment exist
appointments_not_to_delete = {}
appointments_to_delete = {}
# Let's add some users and appointments to the Gatekeeper
for _ in range(10):
user_id = get_random_value_hex(16)
# The UserInfo params do not matter much here
gatekeeper.registered_users[user_id] = UserInfo(available_slots=100, subscription_expiry=0)
for _ in range(random.randint(0, 10)):
# Add some appointments
uuid = get_random_value_hex(16)
gatekeeper.registered_users[user_id].appointments[uuid] = 1
if random.randint(0, 1) % 2:
appointments_to_delete[uuid] = user_id
else:
appointments_not_to_delete[uuid] = user_id
# Now let's delete half of them
Cleaner.delete_gatekeeper_appointments(gatekeeper, appointments_to_delete)
all_appointments_gatekeeper = []
# Let's get all the appointments in the Gatekeeper
for user_id, user in gatekeeper.registered_users.items():
all_appointments_gatekeeper.extend(user.appointments)
# Check that the first half of the appointments are not in the Gatekeeper, but the second half is
assert not set(appointments_to_delete).issubset(all_appointments_gatekeeper)
assert set(appointments_not_to_delete).issubset(all_appointments_gatekeeper)
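The behaviour exercised here can be summarised in a few lines: each (uuid, user_id) pair is removed from that user's appointment map only if both the user and the appointment are still present. A dict-based sketch of that logic (not the actual Cleaner code):

```python
def delete_gatekeeper_appointments_sketch(registered_users, appointments_to_delete):
    """Removes each uuid from its user's appointment map, if both user and appointment exist (sketch)."""
    for uuid, user_id in appointments_to_delete.items():
        user = registered_users.get(user_id)
        if user is not None and uuid in user["appointments"]:
            del user["appointments"][uuid]


users = {"user_a": {"appointments": {"uuid_1": 1, "uuid_2": 1}}}
delete_gatekeeper_appointments_sketch(users, {"uuid_1": "user_a", "uuid_x": "unknown_user"})
assert users["user_a"]["appointments"] == {"uuid_2": 1}  # only the matching pair was removed
```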

View File

@@ -0,0 +1,63 @@
import pytest
from pytest import fixture
from common.constants import LOCATOR_LEN_BYTES
from teos.extended_appointment import ExtendedAppointment
from test.common.unit.conftest import get_random_value_hex
# Parent methods are not tested.
@fixture
def appointment_data():
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
to_self_delay = 20
user_id = get_random_value_hex(16)
encrypted_blob_data = get_random_value_hex(100)
return {
"locator": locator,
"to_self_delay": to_self_delay,
"encrypted_blob": encrypted_blob_data,
"user_id": user_id,
}
def test_init_appointment(appointment_data):
# The appointment has no checks whatsoever, since the inspector is the one taking care of that, and the only one
# creating appointments.
appointment = ExtendedAppointment(
appointment_data["locator"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
appointment_data["user_id"],
)
assert (
appointment_data["locator"] == appointment.locator
and appointment_data["to_self_delay"] == appointment.to_self_delay
and appointment_data["encrypted_blob"] == appointment.encrypted_blob
and appointment_data["user_id"] == appointment.user_id
)
def test_get_summary(appointment_data):
assert ExtendedAppointment.from_dict(appointment_data).get_summary() == {
"locator": appointment_data["locator"],
"user_id": appointment_data["user_id"],
}
def test_from_dict(appointment_data):
# The appointment should be built if no field is missing
appointment = ExtendedAppointment.from_dict(appointment_data)
assert isinstance(appointment, ExtendedAppointment)
# Otherwise it should fail
for key in appointment_data.keys():
prev_val = appointment_data[key]
appointment_data[key] = None
with pytest.raises(ValueError, match="Wrong appointment data"):
ExtendedAppointment.from_dict(appointment_data)
appointment_data[key] = prev_val
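The fields tested above suggest ExtendedAppointment is the common Appointment plus a user_id, with get_summary exposing only the locator and its owner. A rough standalone sketch of that shape (the real class lives in teos.extended_appointment and subclasses common.appointment.Appointment):

```python
class AppointmentSketch:
    """Stand-in for common.appointment.Appointment (illustrative only)."""

    def __init__(self, locator, to_self_delay, encrypted_blob):
        self.locator = locator
        self.to_self_delay = to_self_delay
        self.encrypted_blob = encrypted_blob


class ExtendedAppointmentSketch(AppointmentSketch):
    """Appointment plus the id of the user that sent it (illustrative only)."""

    def __init__(self, locator, to_self_delay, encrypted_blob, user_id):
        super().__init__(locator, to_self_delay, encrypted_blob)
        self.user_id = user_id

    def get_summary(self):
        # Only the data needed to locate the appointment and bill its owner
        return {"locator": self.locator, "user_id": self.user_id}


ext = ExtendedAppointmentSketch("aa" * 16, 20, "deadbeef", "02" + "bb" * 32)
assert ext.get_summary() == {"locator": "aa" * 16, "user_id": "02" + "bb" * 32}
```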

View File

@@ -1,57 +1,76 @@
import pytest
from teos.gatekeeper import IdentificationFailure, NotEnoughSlots
from teos.users_dbm import UsersDBM
from teos.block_processor import BlockProcessor
from teos.gatekeeper import AuthenticationFailure, NotEnoughSlots, UserInfo
from common.cryptographer import Cryptographer
from common.exceptions import InvalidParameter
from common.constants import ENCRYPTED_BLOB_MAX_SIZE_HEX
from test.teos.unit.conftest import get_random_value_hex, generate_keypair, get_config
from test.teos.unit.conftest import get_random_value_hex, generate_keypair, get_config, generate_dummy_appointment
config = get_config()
def test_init(gatekeeper):
def test_init(gatekeeper, run_bitcoind):
assert isinstance(gatekeeper.default_slots, int) and gatekeeper.default_slots == config.get("DEFAULT_SLOTS")
assert isinstance(
gatekeeper.default_subscription_duration, int
) and gatekeeper.default_subscription_duration == config.get("DEFAULT_SUBSCRIPTION_DURATION")
assert isinstance(gatekeeper.expiry_delta, int) and gatekeeper.expiry_delta == config.get("EXPIRY_DELTA")
assert isinstance(gatekeeper.block_processor, BlockProcessor)
assert isinstance(gatekeeper.user_db, UsersDBM)
assert isinstance(gatekeeper.registered_users, dict) and len(gatekeeper.registered_users) == 0
def test_add_update_user(gatekeeper):
# add_update_user adds DEFAULT_SLOTS to a given user as long as the identifier is {02, 03}| 32-byte hex str
user_pk = "02" + get_random_value_hex(32)
# it also sets the user's subscription_expiry to current_block_height + DEFAULT_SUBSCRIPTION_DURATION
user_id = "02" + get_random_value_hex(32)
for _ in range(10):
current_slots = gatekeeper.registered_users.get(user_pk)
current_slots = current_slots.get("available_slots") if current_slots is not None else 0
user = gatekeeper.registered_users.get(user_id)
current_slots = user.available_slots if user is not None else 0
gatekeeper.add_update_user(user_pk)
gatekeeper.add_update_user(user_id)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == current_slots + config.get(
"DEFAULT_SLOTS"
assert gatekeeper.registered_users.get(user_id).available_slots == current_slots + config.get("DEFAULT_SLOTS")
assert gatekeeper.registered_users[
user_id
].subscription_expiry == gatekeeper.block_processor.get_block_count() + config.get(
"DEFAULT_SUBSCRIPTION_DURATION"
)
# The same can be checked for multiple users
for _ in range(10):
# The user identifier is changed every call
user_pk = "03" + get_random_value_hex(32)
user_id = "03" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == config.get("DEFAULT_SLOTS")
gatekeeper.add_update_user(user_id)
assert gatekeeper.registered_users.get(user_id).available_slots == config.get("DEFAULT_SLOTS")
assert gatekeeper.registered_users[
user_id
].subscription_expiry == gatekeeper.block_processor.get_block_count() + config.get(
"DEFAULT_SUBSCRIPTION_DURATION"
)
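A compact reading of what the assertions above require of add_update_user: slots accumulate by DEFAULT_SLOTS on every call, and the expiry is reset to the current block height plus DEFAULT_SUBSCRIPTION_DURATION. A sketch under those assumptions (constants are illustrative; the real values come from the teos config):

```python
DEFAULT_SLOTS = 100                  # assumed value
DEFAULT_SUBSCRIPTION_DURATION = 4320  # assumed value


class UserInfoSketch:
    def __init__(self, available_slots, subscription_expiry):
        self.available_slots = available_slots
        self.subscription_expiry = subscription_expiry


def add_update_user_sketch(registered_users, user_id, current_height):
    """Registers a user or tops up an existing one (sketch of the tested behaviour)."""
    user = registered_users.get(user_id)
    current_slots = user.available_slots if user is not None else 0
    registered_users[user_id] = UserInfoSketch(
        available_slots=current_slots + DEFAULT_SLOTS,
        subscription_expiry=current_height + DEFAULT_SUBSCRIPTION_DURATION,
    )


users = {}
add_update_user_sketch(users, "02" + "ab" * 32, current_height=500)
add_update_user_sketch(users, "02" + "ab" * 32, current_height=510)
assert users["02" + "ab" * 32].available_slots == 2 * DEFAULT_SLOTS
assert users["02" + "ab" * 32].subscription_expiry == 510 + DEFAULT_SUBSCRIPTION_DURATION
```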
def test_add_update_user_wrong_pk(gatekeeper):
def test_add_update_user_wrong_id(gatekeeper):
# Passing a wrong pk defaults to the errors in check_user_pk. We can try with one.
wrong_pk = get_random_value_hex(32)
wrong_id = get_random_value_hex(32)
with pytest.raises(ValueError):
gatekeeper.add_update_user(wrong_pk)
with pytest.raises(InvalidParameter):
gatekeeper.add_update_user(wrong_id)
def test_add_update_user_wrong_pk_prefix(gatekeeper):
def test_add_update_user_wrong_id_prefix(gatekeeper):
# Prefixes must be 02 or 03, anything else should fail
wrong_pk = "04" + get_random_value_hex(32)
wrong_id = "04" + get_random_value_hex(32)
with pytest.raises(ValueError):
gatekeeper.add_update_user(wrong_pk)
with pytest.raises(InvalidParameter):
gatekeeper.add_update_user(wrong_id)
def test_identify_user(gatekeeper):
@@ -60,13 +79,13 @@ def test_identify_user(gatekeeper):
# Let's first register a user
sk, pk = generate_keypair()
compressed_pk = Cryptographer.get_compressed_pk(pk)
gatekeeper.add_update_user(compressed_pk)
user_id = Cryptographer.get_compressed_pk(pk)
gatekeeper.add_update_user(user_id)
message = "Hey, it's me"
signature = Cryptographer.sign(message.encode(), sk)
assert gatekeeper.identify_user(message.encode(), signature) == compressed_pk
assert gatekeeper.authenticate_user(message.encode(), signature) == user_id
def test_identify_user_non_registered(gatekeeper):
@@ -76,8 +95,8 @@ def test_identify_user_non_registered(gatekeeper):
message = "Hey, it's me"
signature = Cryptographer.sign(message.encode(), sk)
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature)
with pytest.raises(AuthenticationFailure):
gatekeeper.authenticate_user(message.encode(), signature)
def test_identify_user_invalid_signature(gatekeeper):
@@ -85,8 +104,8 @@ def test_identify_user_invalid_signature(gatekeeper):
message = "Hey, it's me"
signature = get_random_value_hex(72)
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature)
with pytest.raises(AuthenticationFailure):
gatekeeper.authenticate_user(message.encode(), signature)
def test_identify_user_wrong(gatekeeper):
@@ -97,41 +116,74 @@ def test_identify_user_wrong(gatekeeper):
signature = Cryptographer.sign(message.encode(), sk)
# Non-byte message and str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message, signature)
with pytest.raises(AuthenticationFailure):
gatekeeper.authenticate_user(message, signature)
# byte message and non-str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message.encode(), signature.encode())
with pytest.raises(AuthenticationFailure):
gatekeeper.authenticate_user(message.encode(), signature.encode())
# non-byte message and non-str sig
with pytest.raises(IdentificationFailure):
gatekeeper.identify_user(message, signature.encode())
with pytest.raises(AuthenticationFailure):
gatekeeper.authenticate_user(message, signature.encode())
def test_fill_slots(gatekeeper):
# fill_slots will decrease the slot count of a user as long as they have enough slots, otherwise raise NotEnoughSlots
user_pk = "02" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
def test_add_update_appointment(gatekeeper):
# add_update_appointment should decrease the slot count if a new appointment is added
# let's add a new user
sk, pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(pk)
gatekeeper.add_update_user(user_id)
gatekeeper.fill_slots(user_pk, config.get("DEFAULT_SLOTS") - 1)
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == 1
# And now add a new appointment
appointment, _ = generate_dummy_appointment()
appointment_uuid = get_random_value_hex(16)
remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
# This is a standard size appointment, so it should have reduced the slots by one
assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
assert remaining_slots == config.get("DEFAULT_SLOTS") - 1
# Updates can leave the count as is, decrease it, or increase it, depending on the appointment size (modulo
# ENCRYPTED_BLOB_MAX_SIZE_HEX)
# Appointments of the same size leave it as is
appointment_same_size, _ = generate_dummy_appointment()
remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
assert remaining_slots == config.get("DEFAULT_SLOTS") - 1
# Bigger appointments decrease it
appointment_x2_size = appointment_same_size
appointment_x2_size.encrypted_blob = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX + 1)
remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
assert remaining_slots == config.get("DEFAULT_SLOTS") - 2
# Smaller appointments increase it
remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
assert remaining_slots == config.get("DEFAULT_SLOTS") - 1
# If the appointment needs more slots than there are free, it should fail
gatekeeper.registered_users[user_id].available_slots = 1
appointment_uuid = get_random_value_hex(16)
with pytest.raises(NotEnoughSlots):
gatekeeper.fill_slots(user_pk, 2)
# NotEnoughSlots is also raised if the user does not exist
with pytest.raises(NotEnoughSlots):
gatekeeper.fill_slots(get_random_value_hex(33), 2)
gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
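The sequence above boils down to a slot delta: an update is charged the new appointment's size minus whatever the previous version of the same uuid was already using, and fails if the delta exceeds the free slots. A sketch of that accounting under the same assumptions (ceil of blob length over an assumed ENCRYPTED_BLOB_MAX_SIZE_HEX per slot):

```python
from math import ceil

ENCRYPTED_BLOB_MAX_SIZE_HEX = 4096  # illustrative value


def update_slots_sketch(user, uuid, encrypted_blob):
    """Charges or refunds slots for an appointment update; raises if the user runs out (sketch)."""
    needed = ceil(len(encrypted_blob) / ENCRYPTED_BLOB_MAX_SIZE_HEX)
    already_used = user["appointments"].get(uuid, 0)
    delta = needed - already_used

    if delta > user["available_slots"]:
        raise RuntimeError("NotEnoughSlots")  # stand-in for teos.gatekeeper.NotEnoughSlots

    user["appointments"][uuid] = needed
    user["available_slots"] -= delta
    return user["available_slots"]


user = {"available_slots": 2, "appointments": {}}
assert update_slots_sketch(user, "uuid1", "A" * 100) == 1    # new appointment, 1 slot
assert update_slots_sketch(user, "uuid1", "A" * 5000) == 0   # bigger update, 1 extra slot
assert update_slots_sketch(user, "uuid1", "A" * 100) == 1    # smaller update frees a slot
```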
def test_free_slots(gatekeeper):
# free_slots simply adds slots to the user as long as the user exists.
user_pk = "03" + get_random_value_hex(32)
gatekeeper.add_update_user(user_pk)
gatekeeper.free_slots(user_pk, 42)
def test_get_expired_appointments(gatekeeper):
# get_expired_appointments returns a list of appointment uuids expiring at a given block
assert gatekeeper.registered_users.get(user_pk).get("available_slots") == config.get("DEFAULT_SLOTS") + 42
appointment = {}
# Let's simulate adding some users with dummy expiry times
gatekeeper.registered_users = {}
for i in reversed(range(100)):
uuid = get_random_value_hex(16)
user_appointments = [get_random_value_hex(16)]
# Add a single appointment to the user
gatekeeper.registered_users[uuid] = UserInfo(100, i, user_appointments)
appointment[i] = user_appointments
# Just making sure it does not crash for non-registered user
assert gatekeeper.free_slots(get_random_value_hex(33), 10) is None
# Now let's check them back, iterating in ascending order (the users were added in reverse)
for i in range(100):
assert gatekeeper.get_expired_appointments(i + gatekeeper.expiry_delta) == appointment[i]
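Reading the assertion backwards, an appointment is considered expired at block B when its user's subscription_expiry plus expiry_delta equals B. A sketch of that selection over dict-shaped users (not the Gatekeeper's actual code):

```python
def get_expired_appointments_sketch(registered_users, expiry_delta, block_height):
    """Returns the appointment uuids of users whose subscription expired expiry_delta blocks ago (sketch)."""
    expired = []
    for user in registered_users.values():
        if block_height == user["subscription_expiry"] + expiry_delta:
            expired.extend(user["appointments"])
    return expired


users = {
    "u1": {"subscription_expiry": 100, "appointments": ["uuid_a"]},
    "u2": {"subscription_expiry": 101, "appointments": ["uuid_b"]},
}
assert get_expired_appointments_sketch(users, expiry_delta=6, block_height=106) == ["uuid_a"]
```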

View File

@@ -4,8 +4,8 @@ from binascii import unhexlify
import teos.errors as errors
from teos.block_processor import BlockProcessor
from teos.inspector import Inspector, InspectionFailed
from teos.extended_appointment import ExtendedAppointment
from common.appointment import Appointment
from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
from test.teos.unit.conftest import get_random_value_hex, bitcoind_connect_params, get_config
@@ -95,101 +95,6 @@ def test_check_locator():
raise e
def test_check_start_time():
# Time is defined in block height
current_time = 100
# Right format and right value (start time in the future)
start_time = 101
assert inspector.check_start_time(start_time, current_time) is None
# Start time too small (either same block or block in the past)
start_times = [100, 99, 98, -1]
for start_time in start_times:
with pytest.raises(InspectionFailed):
try:
inspector.check_start_time(start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
raise e
# Empty field
start_time = None
with pytest.raises(InspectionFailed):
try:
inspector.check_start_time(start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
raise e
# Wrong data type
start_times = WRONG_TYPES
for start_time in start_times:
with pytest.raises(InspectionFailed):
try:
inspector.check_start_time(start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
raise e
def test_check_end_time():
# Time is defined in block height
current_time = 100
start_time = 120
# Right format and right value (start time before end and end in the future)
end_time = 121
assert inspector.check_end_time(end_time, start_time, current_time) is None
# End time too small (start time after end time)
end_times = [120, 119, 118, -1]
for end_time in end_times:
with pytest.raises(InspectionFailed):
try:
inspector.check_end_time(end_time, start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
raise e
# End time too small (either same height as current block or in the past)
current_time = 130
end_times = [130, 129, 128, -1]
for end_time in end_times:
with pytest.raises(InspectionFailed):
try:
inspector.check_end_time(end_time, start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_FIELD_TOO_SMALL
raise e
# Empty field
end_time = None
with pytest.raises(InspectionFailed):
try:
inspector.check_end_time(end_time, start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_EMPTY_FIELD
raise e
# Wrong data type
end_times = WRONG_TYPES
for end_time in end_times:
with pytest.raises(InspectionFailed):
try:
inspector.check_end_time(end_time, start_time, current_time)
except InspectionFailed as e:
assert e.erno == errors.APPOINTMENT_WRONG_FIELD_TYPE
raise e
def test_check_to_self_delay():
# Right value, right format
to_self_delays = [MIN_TO_SELF_DELAY, MIN_TO_SELF_DELAY + 1, MIN_TO_SELF_DELAY + 1000]
@@ -234,10 +139,6 @@ def test_check_blob():
encrypted_blob = get_random_value_hex(120)
assert inspector.check_blob(encrypted_blob) is None
# # Wrong content
# # FIXME: There is no properly defined format for this yet. It should be restricted by size at least, and checked to
# # be a multiple of the block size defined by the encryption function.
# Wrong type
encrypted_blobs = WRONG_TYPES_NO_STR
for encrypted_blob in encrypted_blobs:
@@ -279,21 +180,13 @@ def test_inspect(run_bitcoind):
to_self_delay = MIN_TO_SELF_DELAY
encrypted_blob = get_random_value_hex(64)
appointment_data = {
"locator": locator,
"start_time": start_time,
"end_time": end_time,
"to_self_delay": to_self_delay,
"encrypted_blob": encrypted_blob,
}
appointment_data = {"locator": locator, "to_self_delay": to_self_delay, "encrypted_blob": encrypted_blob}
appointment = inspector.inspect(appointment_data)
assert (
type(appointment) == Appointment
type(appointment) == ExtendedAppointment
and appointment.locator == locator
and appointment.start_time == start_time
and appointment.end_time == end_time
and appointment.to_self_delay == to_self_delay
and appointment.encrypted_blob == encrypted_blob
)

View File

@@ -9,23 +9,31 @@ from threading import Thread
from teos.carrier import Carrier
from teos.tools import bitcoin_cli
from teos.chain_monitor import ChainMonitor
from teos.block_processor import BlockProcessor
from teos.gatekeeper import Gatekeeper, UserInfo
from teos.appointments_dbm import AppointmentsDBM
from teos.responder import Responder, TransactionTracker
from teos.responder import Responder, TransactionTracker, CONFIRMATIONS_BEFORE_RETRY
from common.constants import LOCATOR_LEN_HEX
from bitcoind_mock.transaction import create_dummy_transaction, create_tx_from_hex
from test.teos.unit.conftest import (
generate_block,
generate_blocks,
generate_block_w_delay,
generate_blocks_w_delay,
get_random_value_hex,
bitcoind_connect_params,
bitcoind_feed_params,
get_config,
)
config = get_config()
@pytest.fixture(scope="module")
def responder(db_manager, carrier, block_processor):
responder = Responder(db_manager, carrier, block_processor)
def responder(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
@@ -66,31 +74,86 @@ def create_dummy_tracker_data(random_txid=False, penalty_rawtx=None):
if random_txid is True:
penalty_txid = get_random_value_hex(32)
appointment_end = bitcoin_cli(bitcoind_connect_params).getblockcount() + 2
locator = dispute_txid[:LOCATOR_LEN_HEX]
user_id = get_random_value_hex(16)
return locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end
return locator, dispute_txid, penalty_txid, penalty_rawtx, user_id
def create_dummy_tracker(random_txid=False, penalty_rawtx=None):
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
random_txid, penalty_rawtx
)
return TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data(random_txid, penalty_rawtx)
return TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, user_id)
def test_tracker_init(run_bitcoind):
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data()
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data()
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, user_id)
assert (
tracker.dispute_txid == dispute_txid
tracker.locator == locator
and tracker.dispute_txid == dispute_txid
and tracker.penalty_txid == penalty_txid
and tracker.penalty_rawtx == penalty_rawtx
and tracker.appointment_end == appointment_end
and tracker.user_id == user_id
)
def test_tracker_to_dict():
tracker = create_dummy_tracker()
tracker_dict = tracker.to_dict()
assert (
tracker.locator == tracker_dict["locator"]
and tracker.penalty_rawtx == tracker_dict["penalty_rawtx"]
and tracker.user_id == tracker_dict["user_id"]
)
def test_tracker_from_dict():
tracker_dict = create_dummy_tracker().to_dict()
new_tracker = TransactionTracker.from_dict(tracker_dict)
assert tracker_dict == new_tracker.to_dict()
def test_tracker_from_dict_invalid_data():
tracker_dict = create_dummy_tracker().to_dict()
for value in ["dispute_txid", "penalty_txid", "penalty_rawtx", "user_id"]:
tracker_dict_copy = deepcopy(tracker_dict)
tracker_dict_copy[value] = None
try:
TransactionTracker.from_dict(tracker_dict_copy)
assert False
except ValueError:
assert True
def test_tracker_get_summary():
tracker = create_dummy_tracker()
assert tracker.get_summary() == {
"locator": tracker.locator,
"user_id": tracker.user_id,
"penalty_txid": tracker.penalty_txid,
}
def test_init_responder(temp_db_manager, gatekeeper, carrier, block_processor):
responder = Responder(temp_db_manager, gatekeeper, carrier, block_processor)
assert isinstance(responder.trackers, dict) and len(responder.trackers) == 0
assert isinstance(responder.tx_tracker_map, dict) and len(responder.tx_tracker_map) == 0
assert isinstance(responder.unconfirmed_txs, list) and len(responder.unconfirmed_txs) == 0
assert isinstance(responder.missed_confirmations, dict) and len(responder.missed_confirmations) == 0
assert isinstance(responder.block_queue, Queue) and responder.block_queue.empty()
assert isinstance(responder.db_manager, AppointmentsDBM)
assert isinstance(responder.gatekeeper, Gatekeeper)
assert isinstance(responder.carrier, Carrier)
assert isinstance(responder.block_processor, BlockProcessor)
assert responder.last_known_block is None or isinstance(responder.last_known_block, str)
def test_on_sync(run_bitcoind, responder, block_processor):
# We're on sync if we're at most one block behind the tip
chain_tip = block_processor.get_best_block_hash()
@@ -108,50 +171,8 @@ def test_on_sync_fail(responder, block_processor):
assert responder.on_sync(chain_tip) is False
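The two tests above pin on_sync down to a distance check against the best tip. A minimal sketch of that rule, assuming the distance in blocks between the given hash and the tip is already known:

```python
def on_sync_sketch(distance_to_tip):
    """The tower considers itself in sync if the given block is at most one block behind the tip (sketch)."""
    return distance_to_tip <= 1


assert on_sync_sketch(0) is True   # the block is the tip
assert on_sync_sketch(1) is True   # one block behind is still fine
assert on_sync_sketch(2) is False  # too far behind, a catch-up is needed
```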
def test_tracker_to_dict():
tracker = create_dummy_tracker()
tracker_dict = tracker.to_dict()
assert (
tracker.locator == tracker_dict["locator"]
and tracker.penalty_rawtx == tracker_dict["penalty_rawtx"]
and tracker.appointment_end == tracker_dict["appointment_end"]
)
def test_tracker_from_dict():
tracker_dict = create_dummy_tracker().to_dict()
new_tracker = TransactionTracker.from_dict(tracker_dict)
assert tracker_dict == new_tracker.to_dict()
def test_tracker_from_dict_invalid_data():
tracker_dict = create_dummy_tracker().to_dict()
for value in ["dispute_txid", "penalty_txid", "penalty_rawtx", "appointment_end"]:
tracker_dict_copy = deepcopy(tracker_dict)
tracker_dict_copy[value] = None
try:
TransactionTracker.from_dict(tracker_dict_copy)
assert False
except ValueError:
assert True
def test_init_responder(temp_db_manager, carrier, block_processor):
responder = Responder(temp_db_manager, carrier, block_processor)
assert isinstance(responder.trackers, dict) and len(responder.trackers) == 0
assert isinstance(responder.tx_tracker_map, dict) and len(responder.tx_tracker_map) == 0
assert isinstance(responder.unconfirmed_txs, list) and len(responder.unconfirmed_txs) == 0
assert isinstance(responder.missed_confirmations, dict) and len(responder.missed_confirmations) == 0
assert responder.block_queue.empty()
def test_handle_breach(db_manager, carrier, block_processor):
responder = Responder(db_manager, carrier, block_processor)
def test_handle_breach(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
uuid = uuid4().hex
tracker = create_dummy_tracker()
@@ -163,17 +184,17 @@ def test_handle_breach(db_manager, carrier, block_processor):
tracker.dispute_txid,
tracker.penalty_txid,
tracker.penalty_rawtx,
tracker.appointment_end,
tracker.user_id,
block_hash=get_random_value_hex(32),
)
assert receipt.delivered is True
def test_handle_breach_bad_response(db_manager, block_processor):
def test_handle_breach_bad_response(db_manager, gatekeeper, block_processor):
# We need a new carrier here, otherwise the transaction will be flagged as previously sent and receipt.delivered
# will be True
responder = Responder(db_manager, Carrier(bitcoind_connect_params), block_processor)
responder = Responder(db_manager, gatekeeper, Carrier(bitcoind_connect_params), block_processor)
uuid = uuid4().hex
tracker = create_dummy_tracker()
@@ -188,7 +209,7 @@ def test_handle_breach_bad_response(db_manager, block_processor):
tracker.dispute_txid,
tracker.penalty_txid,
tracker.penalty_rawtx,
tracker.appointment_end,
tracker.user_id,
block_hash=get_random_value_hex(32),
)
@@ -199,9 +220,7 @@ def test_add_tracker(responder):
for _ in range(20):
uuid = uuid4().hex
confirmations = 0
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
random_txid=True
)
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data(random_txid=True)
# Check the tracker is not within the responder trackers before adding it
assert uuid not in responder.trackers
@@ -209,7 +228,7 @@ def test_add_tracker(responder):
assert penalty_txid not in responder.unconfirmed_txs
# And that it is afterwards
responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, confirmations)
assert uuid in responder.trackers
assert penalty_txid in responder.tx_tracker_map
assert penalty_txid in responder.unconfirmed_txs
@@ -219,18 +238,18 @@ def test_add_tracker(responder):
assert (
tracker.get("penalty_txid") == penalty_txid
and tracker.get("locator") == locator
and tracker.get("appointment_end") == appointment_end
and tracker.get("user_id") == user_id
)
def test_add_tracker_same_penalty_txid(responder):
confirmations = 0
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(random_txid=True)
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data(random_txid=True)
uuid_1 = uuid4().hex
uuid_2 = uuid4().hex
responder.add_tracker(uuid_1, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
responder.add_tracker(uuid_2, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
responder.add_tracker(uuid_1, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, confirmations)
responder.add_tracker(uuid_2, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, confirmations)
# Check that both trackers have been added
assert uuid_1 in responder.trackers and uuid_2 in responder.trackers
@@ -243,7 +262,7 @@ def test_add_tracker_same_penalty_txid(responder):
assert (
tracker.get("penalty_txid") == penalty_txid
and tracker.get("locator") == locator
and tracker.get("appointment_end") == appointment_end
and tracker.get("user_id") == user_id
)
@@ -251,35 +270,45 @@ def test_add_tracker_already_confirmed(responder):
for i in range(20):
uuid = uuid4().hex
confirmations = i + 1
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data(
penalty_rawtx=create_dummy_transaction().hex()
)
responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, confirmations)
assert penalty_txid not in responder.unconfirmed_txs
assert (
responder.trackers[uuid].get("penalty_txid") == penalty_txid
and responder.trackers[uuid].get("locator") == locator
and responder.trackers[uuid].get("user_id") == user_id
)
def test_do_watch(temp_db_manager, carrier, block_processor):
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor):
# Create a fresh responder to simplify the test
responder = Responder(temp_db_manager, carrier, block_processor)
responder = Responder(temp_db_manager, gatekeeper, carrier, block_processor)
chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]
subscription_expiry = responder.block_processor.get_block_count() + 110
# Let's set up the trackers first
for tracker in trackers:
uuid = uuid4().hex
responder.trackers[uuid] = {
"locator": tracker.locator,
"penalty_txid": tracker.penalty_txid,
"appointment_end": tracker.appointment_end,
}
# Simulate user registration so trackers can properly expire
responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
available_slots=10, subscription_expiry=subscription_expiry
)
# Add data to the Responder
responder.trackers[uuid] = tracker.get_summary()
responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
responder.missed_confirmations[tracker.penalty_txid] = 0
responder.unconfirmed_txs.append(tracker.penalty_txid)
# Assuming the appointment only took a single slot
responder.gatekeeper.registered_users[tracker.user_id].appointments[uuid] = 1
# We also need to store the info in the db
responder.db_manager.create_triggered_appointment_flag(uuid)
@@ -295,37 +324,40 @@ def test_do_watch(temp_db_manager, carrier, block_processor):
broadcast_txs.append(tracker.penalty_txid)
# Mine a block
generate_block()
generate_block_w_delay()
# The transactions we sent shouldn't be in the unconfirmed transaction list anymore
assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)
# TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)
# CONFIRMATIONS_BEFORE_RETRY+1 blocks after, the responder should rebroadcast the unconfirmed txs (15 remaining)
generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
assert len(responder.unconfirmed_txs) == 0
assert len(responder.trackers) == 20
# Generating 5 additional blocks should complete the 5 trackers
generate_blocks(5)
# Generating 100 - CONFIRMATIONS_BEFORE_RETRY -2 additional blocks should complete the first 5 trackers
generate_blocks_w_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
assert len(responder.unconfirmed_txs) == 0
assert len(responder.trackers) == 15
# Check they are not in the Gatekeeper either
for tracker in trackers[:5]:
assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0
assert not set(broadcast_txs).issubset(responder.tx_tracker_map)
# Do the rest
broadcast_txs = []
# CONFIRMATIONS_BEFORE_RETRY additional blocks should complete the rest
generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY)
assert len(responder.unconfirmed_txs) == 0
assert len(responder.trackers) == 0
# Check they are not in the Gatekeeper either
for tracker in trackers[5:]:
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
broadcast_txs.append(tracker.penalty_txid)
# Mine a block
generate_blocks(6)
assert len(responder.tx_tracker_map) == 0
assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0
def test_check_confirmations(db_manager, carrier, block_processor):
responder = Responder(db_manager, carrier, block_processor)
def test_check_confirmations(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
# check_confirmations checks, given the list of transactions in a block, which of the known penalty transactions have
# been confirmed. To test this we need to create a list of transactions and the state of the responder
# been confirmed. To test this we need to create a list of transactions and the state of the Responder
txs = [get_random_value_hex(32) for _ in range(20)]
# The responder has a list of unconfirmed transactions, let's make some of them the ones we've just received
@@ -352,7 +384,6 @@ def test_check_confirmations(db_manager, carrier, block_processor):
assert responder.missed_confirmations[tx] == 1
# TODO: Check this properly, a bug passed unnoticed!
def test_get_txs_to_rebroadcast(responder):
# Let's create a few fake txids and assign at least 6 missing confirmations to each
txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}
@@ -376,68 +407,131 @@ def test_get_txs_to_rebroadcast(responder):
assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())
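What the assertion checks can be captured in a couple of lines: any penalty transaction that has missed a run of confirmations reaching the retry threshold gets queued for rebroadcast. A sketch, where the threshold value is an assumption taken from the "at least 6" in the test above (the real constant is CONFIRMATIONS_BEFORE_RETRY in teos.responder):

```python
CONFIRMATIONS_BEFORE_RETRY = 6  # assumed value for illustration


def get_txs_to_rebroadcast_sketch(missed_confirmations):
    """Picks the txids that have missed too many confirmations in a row (sketch)."""
    return [
        txid
        for txid, missed in missed_confirmations.items()
        if missed >= CONFIRMATIONS_BEFORE_RETRY
    ]


missed = {"aa" * 32: 7, "bb" * 32: 2}
assert get_txs_to_rebroadcast_sketch(missed) == ["aa" * 32]
```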
def test_get_completed_trackers(db_manager, carrier, block_processor):
initial_height = bitcoin_cli(bitcoind_connect_params).getblockcount()
responder = Responder(db_manager, carrier, block_processor)
def test_get_completed_trackers(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
# A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS)
# We'll create three types of transactions: end reached + enough conf, end reached + not enough conf, end not reached
trackers_end_conf = {
# A complete tracker is a tracker whose penalty transaction has been irrevocably resolved (i.e. has reached 100
# confirmations)
# We'll create 3 types of txs: irrevocably resolved, confirmed but not irrevocably resolved, and unconfirmed
trackers_ir_resolved = {
uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
}
trackers_end_no_conf = {}
trackers_confirmed = {
uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
}
trackers_unconfirmed = {}
for _ in range(10):
tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
responder.unconfirmed_txs.append(tracker.penalty_txid)
trackers_end_no_conf[uuid4().hex] = tracker
trackers_no_end = {}
for _ in range(10):
tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
tracker.appointment_end += 10
trackers_no_end[uuid4().hex] = tracker
trackers_unconfirmed[uuid4().hex] = tracker
all_trackers = {}
all_trackers.update(trackers_end_conf)
all_trackers.update(trackers_end_no_conf)
all_trackers.update(trackers_no_end)
all_trackers.update(trackers_ir_resolved)
all_trackers.update(trackers_confirmed)
all_trackers.update(trackers_unconfirmed)
# Let's add all to the responder
# Let's add all to the Responder
for uuid, tracker in all_trackers.items():
responder.trackers[uuid] = {
"locator": tracker.locator,
"penalty_txid": tracker.penalty_txid,
"appointment_end": tracker.appointment_end,
}
responder.trackers[uuid] = tracker.get_summary()
for uuid, tracker in all_trackers.items():
for uuid, tracker in trackers_ir_resolved.items():
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
# The dummy appointments have an end_appointment time of current + 2, but trackers need at least 6 confs by default
generate_blocks(6)
generate_block_w_delay()
# And now let's check
completed_trackers = responder.get_completed_trackers(initial_height + 6)
completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers.items()]
ended_trackers_keys = list(trackers_end_conf.keys())
assert set(completed_trackers_ids) == set(ended_trackers_keys)
for uuid, tracker in trackers_confirmed.items():
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
# Generating 6 additional blocks should also confirm trackers_no_end
generate_blocks(6)
# ir_resolved have 100 confirmations and confirmed have 99
generate_blocks_w_delay(99)
completed_trackers = responder.get_completed_trackers(initial_height + 12)
completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers.items()]
ended_trackers_keys.extend(list(trackers_no_end.keys()))
# Let's check
completed_trackers = responder.get_completed_trackers()
ended_trackers_keys = list(trackers_ir_resolved.keys())
assert set(completed_trackers) == set(ended_trackers_keys)
assert set(completed_trackers_ids) == set(ended_trackers_keys)
# Generating 1 additional block should also include the confirmed ones
generate_block_w_delay()
completed_trackers = responder.get_completed_trackers()
ended_trackers_keys.extend(list(trackers_confirmed.keys()))
assert set(completed_trackers) == set(ended_trackers_keys)
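In the new model a tracker completes only once its penalty transaction is irrevocably resolved, independent of any appointment end time. A sketch of that selection, assuming confirmation counts are available per penalty txid and taking the 100-block threshold from the comment above:

```python
IRREVOCABLY_RESOLVED = 100  # confirmations after which a penalty tx is considered final (per the comment above)


def get_completed_trackers_sketch(trackers, confirmations_by_txid):
    """Returns the uuids whose penalty transaction has reached the finality threshold (sketch)."""
    return [
        uuid
        for uuid, tracker in trackers.items()
        if confirmations_by_txid.get(tracker["penalty_txid"], 0) >= IRREVOCABLY_RESOLVED
    ]


trackers = {"uuid1": {"penalty_txid": "aa" * 32}, "uuid2": {"penalty_txid": "bb" * 32}}
confs = {"aa" * 32: 100, "bb" * 32: 99}
assert get_completed_trackers_sketch(trackers, confs) == ["uuid1"]
```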
def test_rebroadcast(db_manager, carrier, block_processor):
responder = Responder(db_manager, carrier, block_processor)
def test_get_expired_trackers(responder):
# Expired trackers are those whose subscription has reached the expiry block and have not been confirmed.
# Confirmed trackers that have reached their expiry will be kept until completed.
current_block = responder.block_processor.get_block_count()
# Let's first register a couple of users
user1_id = get_random_value_hex(16)
responder.gatekeeper.registered_users[user1_id] = UserInfo(
available_slots=10, subscription_expiry=current_block + 15
)
user2_id = get_random_value_hex(16)
responder.gatekeeper.registered_users[user2_id] = UserInfo(
available_slots=10, subscription_expiry=current_block + 16
)
# And create some trackers and add them to the corresponding user in the Gatekeeper
expired_unconfirmed_trackers_15 = {}
expired_unconfirmed_trackers_16 = {}
expired_confirmed_trackers_15 = {}
for _ in range(10):
uuid = uuid4().hex
dummy_tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
dummy_tracker.user_id = user1_id
expired_unconfirmed_trackers_15[uuid] = dummy_tracker
responder.unconfirmed_txs.append(dummy_tracker.penalty_txid)
# Assume the appointment only took a single slot
responder.gatekeeper.registered_users[dummy_tracker.user_id].appointments[uuid] = 1
for _ in range(10):
uuid = uuid4().hex
dummy_tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
dummy_tracker.user_id = user1_id
expired_confirmed_trackers_15[uuid] = dummy_tracker
# Assume the appointment only took a single slot
responder.gatekeeper.registered_users[dummy_tracker.user_id].appointments[uuid] = 1
for _ in range(10):
uuid = uuid4().hex
dummy_tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
dummy_tracker.user_id = user2_id
expired_unconfirmed_trackers_16[uuid] = dummy_tracker
responder.unconfirmed_txs.append(dummy_tracker.penalty_txid)
# Assume the appointment only took a single slot
responder.gatekeeper.registered_users[dummy_tracker.user_id].appointments[uuid] = 1
all_trackers = {}
all_trackers.update(expired_confirmed_trackers_15)
all_trackers.update(expired_unconfirmed_trackers_15)
all_trackers.update(expired_unconfirmed_trackers_16)
# Add everything to the Responder
for uuid, tracker in all_trackers.items():
responder.trackers[uuid] = tracker.get_summary()
# Currently nothing should be expired
assert responder.get_expired_trackers(current_block) == []
# 15 blocks (+ EXPIRY_DELTA) afterwards only user1's unconfirmed trackers should be expired
assert responder.get_expired_trackers(current_block + 15 + config.get("EXPIRY_DELTA")) == list(
expired_unconfirmed_trackers_15.keys()
)
# 1 block (+ EXPIRY_DELTA) after that, user2's unconfirmed trackers should be expired
assert responder.get_expired_trackers(current_block + 16 + config.get("EXPIRY_DELTA")) == list(
expired_unconfirmed_trackers_16.keys()
)
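# A minimal sketch (not the Responder implementation) of the expiry rule the assertions
# above exercise: a tracker is expired at a given height when its penalty tx is still
# unconfirmed and the owner's subscription_expiry plus EXPIRY_DELTA matches that height.
# All parameter names below are illustrative.
def get_expired_trackers_sketch(trackers, registered_users, unconfirmed_txs, height, expiry_delta):
    return [
        uuid
        for uuid, summary in trackers.items()
        if summary["penalty_txid"] in unconfirmed_txs
        and registered_users[summary["user_id"]].subscription_expiry + expiry_delta == height
    ]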
def test_rebroadcast(db_manager, gatekeeper, carrier, block_processor):
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
@@ -446,17 +540,13 @@ def test_rebroadcast(db_manager, carrier, block_processor):
# Rebroadcast calls add_response with retry=True. The tracker data is already in trackers.
for i in range(20):
uuid = uuid4().hex
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
locator, dispute_txid, penalty_txid, penalty_rawtx, user_id = create_dummy_tracker_data(
penalty_rawtx=create_dummy_transaction().hex()
)
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, user_id)
responder.trackers[uuid] = {
"locator": locator,
"penalty_txid": penalty_txid,
"appointment_end": appointment_end,
}
responder.trackers[uuid] = {"locator": locator, "penalty_txid": penalty_txid, "user_id": user_id}
# We need to add it to the db too
responder.db_manager.create_triggered_appointment_flag(uuid)

View File

@@ -13,13 +13,7 @@ def test_can_connect_to_bitcoind():
assert can_connect_to_bitcoind(bitcoind_connect_params) is True
# def test_can_connect_to_bitcoind_bitcoin_not_running():
# # Kill the simulator thread and test the check fails
# bitcoind_process.kill()
# assert can_connect_to_bitcoind() is False
def test_bitcoin_cli():
def test_bitcoin_cli(run_bitcoind):
try:
bitcoin_cli(bitcoind_connect_params).help()
assert True

View File

@@ -1,15 +1,15 @@
from teos.appointments_dbm import AppointmentsDBM
from teos.users_dbm import UsersDBM
from teos.gatekeeper import UserInfo
from test.teos.unit.conftest import get_random_value_hex
stored_users = {}
def open_create_db(db_path):
try:
db_manager = AppointmentsDBM(db_path)
db_manager = UsersDBM(db_path)
return db_manager
@@ -19,27 +19,27 @@ def open_create_db(db_path):
def test_store_user(user_db_manager):
# Store user should work as long as the user_pk is properly formatted and data is a dictionary
user_pk = "02" + get_random_value_hex(32)
user_data = {"available_slots": 42}
stored_users[user_pk] = user_data
assert user_db_manager.store_user(user_pk, user_data) is True
user_id = "02" + get_random_value_hex(32)
user_info = UserInfo(available_slots=42, subscription_expiry=100)
stored_users[user_id] = user_info.to_dict()
assert user_db_manager.store_user(user_id, user_info.to_dict()) is True
# Wrong pks should return False on adding
user_pk = "04" + get_random_value_hex(32)
user_data = {"available_slots": 42}
assert user_db_manager.store_user(user_pk, user_data) is False
user_id = "04" + get_random_value_hex(32)
user_info = UserInfo(available_slots=42, subscription_expiry=100)
assert user_db_manager.store_user(user_id, user_info.to_dict()) is False
# Same for wrong types
assert user_db_manager.store_user(42, user_data) is False
assert user_db_manager.store_user(42, user_info.to_dict()) is False
# And for wrong type user data
assert user_db_manager.store_user(user_pk, 42) is False
assert user_db_manager.store_user(user_id, 42) is False
def test_load_user(user_db_manager):
# Loading a user we have stored should work
for user_pk, user_data in stored_users.items():
assert user_db_manager.load_user(user_pk) == user_data
for user_id, user_data in stored_users.items():
assert user_db_manager.load_user(user_id) == user_data
# Random keys should fail
assert user_db_manager.load_user(get_random_value_hex(33)) is None
@@ -50,11 +50,11 @@ def test_load_user(user_db_manager):
def test_delete_user(user_db_manager):
# Deleting an existing user should work
for user_pk, user_data in stored_users.items():
assert user_db_manager.delete_user(user_pk) is True
for user_id, user_data in stored_users.items():
assert user_db_manager.delete_user(user_id) is True
for user_pk, user_data in stored_users.items():
assert user_db_manager.load_user(user_pk) is None
for user_id, user_data in stored_users.items():
assert user_db_manager.load_user(user_id) is None
# But deleting a non-existing one should not fail
assert user_db_manager.delete_user(get_random_value_hex(32)) is True
@@ -70,10 +70,10 @@ def test_load_all_users(user_db_manager):
# Adding some and checking we get them all
for i in range(10):
user_pk = "02" + get_random_value_hex(32)
user_data = {"available_slots": i}
user_db_manager.store_user(user_pk, user_data)
stored_users[user_pk] = user_data
user_id = "02" + get_random_value_hex(32)
user_info = UserInfo(available_slots=42, subscription_expiry=100)
user_db_manager.store_user(user_id, user_info.to_dict())
stored_users[user_id] = user_info.to_dict()
all_users = user_db_manager.load_all_users()
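# Hedged usage sketch of the UsersDBM round-trip covered by the tests above
# ("/tmp/test_users_db" is an illustrative path, not taken from the repo):
db = UsersDBM("/tmp/test_users_db")
user_id = "02" + get_random_value_hex(32)  # keys must look like compressed public keys
user_info = UserInfo(available_slots=42, subscription_expiry=100)
db.store_user(user_id, user_info.to_dict())
assert db.load_user(user_id) == user_info.to_dict()
db.delete_user(user_id)
assert db.load_user(user_id) is None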

View File

@@ -4,20 +4,21 @@ from shutil import rmtree
from threading import Thread
from coincurve import PrivateKey
from teos import LOG_PREFIX
from teos.carrier import Carrier
from teos.watcher import Watcher
from teos.tools import bitcoin_cli
from teos.responder import Responder
from teos.gatekeeper import UserInfo
from teos.chain_monitor import ChainMonitor
from teos.appointments_dbm import AppointmentsDBM
from teos.block_processor import BlockProcessor
from teos.watcher import Watcher, AppointmentLimitReached
from teos.gatekeeper import Gatekeeper, AuthenticationFailure, NotEnoughSlots
from common.tools import compute_locator
from common.cryptographer import Cryptographer
from test.teos.unit.conftest import (
generate_blocks,
generate_blocks_w_delay,
generate_dummy_appointment,
get_random_value_hex,
generate_keypair,
@@ -27,8 +28,6 @@ from test.teos.unit.conftest import (
)
APPOINTMENTS = 5
START_TIME_OFFSET = 1
END_TIME_OFFSET = 1
TEST_SET_SIZE = 200
config = get_config()
@@ -51,14 +50,12 @@ def temp_db_manager():
@pytest.fixture(scope="module")
def watcher(db_manager):
def watcher(db_manager, gatekeeper):
block_processor = BlockProcessor(bitcoind_connect_params)
carrier = Carrier(bitcoind_connect_params)
responder = Responder(db_manager, carrier, block_processor)
watcher = Watcher(
db_manager, block_processor, responder, signing_key.to_der(), MAX_APPOINTMENTS, config.get("EXPIRY_DELTA")
)
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
watcher = Watcher(db_manager, gatekeeper, block_processor, responder, signing_key.to_der(), MAX_APPOINTMENTS)
chain_monitor = ChainMonitor(
watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
@@ -84,9 +81,7 @@ def create_appointments(n):
dispute_txs = []
for i in range(n):
appointment, dispute_tx = generate_dummy_appointment(
start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
)
appointment, dispute_tx = generate_dummy_appointment()
uuid = uuid4().hex
appointments[uuid] = appointment
@@ -100,85 +95,107 @@ def test_init(run_bitcoind, watcher):
assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0
assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0
assert watcher.block_queue.empty()
assert isinstance(watcher.db_manager, AppointmentsDBM)
assert isinstance(watcher.gatekeeper, Gatekeeper)
assert isinstance(watcher.block_processor, BlockProcessor)
assert isinstance(watcher.responder, Responder)
assert isinstance(watcher.max_appointments, int)
assert isinstance(watcher.expiry_delta, int)
assert isinstance(watcher.signing_key, PrivateKey)
def test_get_appointment_summary(watcher):
# get_appointment_summary returns an appointment summary if found, else None.
random_uuid = get_random_value_hex(16)
appointment_summary = {"locator": get_random_value_hex(16), "end_time": 10, "size": 200}
watcher.appointments[random_uuid] = appointment_summary
assert watcher.get_appointment_summary(random_uuid) == appointment_summary
def test_add_appointment_non_registered(watcher):
# Appointments from non-registered users should fail
user_sk, user_pk = generate_keypair()
# Requesting a non-existing appointment
assert watcher.get_appointment_summary(get_random_value_hex(16)) is None
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
with pytest.raises(AuthenticationFailure, match="User not found"):
watcher.add_appointment(appointment, appointment_signature)
def test_add_appointment_no_slots(watcher):
# Appointments from registered users with no available slots should also fail
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=0, subscription_expiry=10)
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
with pytest.raises(NotEnoughSlots):
watcher.add_appointment(appointment, appointment_signature)
def test_add_appointment(watcher):
# We should be able to add appointments up to the limit
for _ in range(10):
appointment, dispute_tx = generate_dummy_appointment(
start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
)
user_pk = get_random_value_hex(33)
# Simulate the user is registered
user_sk, user_pk = generate_keypair()
available_slots = 100
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=available_slots, subscription_expiry=10)
added_appointment, sig = watcher.add_appointment(appointment, user_pk)
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
assert added_appointment is True
response = watcher.add_appointment(appointment, appointment_signature)
assert response.get("locator") == appointment.locator
assert Cryptographer.get_compressed_pk(watcher.signing_key.public_key) == Cryptographer.get_compressed_pk(
Cryptographer.recover_pk(appointment.serialize(), sig)
Cryptographer.recover_pk(appointment.serialize(), response.get("signature"))
)
assert response.get("available_slots") == available_slots - 1
# Check that we can also add an already added appointment (same locator)
added_appointment, sig = watcher.add_appointment(appointment, user_pk)
assert added_appointment is True
response = watcher.add_appointment(appointment, appointment_signature)
assert response.get("locator") == appointment.locator
assert Cryptographer.get_compressed_pk(watcher.signing_key.public_key) == Cryptographer.get_compressed_pk(
Cryptographer.recover_pk(appointment.serialize(), sig)
Cryptographer.recover_pk(appointment.serialize(), response.get("signature"))
)
# If two appointments with the same locator from the same user are added, they are overwritten, but if they come
# from different users, they are kept.
# The slot count should not have been reduced and only one copy is kept.
assert response.get("available_slots") == available_slots - 1
assert len(watcher.locator_uuid_map[appointment.locator]) == 1
different_user_pk = get_random_value_hex(33)
added_appointment, sig = watcher.add_appointment(appointment, different_user_pk)
assert added_appointment is True
assert Cryptographer.get_compressed_pk(watcher.signing_key.public_key) == Cryptographer.get_compressed_pk(
Cryptographer.recover_pk(appointment.serialize(), sig)
# If two appointments with the same locator come from different users, they are kept.
another_user_sk, another_user_pk = generate_keypair()
another_user_id = Cryptographer.get_compressed_pk(another_user_pk)
watcher.gatekeeper.registered_users[another_user_id] = UserInfo(
available_slots=available_slots, subscription_expiry=10
)
appointment_signature = Cryptographer.sign(appointment.serialize(), another_user_sk)
response = watcher.add_appointment(appointment, appointment_signature)
assert response.get("locator") == appointment.locator
assert Cryptographer.get_compressed_pk(watcher.signing_key.public_key) == Cryptographer.get_compressed_pk(
Cryptographer.recover_pk(appointment.serialize(), response.get("signature"))
)
assert response.get("available_slots") == available_slots - 1
assert len(watcher.locator_uuid_map[appointment.locator]) == 2
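# For reference, the add_appointment response checked above appears to carry at least
# these fields (inferred from the assertions, not from an API spec); the values below
# are placeholders:
example_response = {
    "locator": "placeholder_locator_hex",        # echoes the appointment locator
    "signature": "placeholder_tower_signature",  # recovers to the tower's public key
    "available_slots": 99,                       # e.g. 100 slots minus the one consumed
}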
def test_add_too_many_appointments(watcher):
# Any appointment on top of those should fail
# Simulate the user is registered
user_sk, user_pk = generate_keypair()
available_slots = 100
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=available_slots, subscription_expiry=10)
# Appointments on top of the limit should be rejected
watcher.appointments = dict()
for _ in range(MAX_APPOINTMENTS):
appointment, dispute_tx = generate_dummy_appointment(
start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
)
user_pk = get_random_value_hex(33)
for i in range(MAX_APPOINTMENTS):
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
added_appointment, sig = watcher.add_appointment(appointment, user_pk)
assert added_appointment is True
response = watcher.add_appointment(appointment, appointment_signature)
assert response.get("locator") == appointment.locator
assert Cryptographer.get_compressed_pk(watcher.signing_key.public_key) == Cryptographer.get_compressed_pk(
Cryptographer.recover_pk(appointment.serialize(), sig)
Cryptographer.recover_pk(appointment.serialize(), response.get("signature"))
)
assert response.get("available_slots") == available_slots - (i + 1)
appointment, dispute_tx = generate_dummy_appointment(
start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
)
user_pk = get_random_value_hex(33)
added_appointment, sig = watcher.add_appointment(appointment, user_pk)
assert added_appointment is False
assert sig is None
with pytest.raises(AppointmentLimitReached):
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
watcher.add_appointment(appointment, appointment_signature)
def test_do_watch(watcher, temp_db_manager):
@@ -190,9 +207,19 @@ def test_do_watch(watcher, temp_db_manager):
# Set the data into the Watcher and in the db
watcher.locator_uuid_map = locator_uuid_map
watcher.appointments = {}
watcher.gatekeeper.registered_users = {}
# Simulate a registration (times out in 10 blocks)
user_id = get_random_value_hex(16)
watcher.gatekeeper.registered_users[user_id] = UserInfo(
available_slots=100, subscription_expiry=watcher.block_processor.get_block_count() + 10
)
# Add the appointments
for uuid, appointment in appointments.items():
watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time, "size": 200}
watcher.appointments[uuid] = {"locator": appointment.locator, "user_id": user_id}
# Assume the appointment only takes one slot
watcher.gatekeeper.registered_users[user_id].appointments[uuid] = 1
watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
watcher.db_manager.create_append_locator_map(appointment.locator, uuid)
@@ -203,17 +230,21 @@ def test_do_watch(watcher, temp_db_manager):
for dispute_tx in dispute_txs[:2]:
bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)
# After generating enough blocks, the number of appointments should have reduced by two
generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)
# After generating a block, the appointment count should have been reduced by 2 (two breaches)
generate_blocks_w_delay(1)
assert len(watcher.appointments) == APPOINTMENTS - 2
# The rest of appointments will timeout after the end (2) + EXPIRY_DELTA
# The rest of the appointments will time out once the subscription expires (9 more blocks) + EXPIRY_DELTA
# Wait for an additional block to be safe
generate_blocks(config.get("EXPIRY_DELTA") + START_TIME_OFFSET + END_TIME_OFFSET)
generate_blocks_w_delay(10 + config.get("EXPIRY_DELTA"))
assert len(watcher.appointments) == 0
# Check that they are not in the Gatekeeper either, only the two that passed to the Responder should remain
assert len(watcher.gatekeeper.registered_users[user_id].appointments) == 2
# FIXME: We should also add cases where the transactions are invalid. bitcoind_mock needs to be extended for this.
def test_get_breaches(watcher, txids, locator_uuid_map):
watcher.locator_uuid_map = locator_uuid_map
@@ -234,7 +265,7 @@ def test_get_breaches_random_data(watcher, locator_uuid_map):
assert len(potential_breaches) == 0
def test_filter_valid_breaches_random_data(watcher):
def test_filter_breaches_random_data(watcher):
appointments = {}
locator_uuid_map = {}
breaches = {}
@@ -242,7 +273,7 @@ def test_filter_valid_breaches_random_data(watcher):
for i in range(TEST_SET_SIZE):
dummy_appointment, _ = generate_dummy_appointment()
uuid = uuid4().hex
appointments[uuid] = {"locator": dummy_appointment.locator, "end_time": dummy_appointment.end_time}
appointments[uuid] = {"locator": dummy_appointment.locator, "user_id": dummy_appointment.user_id}
watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
@@ -255,7 +286,7 @@ def test_filter_valid_breaches_random_data(watcher):
watcher.locator_uuid_map = locator_uuid_map
watcher.appointments = appointments
valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches)
valid_breaches, invalid_breaches = watcher.filter_breaches(breaches)
# We have "triggered" TEST_SET_SIZE/2 breaches, all of them invalid.
assert len(valid_breaches) == 0 and len(invalid_breaches) == TEST_SET_SIZE / 2
@@ -282,13 +313,13 @@ def test_filter_valid_breaches(watcher):
breaches = {dummy_appointment.locator: dispute_txid}
for uuid, appointment in appointments.items():
watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time}
watcher.appointments[uuid] = {"locator": appointment.locator, "user_id": appointment.user_id}
watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
watcher.locator_uuid_map = locator_uuid_map
valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches)
valid_breaches, invalid_breaches = watcher.filter_breaches(breaches)
# We have "triggered" a single breach and it was valid.
assert len(invalid_breaches) == 0 and len(valid_breaches) == 1
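# A hedged sketch of the filtering idea these two tests rely on (the real
# Watcher.filter_breaches does additional bookkeeping): a breach is valid only if the
# stored encrypted blob, keyed by the dispute txid, decrypts into a parseable penalty tx.
# `decrypt` and `parse_tx` are hypothetical helpers standing in for the real calls.
def filter_breaches_sketch(breaches, encrypted_blobs_by_locator, decrypt, parse_tx):
    valid, invalid = {}, {}
    for locator, dispute_txid in breaches.items():
        try:
            penalty_rawtx = decrypt(encrypted_blobs_by_locator[locator], dispute_txid)
            valid[locator] = parse_tx(penalty_rawtx)
        except Exception:
            invalid[locator] = dispute_txid
    return valid, invalid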

View File

@@ -154,16 +154,9 @@ def get_appointment(plugin, *args):
@plugin.hook("commitment_revocation")
def add_appointment(plugin, **kwargs):
try:
# FIXME: start_time and end_time are temporary. Fix it on the tower side and remove it from there
block_height = plugin.rpc.getchaininfo().get("blockcount")
start_time = block_height + 1
end_time = block_height + 10
commitment_txid, penalty_tx = arg_parser.parse_add_appointment_arguments(kwargs)
appointment = Appointment(
locator=compute_locator(commitment_txid),
start_time=start_time,
end_time=end_time,
to_self_delay=20,
encrypted_blob=Cryptographer.encrypt(penalty_tx, commitment_txid),
)
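# After this change the appointment the plugin builds no longer carries start_time or
# end_time; its lifetime is governed by the user's subscription on the tower side. A
# rough, illustrative view of the resulting payload (placeholder values only):
example_appointment = {
    "locator": "placeholder_locator_hex",        # compute_locator(commitment_txid)
    "to_self_delay": 20,
    "encrypted_blob": "placeholder_ciphertext",  # Cryptographer.encrypt(penalty_tx, commitment_txid)
}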