mirror of
https://github.com/aljazceru/python-teos.git
synced 2025-12-18 14:44:21 +01:00
pisa -> teos
This commit is contained in:
0
test/teos/unit/__init__.py
Normal file
0
test/teos/unit/__init__.py
Normal file
173
test/teos/unit/conftest.py
Normal file
173
test/teos/unit/conftest.py
Normal file
@@ -0,0 +1,173 @@
|
||||
import os
|
||||
import pytest
|
||||
import random
|
||||
import requests
|
||||
from time import sleep
|
||||
from shutil import rmtree
|
||||
from threading import Thread
|
||||
|
||||
from coincurve import PrivateKey
|
||||
|
||||
from common.blob import Blob
|
||||
from teos.responder import TransactionTracker
|
||||
from teos.tools import bitcoin_cli
|
||||
from teos.db_manager import DBManager
|
||||
from common.appointment import Appointment
|
||||
from common.tools import compute_locator
|
||||
|
||||
from bitcoind_mock.transaction import create_dummy_transaction
|
||||
from bitcoind_mock.bitcoind import BitcoindMock
|
||||
from bitcoind_mock.conf import BTC_RPC_HOST, BTC_RPC_PORT
|
||||
|
||||
from teos import LOG_PREFIX
|
||||
import common.cryptographer
|
||||
from common.logger import Logger
|
||||
from common.constants import LOCATOR_LEN_HEX
|
||||
from common.cryptographer import Cryptographer
|
||||
|
||||
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def run_bitcoind():
    """Runs the mocked bitcoind in a background daemon thread for the whole session."""
    mock = BitcoindMock()
    bitcoind_thread = Thread(target=mock.run, kwargs={"mode": "event", "verbose": True})
    bitcoind_thread.daemon = True
    bitcoind_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def prng_seed():
    """Seeds the PRNG once per session so pseudo-random test data is reproducible."""
    random.seed(0)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def db_manager():
    """Yields a DBManager backed by a throwaway on-disk database, removed on teardown."""
    manager = DBManager("test_db")
    # Add last known block for the Responder in the db

    yield manager

    # Teardown: close the handle before wiping the database directory
    manager.db.close()
    rmtree("test_db")
|
||||
|
||||
|
||||
def generate_keypair():
    """Returns a fresh coincurve (private_key, public_key) pair."""
    secret_key = PrivateKey()
    return secret_key, secret_key.public_key
|
||||
|
||||
|
||||
def get_random_value_hex(nbytes):
    """Returns a random hex string encoding exactly *nbytes* bytes.

    zfill keeps the fixed width even when the leading bytes happen to be zero.
    """
    value = random.getrandbits(8 * nbytes)
    return "{:x}".format(value).zfill(2 * nbytes)
|
||||
|
||||
|
||||
def generate_block():
    """Asks the mocked bitcoind to mine one block, then waits for it to propagate."""
    generate_endpoint = "http://{}:{}/generate".format(BTC_RPC_HOST, BTC_RPC_PORT)
    requests.post(url=generate_endpoint, timeout=5)
    sleep(0.5)
|
||||
|
||||
|
||||
def generate_blocks(n):
    """Mines *n* consecutive blocks via the mock, one at a time."""
    remaining = n
    while remaining > 0:
        generate_block()
        remaining -= 1
|
||||
|
||||
|
||||
def fork(block_hash):
    """Instructs the mocked bitcoind to fork the chain from the given parent block."""
    requests.post("http://{}:{}/fork".format(BTC_RPC_HOST, BTC_RPC_PORT), json={"parent": block_hash})
|
||||
|
||||
|
||||
def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_time_offset=30):
    """Builds a complete, signed add-appointment request plus its triggering dispute tx.

    Returns a tuple (data, dispute_tx_hex) where data carries the appointment fields,
    the client signature, and the client public key in hex.
    """
    # Anchor the appointment window either to the real chain height or to a fixed one
    current_height = bitcoin_cli().getblockcount() if real_height else 10

    dispute_tx = create_dummy_transaction()
    dispute_txid = dispute_tx.tx_id.hex()
    penalty_tx = create_dummy_transaction(dispute_txid)

    dummy_appointment_data = {
        "tx": penalty_tx.hex(),
        "tx_id": dispute_txid,
        "start_time": current_height + start_time_offset,
        "end_time": current_height + end_time_offset,
        "to_self_delay": 20,
    }

    # dummy keys for this test
    client_sk, client_pk = generate_keypair()
    client_pk_hex = client_pk.format().hex()

    # The blob is the penalty tx encrypted under the dispute txid
    blob = Blob(dummy_appointment_data.get("tx"))
    encrypted_blob = Cryptographer.encrypt(blob, dummy_appointment_data.get("tx_id"))

    appointment_data = {
        "locator": compute_locator(dispute_txid),
        "start_time": dummy_appointment_data.get("start_time"),
        "end_time": dummy_appointment_data.get("end_time"),
        "to_self_delay": dummy_appointment_data.get("to_self_delay"),
        "encrypted_blob": encrypted_blob,
    }

    signature = Cryptographer.sign(Appointment.from_dict(appointment_data).serialize(), client_sk)

    data = {"appointment": appointment_data, "signature": signature, "public_key": client_pk_hex}
    return data, dispute_tx.hex()
|
||||
|
||||
|
||||
def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
    """Like generate_dummy_appointment_data, but returns an Appointment object instead of raw fields."""
    data, dispute_tx = generate_dummy_appointment_data(
        real_height=real_height, start_time_offset=start_time_offset, end_time_offset=end_time_offset
    )
    return Appointment.from_dict(data["appointment"]), dispute_tx
|
||||
|
||||
|
||||
def generate_dummy_tracker():
    """Builds a TransactionTracker populated with random (but well-formed) txids and raw tx."""
    dispute_txid = get_random_value_hex(32)

    # The locator is the truncated dispute txid, matching the tower's locator scheme
    tracker_data = {
        "locator": dispute_txid[:LOCATOR_LEN_HEX],
        "dispute_txid": dispute_txid,
        "penalty_txid": get_random_value_hex(32),
        "penalty_rawtx": get_random_value_hex(100),
        "appointment_end": 100,
    }

    return TransactionTracker.from_dict(tracker_data)
|
||||
|
||||
|
||||
def get_config():
    """Returns the configuration dict used to build Watcher/API instances in the tests.

    Bug fix: SERVER_LOG_FILE and TEOS_SECRET_KEY were built with plain string
    concatenation, but data_folder carries no trailing separator, so the original
    values came out as "~/.teosteos.log" / "~/.teosteos_sk.der". os.path.join
    places both files inside DATA_FOLDER as intended.
    """
    data_folder = os.path.expanduser("~/.teos")
    config = {
        "BTC_RPC_USER": "username",
        "BTC_RPC_PASSWD": "password",
        "BTC_RPC_HOST": "localhost",
        "BTC_RPC_PORT": 8332,
        "BTC_NETWORK": "regtest",
        "FEED_PROTOCOL": "tcp",
        "FEED_ADDR": "127.0.0.1",
        "FEED_PORT": 28332,
        "DATA_FOLDER": data_folder,
        "MAX_APPOINTMENTS": 100,
        "EXPIRY_DELTA": 6,
        "MIN_TO_SELF_DELAY": 20,
        # os.path.join: data_folder has no trailing slash, plain "+" would fuse the names
        "SERVER_LOG_FILE": os.path.join(data_folder, "teos.log"),
        "TEOS_SECRET_KEY": os.path.join(data_folder, "teos_sk.der"),
        "DB_PATH": "appointments",
    }

    return config
|
||||
192
test/teos/unit/test_api.py
Normal file
192
test/teos/unit/test_api.py
Normal file
@@ -0,0 +1,192 @@
|
||||
import json
|
||||
import pytest
|
||||
import requests
|
||||
from time import sleep
|
||||
from threading import Thread
|
||||
|
||||
from teos.api import API
|
||||
from teos.watcher import Watcher
|
||||
from teos.responder import Responder
|
||||
from teos.tools import bitcoin_cli
|
||||
from teos import HOST, PORT
|
||||
from teos.chain_monitor import ChainMonitor
|
||||
|
||||
from test.teos.unit.conftest import (
|
||||
generate_block,
|
||||
generate_blocks,
|
||||
get_random_value_hex,
|
||||
generate_dummy_appointment_data,
|
||||
generate_keypair,
|
||||
get_config,
|
||||
)
|
||||
|
||||
from common.constants import LOCATOR_LEN_BYTES
|
||||
|
||||
|
||||
TEOS_API = "http://{}:{}".format(HOST, PORT)
|
||||
MULTIPLE_APPOINTMENTS = 10
|
||||
|
||||
appointments = []
|
||||
locator_dispute_tx_map = {}
|
||||
|
||||
config = get_config()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def run_api(db_manager):
    """Boots a Watcher + ChainMonitor pair and serves the API from a daemon thread."""
    sk, pk = generate_keypair()

    watcher = Watcher(db_manager, Responder(db_manager), sk.to_der(), get_config())
    chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue)
    watcher.awake()
    chain_monitor.monitor_chain()

    server_thread = Thread(target=API(watcher, config).start)
    server_thread.daemon = True
    server_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
|
||||
|
||||
|
||||
@pytest.fixture
def new_appt_data():
    """Creates fresh appointment data and records its dispute tx for later triggering."""
    appt_data, dispute_tx = generate_dummy_appointment_data()
    locator_dispute_tx_map[appt_data["appointment"]["locator"]] = dispute_tx
    return appt_data


def add_appointment(new_appt_data):
    """POSTs the appointment to the tower; tracks it locally when it is accepted."""
    response = requests.post(url=TEOS_API, json=json.dumps(new_appt_data), timeout=5)

    # Only accepted appointments join the module-level bookkeeping list
    if response.status_code == 200:
        appointments.append(new_appt_data["appointment"])

    return response
|
||||
|
||||
|
||||
def test_add_appointment(run_api, run_bitcoind, new_appt_data):
    """A well-formed appointment is accepted; a malformed one is rejected."""
    # Properly formatted appointment
    assert add_appointment(new_appt_data).status_code == 200

    # Incorrect appointment
    new_appt_data["appointment"]["to_self_delay"] = 0
    assert add_appointment(new_appt_data).status_code == 400


def test_request_random_appointment():
    """Querying an unknown locator reports every match as not_found."""
    r = requests.get(url=TEOS_API + "/get_appointment?locator=" + get_random_value_hex(LOCATOR_LEN_BYTES))
    assert r.status_code == 200

    received_appointments = json.loads(r.content)
    statuses = [appointment.pop("status") for appointment in received_appointments]

    assert all(status == "not_found" for status in statuses)


def test_add_appointment_multiple_times(new_appt_data, n=MULTIPLE_APPOINTMENTS):
    """Multiple appointments with the same locator should be valid.

    DISCUSS: #34-store-identical-appointments
    """
    for _ in range(n):
        assert add_appointment(new_appt_data).status_code == 200
|
||||
|
||||
|
||||
def test_request_multiple_appointments_same_locator(new_appt_data, n=MULTIPLE_APPOINTMENTS):
    """Adds the same appointment n times, then reuses the watcher retrieval test."""
    for _ in range(n):
        assert add_appointment(new_appt_data).status_code == 200

    test_request_appointment_watcher(new_appt_data)


def test_add_too_many_appointment(new_appt_data):
    """The tower returns 503 once MAX_APPOINTMENTS is reached."""
    free_slots = config.get("MAX_APPOINTMENTS") - len(appointments)
    for _ in range(free_slots):
        assert add_appointment(new_appt_data).status_code == 200

    # One over the limit must be rejected with Service Unavailable
    assert add_appointment(new_appt_data).status_code == 503
|
||||
|
||||
|
||||
def test_get_all_appointments_watcher():
    """Everything added so far is being watched; nothing has reached the Responder yet."""
    r = requests.get(url=TEOS_API + "/get_all_appointments")
    assert r.status_code == 200 and r.reason == "OK"

    received_appointments = json.loads(r.content)

    # Make sure all the locators are in the watcher
    watcher_locators = [entry["locator"] for entry in received_appointments["watcher_appointments"].values()]
    local_locators = [appointment["locator"] for appointment in appointments]

    assert set(watcher_locators) == set(local_locators)
    assert len(received_appointments["responder_trackers"]) == 0


def test_get_all_appointments_responder():
    """After broadcasting every dispute, all appointments move to the Responder."""
    # Trigger all disputes
    locators = [appointment["locator"] for appointment in appointments]
    for locator, dispute_tx in locator_dispute_tx_map.items():
        if locator in locators:
            bitcoin_cli().sendrawtransaction(dispute_tx)

    # Confirm transactions
    generate_blocks(6)

    # Get all appointments
    r = requests.get(url=TEOS_API + "/get_all_appointments")
    received_appointments = json.loads(r.content)

    # Make sure there is no pending locator in the watcher
    responder_trackers = [entry["locator"] for entry in received_appointments["responder_trackers"].values()]
    local_locators = [appointment["locator"] for appointment in appointments]

    assert set(responder_trackers) == set(local_locators)
    assert len(received_appointments["watcher_appointments"]) == 0
|
||||
|
||||
|
||||
def test_request_appointment_watcher(new_appt_data):
    """An appointment being watched is retrievable through get_appointment."""
    # First we need to add an appointment
    assert add_appointment(new_appt_data).status_code == 200

    # Next we can request it
    r = requests.get(url=TEOS_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
    assert r.status_code == 200

    # Each locator may point to multiple appointments, check them all
    received_appointments = json.loads(r.content)

    # Take the status out and leave the received appointments ready to compare
    statuses = [appointment.pop("status") for appointment in received_appointments]

    # Check that the appointment is within the received ones and that all of them are being watched
    assert new_appt_data["appointment"] in received_appointments
    assert all(status == "being_watched" for status in statuses)


def test_request_appointment_responder(new_appt_data):
    """Once its dispute hits the chain, the appointment is reported as dispute_responded."""
    # Let's do something similar to what we did with the watcher but now we'll send the dispute tx to the network
    dispute_tx = locator_dispute_tx_map[new_appt_data["appointment"]["locator"]]
    bitcoin_cli().sendrawtransaction(dispute_tx)

    assert add_appointment(new_appt_data).status_code == 200

    # Generate a block to trigger the watcher
    generate_block()

    r = requests.get(url=TEOS_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
    assert r.status_code == 200

    received_appointments = json.loads(r.content)
    statuses = [appointment.pop("status") for appointment in received_appointments]
    locators = [appointment["locator"] for appointment in received_appointments]

    assert new_appt_data["appointment"]["locator"] in locators and len(received_appointments) == 1
    assert all(status == "dispute_responded" for status in statuses) and len(statuses) == 1
|
||||
116
test/teos/unit/test_block_processor.py
Normal file
116
test/teos/unit/test_block_processor.py
Normal file
@@ -0,0 +1,116 @@
|
||||
import pytest
|
||||
|
||||
from teos.block_processor import BlockProcessor
|
||||
from test.teos.unit.conftest import get_random_value_hex, generate_block, generate_blocks, fork
|
||||
|
||||
|
||||
# Raw (legacy) bitcoin transaction used to exercise decode_raw_transaction below.
hex_tx = (
    "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
    "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
    "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
    "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba"
    "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e"
    "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
)
|
||||
|
||||
|
||||
@pytest.fixture
def best_block_hash():
    """Current best block hash as reported by the (mocked) node."""
    return BlockProcessor.get_best_block_hash()


def test_get_best_block_hash(run_bitcoind, best_block_hash):
    # As long as bitcoind is running (or mocked in this case) we should always get a block hash
    assert best_block_hash is not None and isinstance(best_block_hash, str)
|
||||
|
||||
|
||||
def test_get_block(best_block_hash):
    """get_block returns a dict carrying at least the fields the tower relies on."""
    # Getting a block from a block hash we are aware of should return data
    block = BlockProcessor.get_block(best_block_hash)

    # Checking that the received block has at least the fields we need
    # FIXME: We could be more strict here, but we'll need to add those restrictions to bitcoind_sim too
    assert isinstance(block, dict)
    assert block.get("hash") == best_block_hash and "height" in block and "previousblockhash" in block and "tx" in block


def test_get_random_block():
    """An unknown block hash yields None."""
    assert BlockProcessor.get_block(get_random_value_hex(32)) is None


def test_get_block_count():
    """The block count is a non-negative integer."""
    block_count = BlockProcessor.get_block_count()
    assert isinstance(block_count, int) and block_count >= 0
|
||||
|
||||
|
||||
def test_decode_raw_transaction():
    # We cannot exhaustively test this (we rely on bitcoind for this) but we can try to decode a correct transaction
    assert BlockProcessor.decode_raw_transaction(hex_tx) is not None


def test_decode_raw_transaction_invalid():
    # Same but with an invalid one (the reversed hex is not a valid transaction)
    assert BlockProcessor.decode_raw_transaction(hex_tx[::-1]) is None
|
||||
|
||||
|
||||
def test_get_missed_blocks():
    """get_missed_blocks returns exactly the hashes mined after the target block."""
    target_block = BlockProcessor.get_best_block_hash()

    # Generate some blocks and store their hashes in order
    missed_blocks = []
    for _ in range(5):
        generate_block()
        missed_blocks.append(BlockProcessor.get_best_block_hash())

    # Check what we've missed
    assert BlockProcessor.get_missed_blocks(target_block) == missed_blocks

    # We can see how it does not work if we replace the target by the first element in the list
    block_tip = missed_blocks[0]
    assert BlockProcessor.get_missed_blocks(block_tip) != missed_blocks

    # But it does again if we skip that block
    assert BlockProcessor.get_missed_blocks(block_tip) == missed_blocks[1:]
|
||||
|
||||
|
||||
def test_get_distance_to_tip():
    """The distance to tip equals the number of blocks mined after the target."""
    target_distance = 5
    target_block = BlockProcessor.get_best_block_hash()

    # Mine some blocks up to the target distance
    generate_blocks(target_distance)

    # Check if the distance is properly computed
    assert BlockProcessor.get_distance_to_tip(target_block) == target_distance


def test_is_block_in_best_chain():
    """A block drops out of the best chain once a longer fork replaces it."""
    best_block_hash = BlockProcessor.get_best_block_hash()
    best_block = BlockProcessor.get_block(best_block_hash)

    assert BlockProcessor.is_block_in_best_chain(best_block_hash)

    # Reorg past the block; it should no longer be part of the best chain
    fork(best_block.get("previousblockhash"))
    generate_blocks(2)

    assert not BlockProcessor.is_block_in_best_chain(best_block_hash)
|
||||
|
||||
|
||||
def test_find_last_common_ancestor():
    """After a reorg, the last common ancestor of the old tip is the fork point."""
    ancestor = BlockProcessor.get_best_block_hash()
    generate_blocks(3)
    best_block_hash = BlockProcessor.get_best_block_hash()

    # Create a fork (forking creates a block if the mock is set by events)
    fork(ancestor)

    # Create another block to make the best tip change (now both chains are at the same height)
    generate_blocks(5)

    # The last common ancestor between the old best and the new best should be the "ancestor"
    last_common_ancestor, dropped_txs = BlockProcessor.find_last_common_ancestor(best_block_hash)
    assert last_common_ancestor == ancestor
    assert len(dropped_txs) == 3
|
||||
137
test/teos/unit/test_builder.py
Normal file
137
test/teos/unit/test_builder.py
Normal file
@@ -0,0 +1,137 @@
|
||||
import pytest
|
||||
from uuid import uuid4
|
||||
from queue import Queue
|
||||
|
||||
from teos.builder import Builder
|
||||
from teos.watcher import Watcher
|
||||
from teos.responder import Responder
|
||||
from test.teos.unit.conftest import (
|
||||
get_random_value_hex,
|
||||
generate_dummy_appointment,
|
||||
generate_dummy_tracker,
|
||||
generate_block,
|
||||
bitcoin_cli,
|
||||
get_config,
|
||||
)
|
||||
|
||||
|
||||
def test_build_appointments():
    """Builder.build_appointments recreates the appointments and the locator → uuid map."""
    appointments_data = {}

    # Create some appointment data; on even iterations add a second appointment sharing
    # the same locator, to exercise all the builder's cases
    for i in range(10):
        appointment, _ = generate_dummy_appointment(real_height=False)
        appointments_data[uuid4().hex] = appointment.to_dict()

        if i % 2 == 0:
            clone, _ = generate_dummy_appointment(real_height=False)
            clone.locator = appointment.locator
            appointments_data[uuid4().hex] = clone.to_dict()

    # Use the builder to create the data structures
    appointments, locator_uuid_map = Builder.build_appointments(appointments_data)

    # Check that the created appointments match the data
    for uuid, appointment in appointments.items():
        assert uuid in appointments_data
        assert appointments_data[uuid].get("locator") == appointment.get("locator")
        assert appointments_data[uuid].get("end_time") == appointment.get("end_time")
        assert uuid in locator_uuid_map[appointment.get("locator")]
|
||||
|
||||
|
||||
def test_build_trackers():
    """Builder.build_trackers recreates the trackers and the penalty_txid → uuid map."""
    trackers_data = {}

    # Create some tracker data; on even iterations add a second tracker sharing the
    # same penalty txid, to exercise all the builder's cases
    for i in range(10):
        tracker = generate_dummy_tracker()
        trackers_data[uuid4().hex] = tracker.to_dict()

        if i % 2 == 0:
            clone = generate_dummy_tracker()
            clone.penalty_txid = tracker.penalty_txid
            trackers_data[uuid4().hex] = clone.to_dict()

    trackers, tx_tracker_map = Builder.build_trackers(trackers_data)

    # Check that the built trackers match the data
    for uuid, tracker in trackers.items():
        assert uuid in trackers_data
        assert tracker.get("penalty_txid") == trackers_data[uuid].get("penalty_txid")
        assert tracker.get("locator") == trackers_data[uuid].get("locator")
        assert tracker.get("appointment_end") == trackers_data[uuid].get("appointment_end")
        assert uuid in tx_tracker_map[tracker.get("penalty_txid")]
|
||||
|
||||
|
||||
def test_populate_block_queue():
    """Every hash handed to populate_block_queue ends up in the queue exactly once."""
    # Create some random block hashes and construct the queue with them
    blocks = [get_random_value_hex(32) for _ in range(10)]
    queue = Queue()
    Builder.populate_block_queue(queue, blocks)

    # Make sure every block is in the queue and that there are no additional ones
    remaining = list(blocks)
    while not queue.empty():
        block = queue.get()
        assert block in remaining
        remaining.remove(block)

    assert len(remaining) == 0
|
||||
|
||||
|
||||
def test_update_states_empty_list(db_manager):
    """update_states must refuse an empty missed-blocks list on either side."""
    w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=None)

    empty = []
    non_empty = [get_random_value_hex(32)]

    # Any combination of empty list must raise a ValueError
    with pytest.raises(ValueError):
        Builder.update_states(w, empty, non_empty)

    with pytest.raises(ValueError):
        Builder.update_states(w, non_empty, empty)
|
||||
|
||||
|
||||
def test_update_states_responder_misses_more(run_bitcoind, db_manager):
    """When the Responder missed more blocks, both components end on the same tip."""
    w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config())

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli().getbestblockhash())

    # Updating the states should bring both to the same last known block.
    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks, blocks[1:])

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert w.responder.last_known_block == blocks[-1]


def test_update_states_watcher_misses_more(run_bitcoind, db_manager):
    # Same as before, but data is now in the Responder
    w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config())

    blocks = []
    for _ in range(5):
        generate_block()
        blocks.append(bitcoin_cli().getbestblockhash())

    w.awake()
    w.responder.awake()
    Builder.update_states(w, blocks[1:], blocks)

    assert db_manager.load_last_block_hash_watcher() == blocks[-1]
    assert db_manager.load_last_block_hash_responder() == blocks[-1]
|
||||
70
test/teos/unit/test_carrier.py
Normal file
70
test/teos/unit/test_carrier.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import pytest
|
||||
|
||||
from teos.carrier import Carrier
|
||||
from bitcoind_mock.transaction import create_dummy_transaction
|
||||
from test.teos.unit.conftest import generate_blocks, get_random_value_hex
|
||||
from teos.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR
|
||||
|
||||
|
||||
# FIXME: This test do not fully cover the carrier since the simulator does not support every single error bitcoind may
|
||||
# return for RPC_VERIFY_REJECTED and RPC_VERIFY_ERROR. Further development of the simulator / mocks or simulation
|
||||
# with bitcoind is required
|
||||
|
||||
|
||||
# Transactions sent during the tests, re-queried later by test_get_transaction
sent_txs = []


@pytest.fixture(scope="module")
def carrier():
    """A single Carrier instance shared by the whole module."""
    return Carrier()


def test_send_transaction(run_bitcoind, carrier):
    """A fresh, well-formed transaction is delivered on the first try."""
    tx = create_dummy_transaction()
    receipt = carrier.send_transaction(tx.hex(), tx.tx_id.hex())

    assert receipt.delivered is True
|
||||
|
||||
|
||||
def test_send_double_spending_transaction(carrier):
    # We can test what happens if the same transaction is sent twice
    tx = create_dummy_transaction()
    txid = tx.tx_id.hex()

    first_receipt = carrier.send_transaction(tx.hex(), txid)
    sent_txs.append(txid)

    # Wait for a block to be mined. Issued receipts is reset from the Responder every block, so we should do it too.
    generate_blocks(2)
    carrier.issued_receipts = {}

    # Try to send it again
    second_receipt = carrier.send_transaction(tx.hex(), txid)

    # The carrier should report delivered True for both, but in the second case the transaction was already delivered
    # (either by himself or someone else)
    assert first_receipt.delivered is True
    assert (
        second_receipt.delivered is True
        and second_receipt.confirmations >= 1
        and second_receipt.reason == RPC_VERIFY_ALREADY_IN_CHAIN
    )


def test_send_transaction_invalid_format(carrier):
    # Test sending a transaction that does not fit the format (a txid is not a raw transaction)
    txid = create_dummy_transaction().tx_id.hex()
    receipt = carrier.send_transaction(txid, txid)

    assert receipt.delivered is False and receipt.reason == RPC_DESERIALIZATION_ERROR
|
||||
|
||||
|
||||
def test_get_transaction():
    # We should be able to get back every transaction we've sent
    for txid in sent_txs:
        assert Carrier.get_transaction(txid) is not None


def test_get_non_existing_transaction():
    """A random txid the node has never seen yields None."""
    assert Carrier.get_transaction(get_random_value_hex(32)) is None
|
||||
157
test/teos/unit/test_chain_monitor.py
Normal file
157
test/teos/unit/test_chain_monitor.py
Normal file
@@ -0,0 +1,157 @@
|
||||
import zmq
|
||||
import time
|
||||
from queue import Queue
|
||||
from threading import Thread, Event, Condition
|
||||
|
||||
from teos.block_processor import BlockProcessor
|
||||
from teos.chain_monitor import ChainMonitor
|
||||
|
||||
from test.teos.unit.conftest import get_random_value_hex, generate_block
|
||||
|
||||
|
||||
def test_init(run_bitcoind):
    """Sanity-checks the freshly constructed ChainMonitor's default state."""
    # run_bitcoind is started here instead of later on to avoid race conditions while it initializes

    # Not much to test here, just sanity checks to make sure nothing goes south in the future
    chain_monitor = ChainMonitor(Queue(), Queue())

    assert chain_monitor.best_tip is None
    assert isinstance(chain_monitor.last_tips, list) and not chain_monitor.last_tips
    assert chain_monitor.terminate is False
    assert isinstance(chain_monitor.check_tip, Event)
    assert isinstance(chain_monitor.lock, Condition)
    assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)

    # The Queues and asleep flags are initialized when attaching the corresponding subscriber
    assert isinstance(chain_monitor.watcher_queue, Queue)
    assert isinstance(chain_monitor.responder_queue, Queue)
|
||||
|
||||
|
||||
def test_notify_subscribers():
    """notify_subscribers fans a block hash out to both subscriber queues."""
    chain_monitor = ChainMonitor(Queue(), Queue())
    # Subscribers are only notified as long as they are awake
    new_block = get_random_value_hex(32)

    # Queues should be empty to start with
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    chain_monitor.notify_subscribers(new_block)

    assert chain_monitor.watcher_queue.get() == new_block
    assert chain_monitor.responder_queue.get() == new_block


def test_update_state():
    # The state is updated after receiving a new block (and only if the block is not already known).
    # Let's start by setting a best_tip and a couple of old tips
    new_block_hash = get_random_value_hex(32)
    chain_monitor = ChainMonitor(Queue(), Queue())
    chain_monitor.best_tip = new_block_hash
    chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]

    # Updating with an old tip must be a no-op
    assert chain_monitor.update_state(chain_monitor.last_tips[0]) is False

    # Same should happen with the current tip
    assert chain_monitor.update_state(chain_monitor.best_tip) is False

    # A genuinely new hash becomes the tip, and the old tip is appended to last_tips
    another_block_hash = get_random_value_hex(32)
    assert chain_monitor.update_state(another_block_hash) is True
    assert chain_monitor.best_tip == another_block_hash and new_block_hash == chain_monitor.last_tips[-1]
|
||||
|
||||
|
||||
def test_monitor_chain_polling(db_manager):
    """The polling thread pushes exactly one hash per new block to the watcher queue."""
    # Try polling with the Watcher
    watcher_queue = Queue()
    chain_monitor = ChainMonitor(watcher_queue, Queue())
    chain_monitor.best_tip = BlockProcessor.get_best_block_hash()

    # monitor_chain_polling runs until terminate is set
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, kwargs={"polling_delta": 0.1}, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.watcher_queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_block()

    chain_monitor.watcher_queue.get()
    assert chain_monitor.watcher_queue.empty()

    chain_monitor.terminate = True
    polling_thread.join()
|
||||
|
||||
|
||||
def test_monitor_chain_zmq(db_manager):
    """The zmq thread delivers one notification per mined block to the responder queue."""
    responder_queue = Queue()
    chain_monitor = ChainMonitor(Queue(), responder_queue)
    chain_monitor.best_tip = BlockProcessor.get_best_block_hash()

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # Queues should start empty
    assert chain_monitor.responder_queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_block()
        chain_monitor.responder_queue.get()
        assert chain_monitor.responder_queue.empty()
|
||||
|
||||
|
||||
def test_monitor_chain(db_manager):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor(Queue(), Queue())

    chain_monitor.best_tip = None
    chain_monitor.monitor_chain()

    # The tip is updated before starting the threads, so it should have changed.
    assert chain_monitor.best_tip is not None

    # Blocks should be received by both subscribers, once per generation
    for _ in range(5):
        generate_block()
        watcher_block = chain_monitor.watcher_queue.get()
        responder_block = chain_monitor.responder_queue.get()
        assert watcher_block == responder_block
        assert chain_monitor.watcher_queue.empty()
        assert chain_monitor.responder_queue.empty()

    # And the thread be terminated on terminate
    chain_monitor.terminate = True
    # The zmq thread needs a block generation to release from the recv method.
    generate_block()
|
||||
|
||||
|
||||
def test_monitor_chain_single_update(db_manager):
    """If both monitoring threads notice the same block, only the first notification
    must make it into the queues (no duplicates)."""
    chain_monitor = ChainMonitor(Queue(), Queue())
    chain_monitor.best_tip = None

    # Generate a block; whichever thread sees it first queues it, the other must not
    chain_monitor.monitor_chain(polling_delta=2)
    generate_block()

    block_from_watcher = chain_monitor.watcher_queue.get()
    block_from_responder = chain_monitor.responder_queue.get()
    assert block_from_watcher == block_from_responder
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # Wait past the 2s polling delta: no duplicate may have been queued
    time.sleep(2)
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # Forcing an update with an already-seen block must be rejected too
    assert chain_monitor.update_state(block_from_watcher) is False
|
||||
@@ New file: test/teos/unit/test_cleaner.py (207 lines) @@
|
||||
import random
|
||||
from uuid import uuid4
|
||||
|
||||
from teos.responder import TransactionTracker
|
||||
from teos.cleaner import Cleaner
|
||||
from common.appointment import Appointment
|
||||
|
||||
from test.teos.unit.conftest import get_random_value_hex
|
||||
|
||||
from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
|
||||
|
||||
# Confirmation threshold for a tracker to count as completed
# NOTE(review): some tests below hardcode 6 instead of reusing this constant
CONFIRMATIONS = 6
# Number of items selected (deleted/flagged) per test iteration
ITEMS = 10
# Total number of appointments/trackers set up per test iteration
MAX_ITEMS = 100
# Number of times each randomized scenario is repeated
ITERATIONS = 10
|
||||
|
||||
|
||||
def set_up_appointments(db_manager, total_appointments):
    """Populate both the in-memory structures and the db with dummy appointments.

    Odd iterations register a second uuid under the same locator, since a locator
    can have multiple appointments associated with it.

    Returns a (appointments, locator_uuid_map) tuple mirroring the Watcher's state.
    """
    appointments = {}
    locator_uuid_map = {}

    for i in range(total_appointments):
        locator = get_random_value_hex(LOCATOR_LEN_BYTES)
        appointment = Appointment(locator, None, None, None, None)
        locator_uuid_map[locator] = []

        # One uuid on even iterations, two on odd ones
        copies = 2 if i % 2 else 1
        for _ in range(copies):
            uuid = uuid4().hex
            appointments[uuid] = {"locator": appointment.locator}
            locator_uuid_map[locator].append(uuid)
            db_manager.store_watcher_appointment(uuid, appointment.to_json())
            db_manager.create_append_locator_map(locator, uuid)

    return appointments, locator_uuid_map
|
||||
|
||||
|
||||
def set_up_trackers(db_manager, total_trackers):
    """Populate both the in-memory structures and the db with dummy trackers.

    Odd iterations register a second uuid under the same penalty_txid, since a
    penalty transaction can be tracked on behalf of several appointments.

    Returns a (trackers, tx_tracker_map) tuple mirroring the Responder's state.
    """
    trackers = {}
    tx_tracker_map = {}

    for i in range(total_trackers):
        penalty_txid = get_random_value_hex(32)
        dispute_txid = get_random_value_hex(32)
        locator = dispute_txid[:LOCATOR_LEN_HEX]

        # Both copies share the same tracker data; it shouldn't matter for cleanup
        tracker = TransactionTracker(locator, dispute_txid, penalty_txid, None, None)
        tx_tracker_map[penalty_txid] = []

        # One uuid on even iterations, two on odd ones
        copies = 2 if i % 2 else 1
        for _ in range(copies):
            uuid = uuid4().hex
            trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid}
            tx_tracker_map[penalty_txid].append(uuid)
            db_manager.store_responder_tracker(uuid, tracker.to_json())
            db_manager.create_append_locator_map(tracker.locator, uuid)

    return trackers, tx_tracker_map
|
||||
|
||||
|
||||
def test_delete_appointment_from_memory(db_manager):
    """delete_appointment_from_memory drops the appointment from the in-memory
    structures while leaving its db copy untouched."""
    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
    uuids = list(appointments.keys())

    for uuid in uuids:
        Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map)
        assert uuid not in appointments
        # The db entry must survive a memory-only deletion
        assert db_manager.load_watcher_appointment(uuid) is not None
|
||||
|
||||
|
||||
def test_delete_appointment_from_db(db_manager):
    """delete_appointment_from_db removes the db copy while leaving the in-memory
    structures untouched."""
    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)

    for uuid in list(appointments.keys()):
        Cleaner.delete_appointment_from_db(uuid, db_manager)

        # The appointment should have been deleted from the db, but not from memory
        assert uuid in appointments
        assert db_manager.load_watcher_appointment(uuid) is None
|
||||
|
||||
|
||||
def test_update_delete_db_locator_map(db_manager):
    """update_delete_db_locator_map removes a uuid from a locator map, deleting the
    whole map when it was the only entry."""
    appointments, _ = set_up_appointments(db_manager, MAX_ITEMS)

    for uuid, appointment in appointments.items():
        locator = appointment.get("locator")
        map_before = db_manager.load_locator_map(locator)
        Cleaner.update_delete_db_locator_map([uuid], locator, db_manager)
        map_after = db_manager.load_locator_map(locator)

        if map_after is None:
            # The whole map was deleted: it must have existed before the call
            assert map_before is not None
        else:
            # Otherwise only the requested uuid was removed
            assert uuid in map_before and uuid not in map_after
|
||||
|
||||
|
||||
def test_delete_expired_appointment(db_manager):
    """Appointments reported as expired must disappear from the in-memory map."""
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        expired = random.sample(list(appointments.keys()), k=ITEMS)

        Cleaner.delete_expired_appointments(expired, appointments, locator_uuid_map, db_manager)

        # None of the expired uuids may survive in memory
        assert not set(expired).issubset(appointments.keys())
|
||||
|
||||
|
||||
def test_delete_completed_appointments(db_manager):
    """Completed appointments are removed both from memory and from the db."""
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        completed = random.sample(list(appointments.keys()), k=ITEMS)
        size_before = len(appointments)

        Cleaner.delete_completed_appointments(completed, appointments, locator_uuid_map, db_manager)

        # Exactly ITEMS appointments are gone from memory
        assert len(appointments) == size_before - ITEMS

        # And none of them remain in the db either
        stored = db_manager.load_watcher_appointments(include_triggered=True)
        assert not set(completed).issubset(stored)
|
||||
|
||||
|
||||
def test_flag_triggered_appointments(db_manager):
    """Triggered appointments are removed from memory and flagged in the db."""
    for _ in range(ITERATIONS):
        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
        triggered = random.sample(list(appointments.keys()), k=ITEMS)
        size_before = len(appointments)

        Cleaner.flag_triggered_appointments(triggered, appointments, locator_uuid_map, db_manager)

        # Exactly ITEMS appointments are gone from memory
        assert len(appointments) == size_before - ITEMS

        # And every one of them carries a triggered flag in the db
        assert set(triggered).issubset(db_manager.load_all_triggered_flags())
|
||||
|
||||
|
||||
def test_delete_completed_trackers_db_match(db_manager):
    """Completed trackers present in the db are removed from memory."""
    block_height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        sampled = random.sample(list(trackers.keys()), k=ITEMS)

        # Pretend each sampled tracker has reached six confirmations
        completed = {uuid: 6 for uuid in sampled}

        Cleaner.delete_completed_trackers(completed, block_height, trackers, tx_tracker_map, db_manager)
        assert not set(completed).issubset(trackers.keys())
|
||||
|
||||
|
||||
def test_delete_completed_trackers_no_db_match(db_manager):
    """delete_completed_trackers must cope with trackers that live only in memory
    (no db entry) and with completely made-up ones, without failing."""
    block_height = 0

    for _ in range(ITERATIONS):
        trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
        selected = random.sample(list(trackers.keys()), k=ITEMS)

        # Memory-only trackers sharing a penalty_txid with a db-backed tracker
        for uuid in selected[: ITEMS // 2]:
            penalty_txid = trackers[uuid].get("penalty_txid")
            dispute_txid = get_random_value_hex(32)
            locator = dispute_txid[:LOCATOR_LEN_HEX]
            new_uuid = uuid4().hex

            trackers[new_uuid] = {"locator": locator, "penalty_txid": penalty_txid}
            tx_tracker_map[penalty_txid].append(new_uuid)
            selected.append(new_uuid)

        # Trackers unknown to the db with fresh penalty_txids
        for _ in range(ITEMS // 2):
            new_uuid = uuid4().hex
            penalty_txid = get_random_value_hex(32)
            dispute_txid = get_random_value_hex(32)
            locator = dispute_txid[:LOCATOR_LEN_HEX]

            trackers[new_uuid] = {"locator": locator, "penalty_txid": penalty_txid}
            tx_tracker_map[penalty_txid] = [new_uuid]
            selected.append(new_uuid)

        completed = {uuid: 6 for uuid in selected}

        # Deletion must handle db-backed, memory-only, and made-up trackers alike
        Cleaner.delete_completed_trackers(completed, block_height, trackers, tx_tracker_map, db_manager)
        assert not set(completed).issubset(trackers.keys())
|
||||
@@ New file: test/teos/unit/test_db_manager.py (451 lines) @@
|
||||
import os
|
||||
import json
|
||||
import pytest
|
||||
import shutil
|
||||
from uuid import uuid4
|
||||
|
||||
from teos.db_manager import DBManager
|
||||
from teos.db_manager import (
|
||||
WATCHER_LAST_BLOCK_KEY,
|
||||
RESPONDER_LAST_BLOCK_KEY,
|
||||
LOCATOR_MAP_PREFIX,
|
||||
TRIGGERED_APPOINTMENTS_PREFIX,
|
||||
)
|
||||
|
||||
from common.constants import LOCATOR_LEN_BYTES
|
||||
|
||||
from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def watcher_appointments():
    """Ten dummy watcher appointments keyed by fresh uuids (shared per module)."""
    appointments = {}
    for _ in range(10):
        appointments[uuid4().hex] = generate_dummy_appointment(real_height=False)[0]
    return appointments
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def responder_trackers():
    """Ten random hex key/value pairs standing in for responder trackers."""
    trackers = {}
    for _ in range(10):
        key = get_random_value_hex(16)
        trackers[key] = get_random_value_hex(32)
    return trackers
|
||||
|
||||
|
||||
def open_create_db(db_path):
    """Open (or create) a DBManager at db_path.

    Returns the manager on success, or False if the path is rejected (ValueError).
    """
    try:
        return DBManager(db_path)
    except ValueError:
        return False
|
||||
|
||||
|
||||
def test_init():
    """DBManager creation: a fresh db can be created, an existing one reopened,
    and an invalid path is rejected."""
    db_path = "init_test_db"

    # Start from a clean slate
    if os.path.isdir(db_path):
        shutil.rmtree(db_path)

    # Create a db that does not exist yet
    manager = open_create_db(db_path)
    assert isinstance(manager, DBManager)
    manager.db.close()

    # Reopen the now-existing db
    manager = open_create_db(db_path)
    assert isinstance(manager, DBManager)
    manager.db.close()

    # An invalid path must be rejected
    assert open_create_db(0) is False

    # Clean up the test db
    shutil.rmtree(db_path)
|
||||
|
||||
|
||||
def test_load_appointments_db(db_manager):
    """load_appointments_db returns every entry stored under a given prefix."""
    # A made-up prefix holds no data yet
    prefix = "XX"
    assert len(db_manager.load_appointments_db(prefix)) == 0

    # Store a bunch of json entries under the prefix (as the manager does)
    expected = {}
    for _ in range(10):
        key = get_random_value_hex(16)
        value = get_random_value_hex(32)
        expected[key] = value
        db_manager.db.put((prefix + key).encode("utf-8"), json.dumps({"value": value}).encode("utf-8"))

    loaded = db_manager.load_appointments_db(prefix)

    # Keys must match exactly
    assert loaded.keys() == expected.keys()

    # And so must the values (unwrapped from the json container)
    loaded_values = [entry["value"] for entry in loaded.values()]
    assert set(loaded_values) == set(expected.values()) and (len(loaded_values) == len(expected))
|
||||
|
||||
|
||||
def test_get_last_known_block():
    """get_last_known_block returns None on an empty db and, once a hash has been
    stored, that exact hash — for both the watcher and the responder keys."""
    db_path = "empty_db"

    # Start from a clean slate
    if os.path.isdir(db_path):
        shutil.rmtree(db_path)

    db_manager = open_create_db(db_path)

    # An empty db has no last block for either component
    for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
        assert db_manager.get_last_known_block(key) is None

    # After storing a hash we must get that exact value back
    for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
        block_hash = get_random_value_hex(32)
        db_manager.db.put(key.encode("utf-8"), block_hash.encode("utf-8"))
        assert db_manager.get_last_known_block(key) == block_hash

    # Close the db before removing it (the handle was previously leaked, which can
    # make the removal fail while the db lock is still held)
    db_manager.db.close()

    # Removing test db
    shutil.rmtree(db_path)
|
||||
|
||||
|
||||
def test_create_entry(db_manager):
    """create_entry stores a value under its key, honoring the optional prefix."""
    key = get_random_value_hex(16)
    value = get_random_value_hex(32)

    # No prefix: the value is retrievable by the bare key (utf-8 encoded internally)
    db_manager.create_entry(key, value)
    assert db_manager.db.get(key.encode("utf-8")).decode("utf-8") == value

    # With a prefix, only the prefixed key resolves
    key = get_random_value_hex(16)
    prefix = "w"
    db_manager.create_entry(key, value, prefix=prefix)
    assert db_manager.db.get((prefix + key).encode("utf-8")).decode("utf-8") == value
    assert db_manager.db.get(key.encode("utf-8")) is None

    # Nor does any other prefix
    another_prefix = "r"
    assert db_manager.db.get((another_prefix + key).encode("utf-8")) is None
|
||||
|
||||
|
||||
def test_delete_entry(db_manager):
    """delete_entry removes entries, both with and without a prefix."""
    # Collect every key currently in the db and wipe them all
    stored_keys = [k.decode("utf-8") for k, _ in db_manager.db.iterator()]
    for key in stored_keys:
        db_manager.delete_entry(key)

    assert len([k for k, v in db_manager.db.iterator()]) == 0

    # Prefixed entries are deleted the same way
    prefix = "r"
    key = get_random_value_hex(16)
    value = get_random_value_hex(32)
    db_manager.create_entry(key, value, prefix)

    # The entry is there...
    assert db_manager.db.get((prefix + key).encode("utf-8")).decode("utf-8") == value

    # ...and gone once deleted
    db_manager.delete_entry(key, prefix)
    assert db_manager.db.get((prefix + key).encode("utf-8")) is None
|
||||
|
||||
|
||||
def test_load_watcher_appointments_empty(db_manager):
    """With nothing stored, no watcher appointments are loaded."""
    assert not db_manager.load_watcher_appointments()
|
||||
|
||||
|
||||
def test_load_responder_trackers_empty(db_manager):
    """With nothing stored, no responder trackers are loaded."""
    assert not db_manager.load_responder_trackers()
|
||||
|
||||
|
||||
def test_load_locator_map_empty(db_manager):
    """Loading the map of an unknown locator returns None."""
    unknown_locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    assert db_manager.load_locator_map(unknown_locator) is None
|
||||
|
||||
|
||||
def test_create_append_locator_map(db_manager):
    """create_append_locator_map creates a map and appends uuids without duplicates."""
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    first_uuid = uuid4().hex
    db_manager.create_append_locator_map(locator, first_uuid)

    # The map is created holding the single uuid
    assert db_manager.load_locator_map(locator) == [first_uuid]

    # Re-adding the same uuid is a no-op
    db_manager.create_append_locator_map(locator, first_uuid)
    assert db_manager.load_locator_map(locator) == [first_uuid]

    # A second uuid is appended to the same map
    second_uuid = uuid4().hex
    db_manager.create_append_locator_map(locator, second_uuid)
    assert set(db_manager.load_locator_map(locator)) == {first_uuid, second_uuid}
|
||||
|
||||
|
||||
def test_update_locator_map(db_manager):
    """update_locator_map replaces a map with a reduced subset of its uuids."""
    # Two appointments sharing the same locator
    locator = get_random_value_hex(32)
    uuid_a = uuid4().hex
    uuid_b = uuid4().hex
    db_manager.create_append_locator_map(locator, uuid_a)
    db_manager.create_append_locator_map(locator, uuid_b)

    current_map = db_manager.load_locator_map(locator)
    assert uuid_a in current_map

    # Drop one uuid and push the update
    current_map.remove(uuid_a)
    db_manager.update_locator_map(locator, current_map)

    updated_map = db_manager.load_locator_map(locator)
    assert uuid_a not in updated_map and uuid_b in updated_map and len(updated_map) == 1
|
||||
|
||||
|
||||
def test_update_locator_map_wong_data(db_manager):
    """Updating with a uuid list unrelated to the current map must be ignored.

    NOTE(review): 'wong' in the test name is a typo for 'wrong'; kept so the
    collected test id does not change.
    """
    locator = get_random_value_hex(32)
    db_manager.create_append_locator_map(locator, uuid4().hex)
    db_manager.create_append_locator_map(locator, uuid4().hex)

    map_before = db_manager.load_locator_map(locator)

    # Try to replace the map with completely unrelated data
    db_manager.update_locator_map(locator, [uuid4().hex])

    # The stored map must be unchanged
    assert db_manager.load_locator_map(locator) == map_before
|
||||
|
||||
|
||||
def test_update_locator_map_empty(db_manager):
    """Updating a locator map with an empty list must be ignored."""
    locator = get_random_value_hex(32)
    db_manager.create_append_locator_map(locator, uuid4().hex)
    db_manager.create_append_locator_map(locator, uuid4().hex)

    map_before = db_manager.load_locator_map(locator)
    db_manager.update_locator_map(locator, [])

    # The stored map must be unchanged
    assert db_manager.load_locator_map(locator) == map_before
|
||||
|
||||
|
||||
def test_delete_locator_map(db_manager):
    """delete_locator_map removes every stored locator map."""
    locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
    assert len(locator_maps) != 0

    # Only the keys are needed for deletion (previously iterated .items() and
    # discarded the values)
    for locator in locator_maps:
        db_manager.delete_locator_map(locator)

    locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX)
    assert len(locator_maps) == 0
|
||||
|
||||
|
||||
def test_store_load_watcher_appointment(db_manager, watcher_appointments):
    """Appointments stored for the watcher are loaded back identically."""
    for uuid, appointment in watcher_appointments.items():
        db_manager.store_watcher_appointment(uuid, appointment.to_json())

    db_appointments = db_manager.load_watcher_appointments()

    # Same uuids on both sides...
    assert watcher_appointments.keys() == db_appointments.keys()

    # ...and each loaded appointment serializes back to the original json
    for uuid, appointment in watcher_appointments.items():
        assert json.dumps(db_appointments[uuid], sort_keys=True, separators=(",", ":")) == appointment.to_json()
|
||||
|
||||
|
||||
def test_store_load_triggered_appointment(db_manager):
    """Appointments flagged as triggered are only loaded with include_triggered."""
    plain_load = db_manager.load_watcher_appointments()
    load_with_triggered = db_manager.load_watcher_appointments(include_triggered=True)

    # No flags yet, so both loads must agree
    assert plain_load == load_with_triggered

    # Store an appointment and flag it as triggered
    triggered_appointment, _ = generate_dummy_appointment(real_height=False)
    uuid = uuid4().hex
    db_manager.store_watcher_appointment(uuid, triggered_appointment.to_json())
    db_manager.create_triggered_appointment_flag(uuid)

    # It only shows up when triggered appointments are explicitly requested
    assert plain_load == db_manager.load_watcher_appointments()
    assert uuid in db_manager.load_watcher_appointments(include_triggered=True)
|
||||
|
||||
|
||||
def test_store_load_responder_trackers(db_manager, responder_trackers):
    """Trackers stored for the responder are loaded back identically."""
    for key, value in responder_trackers.items():
        db_manager.store_responder_tracker(key, json.dumps({"value": value}))

    loaded = db_manager.load_responder_trackers()
    loaded_values = [tracker["value"] for tracker in loaded.values()]

    # Same keys, and same multiset of values
    assert responder_trackers.keys() == loaded.keys()
    assert set(responder_trackers.values()) == set(loaded_values) and len(responder_trackers) == len(loaded_values)
|
||||
|
||||
|
||||
def test_delete_watcher_appointment(db_manager, watcher_appointments):
    """Deleting every stored watcher appointment, one by one, empties the db."""
    # There must be data from the previous tests
    assert len(db_manager.load_watcher_appointments(include_triggered=True)) != 0

    for uuid in watcher_appointments.keys():
        db_manager.delete_watcher_appointment(uuid)

    assert len(db_manager.load_watcher_appointments()) == 0
|
||||
|
||||
|
||||
def test_batch_delete_watcher_appointments(db_manager, watcher_appointments):
    """batch_delete_watcher_appointments deletes the requested uuids in one update."""
    # Store the appointments first
    for uuid, appointment in watcher_appointments.items():
        db_manager.store_watcher_appointment(uuid, appointment.to_json())

    uuids = list(watcher_appointments.keys())
    midpoint = len(uuids) // 2
    first_half, second_half = uuids[:midpoint], uuids[midpoint:]

    # Deleting the first half in a batch leaves exactly the second half
    db_manager.batch_delete_watcher_appointments(first_half)
    remaining = db_manager.load_watcher_appointments()
    assert not set(remaining.keys()).issuperset(first_half)
    assert set(remaining.keys()).issuperset(second_half)

    # Deleting the rest empties the db
    db_manager.batch_delete_watcher_appointments(second_half)
    assert not db_manager.load_watcher_appointments()
|
||||
|
||||
|
||||
def test_delete_responder_tracker(db_manager, responder_trackers):
    """Deleting every stored responder tracker, one by one, empties the db."""
    # There must be data from the previous tests
    assert len(db_manager.load_responder_trackers()) != 0

    for key in responder_trackers.keys():
        db_manager.delete_responder_tracker(key)

    assert len(db_manager.load_responder_trackers()) == 0
|
||||
|
||||
|
||||
def test_batch_delete_responder_trackers(db_manager, responder_trackers):
    """batch_delete_responder_trackers deletes the requested keys in one update."""
    # Store the trackers first
    for uuid, value in responder_trackers.items():
        db_manager.store_responder_tracker(uuid, json.dumps({"value": value}))

    keys = list(responder_trackers.keys())
    midpoint = len(keys) // 2
    first_half, second_half = keys[:midpoint], keys[midpoint:]

    # Deleting the first half in a batch leaves exactly the second half
    db_manager.batch_delete_responder_trackers(first_half)
    remaining = db_manager.load_responder_trackers()
    assert not set(remaining.keys()).issuperset(first_half)
    assert set(remaining.keys()).issuperset(second_half)

    # Deleting the rest empties the db
    db_manager.batch_delete_responder_trackers(second_half)
    assert not db_manager.load_responder_trackers()
|
||||
|
||||
|
||||
def test_store_load_last_block_hash_watcher(db_manager):
    """The watcher's last block hash is loaded back exactly as stored."""
    block_hash = get_random_value_hex(32)
    db_manager.store_last_block_hash_watcher(block_hash)

    assert db_manager.load_last_block_hash_watcher() == block_hash
|
||||
|
||||
|
||||
def test_store_load_last_block_hash_responder(db_manager):
    """The responder's last block hash is loaded back exactly as stored."""
    block_hash = get_random_value_hex(32)
    db_manager.store_last_block_hash_responder(block_hash)

    assert db_manager.load_last_block_hash_responder() == block_hash
|
||||
|
||||
|
||||
def test_create_triggered_appointment_flag(db_manager):
    """create_triggered_appointment_flag stores a flag under the triggered prefix."""
    flagged_key = get_random_value_hex(16)
    db_manager.create_triggered_appointment_flag(flagged_key)
    assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + flagged_key).encode("utf-8")) is not None

    # A key that was never flagged must be absent
    unflagged_key = get_random_value_hex(16)
    assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + unflagged_key).encode("utf-8")) is None
|
||||
|
||||
|
||||
def test_batch_create_triggered_appointment_flag(db_manager):
    """batch_create_triggered_appointment_flag adds all the given flags at once."""
    keys = [get_random_value_hex(16) for _ in range(10)]

    # None of the flags should pre-exist
    assert not set(db_manager.load_all_triggered_flags()).issuperset(keys)

    # After the batch creation, they all must
    db_manager.batch_create_triggered_appointment_flag(keys)
    assert set(db_manager.load_all_triggered_flags()).issuperset(keys)
|
||||
|
||||
|
||||
def test_load_all_triggered_flags(db_manager):
    """load_all_triggered_flags returns every flag stored so far."""
    # Previous tests have left some flags behind
    flags = db_manager.load_all_triggered_flags()

    # Adding one more flag must show up in a fresh load
    new_uuid = uuid4().hex
    db_manager.create_triggered_appointment_flag(new_uuid)
    flags.append(new_uuid)

    assert set(db_manager.load_all_triggered_flags()) == set(flags)
|
||||
|
||||
|
||||
def test_delete_triggered_appointment_flag(db_manager):
    """delete_triggered_appointment_flag removes flags one by one."""
    keys = db_manager.load_all_triggered_flags()

    # Delete every flag currently stored
    for key in keys:
        db_manager.delete_triggered_appointment_flag(key)

    # None of them can be loaded back
    for key in keys:
        assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is None
|
||||
|
||||
|
||||
def test_batch_delete_triggered_appointment_flag(db_manager):
    """batch_delete_triggered_appointment_flag removes flags in batches."""
    # Add some flags first
    keys = [get_random_value_hex(16) for _ in range(10)]
    db_manager.batch_create_triggered_appointment_flag(keys)

    midpoint = len(keys) // 2
    first_half, second_half = keys[:midpoint], keys[midpoint:]

    # Deleting the first half leaves exactly the second half
    db_manager.batch_delete_triggered_appointment_flag(first_half)
    db_flags = db_manager.load_all_triggered_flags()
    assert not set(db_flags).issuperset(first_half)
    assert set(db_flags).issuperset(second_half)

    # Deleting the rest leaves no flags at all
    db_manager.batch_delete_triggered_appointment_flag(second_half)
    assert not db_manager.load_all_triggered_flags()
|
||||
@@ New file: test/teos/unit/test_inspector.py (227 lines) @@
|
||||
from binascii import unhexlify
|
||||
|
||||
from teos.errors import *
|
||||
from teos.inspector import Inspector
|
||||
from common.appointment import Appointment
|
||||
from teos.block_processor import BlockProcessor
|
||||
from teos.conf import MIN_TO_SELF_DELAY
|
||||
|
||||
from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment_data, generate_keypair, get_config
|
||||
|
||||
from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
|
||||
from common.cryptographer import Cryptographer
|
||||
from common.logger import Logger
|
||||
|
||||
from teos import LOG_PREFIX
|
||||
import common.cryptographer
|
||||
|
||||
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
|
||||
|
||||
|
||||
# Inspector instance under test, built from the test configuration
inspector = Inspector(get_config())
# Return value every inspector check produces for a valid field
APPOINTMENT_OK = (0, None)

# Strings of plausible length that are not valid hex
NO_HEX_STRINGS = [
    "R" * LOCATOR_LEN_HEX,
    get_random_value_hex(LOCATOR_LEN_BYTES - 1) + "PP",
    "$" * LOCATOR_LEN_HEX,
    " " * LOCATOR_LEN_HEX,
]
# Wrong-type values for integer fields (start/end time, to_self_delay)
WRONG_TYPES = [
    [],
    "",
    get_random_value_hex(LOCATOR_LEN_BYTES),
    3.2,
    2.0,
    (),
    object,
    {},
    " " * LOCATOR_LEN_HEX,
    object(),
]
# Wrong-type values excluding str — presumably for string-typed fields where any
# str would pass the type check (usage not visible here; verify at call sites)
WRONG_TYPES_NO_STR = [[], unhexlify(get_random_value_hex(LOCATOR_LEN_BYTES)), 3.2, 2.0, (), object, {}, object()]
|
||||
|
||||
|
||||
def test_check_locator():
    """check_locator: valid hex of the exact length passes; everything else is
    rejected with the matching error code."""
    # Correct type, size and format
    assert Inspector.check_locator(get_random_value_hex(LOCATOR_LEN_BYTES)) == APPOINTMENT_OK

    # One byte too long
    assert Inspector.check_locator(get_random_value_hex(LOCATOR_LEN_BYTES + 1))[0] == APPOINTMENT_WRONG_FIELD_SIZE

    # One byte too short
    assert Inspector.check_locator(get_random_value_hex(LOCATOR_LEN_BYTES - 1))[0] == APPOINTMENT_WRONG_FIELD_SIZE

    # Missing
    assert Inspector.check_locator(None)[0] == APPOINTMENT_EMPTY_FIELD

    # Anything that is not a string
    for bad_locator in [[], -1, 3.2, 0, 4, (), object, {}, object()]:
        assert Inspector.check_locator(bad_locator)[0] == APPOINTMENT_WRONG_FIELD_TYPE

    # Right shape but not hex
    for bad_locator in NO_HEX_STRINGS:
        assert Inspector.check_locator(bad_locator)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
|
||||
|
||||
|
||||
def test_check_start_time():
    """check_start_time: the start must be strictly after the current height."""
    current_height = 100  # times are expressed in block height

    # A start in the future is fine
    assert Inspector.check_start_time(101, current_height) == APPOINTMENT_OK

    # The current block or any past one is too small
    for too_small in [100, 99, 98, -1]:
        assert Inspector.check_start_time(too_small, current_height)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # Missing
    assert Inspector.check_start_time(None, current_height)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong types
    for bad_value in WRONG_TYPES:
        assert Inspector.check_start_time(bad_value, current_height)[0] == APPOINTMENT_WRONG_FIELD_TYPE
|
||||
|
||||
|
||||
def test_check_end_time():
    """check_end_time requires the end to be after both the start and the current height."""
    # Times are expressed in block height
    current_height = 100
    start_height = 120

    # End after the start and in the future is accepted
    assert Inspector.check_end_time(121, start_height, current_height) == APPOINTMENT_OK

    # End at or before the start time is flagged as too small
    for too_small in (120, 119, 118, -1):
        assert Inspector.check_end_time(too_small, start_height, current_height)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # Same goes for an end at the current height or in the past
    current_height = 130
    for too_small in (130, 129, 128, -1):
        assert Inspector.check_end_time(too_small, start_height, current_height)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # A missing value is flagged as empty
    assert Inspector.check_end_time(None, start_height, current_height)[0] == APPOINTMENT_EMPTY_FIELD

    # Anything that is not an int is flagged as the wrong type
    for wrong_type in WRONG_TYPES:
        assert Inspector.check_end_time(wrong_type, start_height, current_height)[0] == APPOINTMENT_WRONG_FIELD_TYPE
|
||||
|
||||
|
||||
def test_check_to_self_delay():
    """check_to_self_delay enforces the MIN_TO_SELF_DELAY lower bound."""
    # Delays at or above the minimum are accepted
    for ok_delay in (MIN_TO_SELF_DELAY, MIN_TO_SELF_DELAY + 1, MIN_TO_SELF_DELAY + 1000):
        assert inspector.check_to_self_delay(ok_delay) == APPOINTMENT_OK

    # Delays below the minimum are flagged as too small
    for small_delay in (MIN_TO_SELF_DELAY - 1, MIN_TO_SELF_DELAY - 2, 0, -1, -1000):
        assert inspector.check_to_self_delay(small_delay)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # A missing value is flagged as empty
    assert inspector.check_to_self_delay(None)[0] == APPOINTMENT_EMPTY_FIELD

    # Anything that is not an int is flagged as the wrong type
    for wrong_type in WRONG_TYPES:
        assert inspector.check_to_self_delay(wrong_type)[0] == APPOINTMENT_WRONG_FIELD_TYPE
|
||||
|
||||
|
||||
def test_check_blob():
    """check_blob validates the encrypted blob field of an appointment."""
    # A hex string is accepted
    valid_blob = get_random_value_hex(120)
    assert Inspector.check_blob(valid_blob) == APPOINTMENT_OK

    # FIXME: There is no properly defined format for the blob content yet. It should at least be
    #        restricted by size, and checked to be a multiple of the encryption function's block size.

    # Non-string values are rejected as the wrong type
    for wrong_blob in WRONG_TYPES_NO_STR:
        assert Inspector.check_blob(wrong_blob)[0] == APPOINTMENT_WRONG_FIELD_TYPE

    # A missing blob is rejected as empty
    assert Inspector.check_blob(None)[0] == APPOINTMENT_EMPTY_FIELD

    # Non-hex strings are rejected as badly formatted
    for wrong_blob in NO_HEX_STRINGS:
        assert Inspector.check_blob(wrong_blob)[0] == APPOINTMENT_WRONG_FIELD_FORMAT
|
||||
|
||||
|
||||
def test_check_appointment_signature():
    """Signatures from the right key are accepted; signatures from any other key are rejected."""
    # The inspector receives the public key as hex
    client_sk, client_pk = generate_keypair()
    client_pk_hex = client_pk.format().hex()

    appointment_data, _ = generate_dummy_appointment_data(real_height=False)

    # A signature matching the provided public key is accepted
    assert Inspector.check_appointment_signature(
        appointment_data["appointment"], appointment_data["signature"], appointment_data["public_key"]
    )

    # Sign with an unrelated key to produce a signature that must be rejected
    fake_sk, _ = generate_keypair()
    serialized_appointment = Appointment.from_dict(appointment_data["appointment"]).serialize()
    bad_signature = Cryptographer.sign(serialized_appointment, fake_sk)

    result = Inspector.check_appointment_signature(appointment_data["appointment"], bad_signature, client_pk_hex)
    assert result[0] == APPOINTMENT_INVALID_SIGNATURE
|
||||
|
||||
|
||||
def test_inspect(run_bitcoind):
    """End-to-end check of inspect() with a fully valid, correctly signed appointment.

    Every individual check_* function is exercised on its own above, so here we only
    verify that well-formed data makes it all the way through and comes back intact.
    """
    client_sk, client_pk = generate_keypair()
    client_pk_hex = client_pk.format().hex()

    # Build a valid appointment
    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
    start_time = BlockProcessor.get_block_count() + 5
    end_time = start_time + 20
    to_self_delay = MIN_TO_SELF_DELAY
    encrypted_blob = get_random_value_hex(64)

    appointment_data = {
        "locator": locator,
        "start_time": start_time,
        "end_time": end_time,
        "to_self_delay": to_self_delay,
        "encrypted_blob": encrypted_blob,
    }

    signature = Cryptographer.sign(Appointment.from_dict(appointment_data).serialize(), client_sk)
    appointment = inspector.inspect(appointment_data, signature, client_pk_hex)

    # The inspector should hand back an Appointment carrying exactly the data we sent
    assert type(appointment) == Appointment
    assert appointment.locator == locator
    assert appointment.start_time == start_time
    assert appointment.end_time == end_time
    assert appointment.to_self_delay == to_self_delay
    assert appointment.encrypted_blob.data == encrypted_blob
|
||||
483
test/teos/unit/test_responder.py
Normal file
483
test/teos/unit/test_responder.py
Normal file
@@ -0,0 +1,483 @@
|
||||
import json
|
||||
import pytest
|
||||
import random
|
||||
from queue import Queue
|
||||
from uuid import uuid4
|
||||
from shutil import rmtree
|
||||
from copy import deepcopy
|
||||
from threading import Thread
|
||||
|
||||
from teos.db_manager import DBManager
|
||||
from teos.responder import Responder, TransactionTracker
|
||||
from teos.block_processor import BlockProcessor
|
||||
from teos.chain_monitor import ChainMonitor
|
||||
from teos.tools import bitcoin_cli
|
||||
|
||||
from common.constants import LOCATOR_LEN_HEX
|
||||
from bitcoind_mock.transaction import create_dummy_transaction, create_tx_from_hex
|
||||
from test.teos.unit.conftest import generate_block, generate_blocks, get_random_value_hex
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def responder(db_manager):
    """Module-scoped Responder hooked up to a running ChainMonitor."""
    resp = Responder(db_manager)
    monitor = ChainMonitor(Queue(), resp.block_queue)
    monitor.monitor_chain()

    return resp
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def temp_db_manager():
    """Session-scoped throwaway DBManager; the backing directory is removed on teardown."""
    db_name = get_random_value_hex(8)
    manager = DBManager(db_name)

    yield manager

    # Close the handle before wiping the on-disk database
    manager.db.close()
    rmtree(db_name)
|
||||
|
||||
|
||||
def create_dummy_tracker_data(random_txid=False, penalty_rawtx=None):
    """Builds the tuple of data needed to construct a TransactionTracker.

    The hard-coded transaction below is a valid one; some tests want real data while
    others only need distinct penalty_txids (random_txid=True) or supply their own
    raw penalty transaction (penalty_rawtx).
    """
    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
    penalty_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"

    if penalty_rawtx is not None:
        # Derive the txid from the caller-provided raw transaction
        penalty_txid = create_tx_from_hex(penalty_rawtx).tx_id.hex()
    else:
        penalty_rawtx = (
            "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
            "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
            "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
            "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba"
            "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e"
            "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
        )

    if random_txid is True:
        penalty_txid = get_random_value_hex(32)

    appointment_end = bitcoin_cli().getblockcount() + 2
    locator = dispute_txid[:LOCATOR_LEN_HEX]

    return locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end
|
||||
|
||||
|
||||
def create_dummy_tracker(random_txid=False, penalty_rawtx=None):
    """Convenience wrapper: builds a TransactionTracker straight from dummy data."""
    tracker_data = create_dummy_tracker_data(random_txid, penalty_rawtx)
    return TransactionTracker(*tracker_data)
|
||||
|
||||
|
||||
def test_tracker_init(run_bitcoind):
    """TransactionTracker stores the breach data it is constructed with."""
    locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data()
    tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)

    assert tracker.dispute_txid == dispute_txid
    assert tracker.penalty_txid == penalty_txid
    assert tracker.penalty_rawtx == penalty_rawtx
    assert tracker.appointment_end == appointment_end
|
||||
|
||||
|
||||
def test_on_sync(run_bitcoind, responder):
    """on_sync holds while we are at most one block behind the tip."""
    chain_tip = BlockProcessor.get_best_block_hash()
    assert Responder.on_sync(chain_tip) is True

    # Being exactly one block behind still counts as in sync
    generate_block()
    assert Responder.on_sync(chain_tip) is True
|
||||
|
||||
|
||||
def test_on_sync_fail(responder):
    """on_sync fails once we fall more than one block behind the tip."""
    old_tip = BlockProcessor.get_best_block_hash()
    generate_blocks(2)

    assert Responder.on_sync(old_tip) is False
|
||||
|
||||
|
||||
def test_tracker_to_dict():
    """to_dict exposes the tracker's fields under the expected keys."""
    tracker = create_dummy_tracker()
    as_dict = tracker.to_dict()

    assert as_dict["locator"] == tracker.locator
    assert as_dict["penalty_rawtx"] == tracker.penalty_rawtx
    assert as_dict["appointment_end"] == tracker.appointment_end
|
||||
|
||||
|
||||
def test_tracker_to_json():
    """to_json serializes the same fields as to_dict, as parseable JSON."""
    tracker = create_dummy_tracker()
    decoded = json.loads(tracker.to_json())

    assert decoded["locator"] == tracker.locator
    assert decoded["penalty_rawtx"] == tracker.penalty_rawtx
    assert decoded["appointment_end"] == tracker.appointment_end
|
||||
|
||||
|
||||
def test_tracker_from_dict():
    """A tracker rebuilt via from_dict round-trips to the same dictionary."""
    original_dict = create_dummy_tracker().to_dict()
    rebuilt = TransactionTracker.from_dict(original_dict)

    assert rebuilt.to_dict() == original_dict
|
||||
|
||||
|
||||
def test_tracker_from_dict_invalid_data():
    """from_dict must raise ValueError when any mandatory field is missing (None).

    Uses pytest.raises instead of the old try/assert False/except ValueError: assert True
    pattern, so an unexpected exception type surfaces as an error instead of being
    reported as a bare assertion failure.
    """
    tracker_dict = create_dummy_tracker().to_dict()

    # Knock out each mandatory field in turn and make sure from_dict rejects the dict
    for field in ["dispute_txid", "penalty_txid", "penalty_rawtx", "appointment_end"]:
        corrupted = deepcopy(tracker_dict)
        corrupted[field] = None

        with pytest.raises(ValueError):
            TransactionTracker.from_dict(corrupted)
|
||||
|
||||
|
||||
def test_init_responder(temp_db_manager):
    """A freshly built Responder starts with empty state and an empty block queue."""
    responder = Responder(temp_db_manager)

    assert isinstance(responder.trackers, dict) and not responder.trackers
    assert isinstance(responder.tx_tracker_map, dict) and not responder.tx_tracker_map
    assert isinstance(responder.unconfirmed_txs, list) and not responder.unconfirmed_txs
    assert isinstance(responder.missed_confirmations, dict) and not responder.missed_confirmations
    assert responder.block_queue.empty()
|
||||
|
||||
|
||||
def test_handle_breach(db_manager):
    """A breach with a valid penalty transaction must be delivered to the network."""
    responder = Responder(db_manager)

    uuid = uuid4().hex
    tracker = create_dummy_tracker()

    # The block_hash is currently irrelevant for handle_breach; it will matter once
    # error handling is built out
    broadcast_receipt = responder.handle_breach(
        tracker.locator,
        uuid,
        tracker.dispute_txid,
        tracker.penalty_txid,
        tracker.penalty_rawtx,
        tracker.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert broadcast_receipt.delivered is True
|
||||
|
||||
|
||||
def test_handle_breach_bad_response(responder):
    """A breach with an invalid penalty transaction must not be delivered."""
    uuid = uuid4().hex
    tracker = create_dummy_tracker()

    # Replacing the rawtx with a bare txid is enough to make the bitcoind mock reject
    # the broadcast; better tests are needed against a real bitcoind though
    tracker.penalty_rawtx = tracker.penalty_txid

    # The block_hash is currently irrelevant for handle_breach; it will matter once
    # error handling is built out
    broadcast_receipt = responder.handle_breach(
        tracker.locator,
        uuid,
        tracker.dispute_txid,
        tracker.penalty_txid,
        tracker.penalty_rawtx,
        tracker.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert broadcast_receipt.delivered is False
|
||||
|
||||
|
||||
def test_add_tracker(responder):
    """add_tracker registers the tracker in all the responder's in-memory structures."""
    for _ in range(20):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            random_txid=True
        )

        # The tracker must be unknown to the responder beforehand
        assert uuid not in responder.trackers
        assert penalty_txid not in responder.tx_tracker_map
        assert penalty_txid not in responder.unconfirmed_txs

        # And known afterwards (confirmations=0, so the tx is also flagged as unconfirmed)
        responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, 0)
        assert uuid in responder.trackers
        assert penalty_txid in responder.tx_tracker_map
        assert penalty_txid in responder.unconfirmed_txs

        # The stored summary must match the data we handed in
        tracker_data = responder.trackers[uuid]
        assert tracker_data.get("penalty_txid") == penalty_txid
        assert tracker_data.get("locator") == locator
        assert tracker_data.get("appointment_end") == appointment_end
|
||||
|
||||
|
||||
def test_add_tracker_same_penalty_txid(responder):
    """Two trackers sharing one penalty txid should both be registered under that tx."""
    locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(random_txid=True)
    uuids = [uuid4().hex, uuid4().hex]

    for uuid in uuids:
        responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, 0)

    # Both trackers must be present and the shared txid tracked
    assert all(uuid in responder.trackers for uuid in uuids)
    assert penalty_txid in responder.tx_tracker_map
    assert penalty_txid in responder.unconfirmed_txs

    # The stored summary must match for both trackers
    for uuid in uuids:
        tracker_data = responder.trackers[uuid]
        assert tracker_data.get("penalty_txid") == penalty_txid
        assert tracker_data.get("locator") == locator
        assert tracker_data.get("appointment_end") == appointment_end
|
||||
|
||||
|
||||
def test_add_tracker_already_confirmed(responder):
    """Trackers added with confirmations > 0 must not be flagged as unconfirmed."""
    for confirmations in range(1, 21):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            penalty_rawtx=create_dummy_transaction().hex()
        )

        responder.add_tracker(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)

        assert penalty_txid not in responder.unconfirmed_txs
|
||||
|
||||
|
||||
def test_do_watch(temp_db_manager):
    """do_watch confirms broadcast penalty txs and completes trackers after enough blocks."""
    # A fresh responder keeps the test independent of module state
    responder = Responder(temp_db_manager)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue)
    chain_monitor.monitor_chain()

    trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]

    # Pre-load every tracker into the responder's state and the db
    for tracker in trackers:
        uuid = uuid4().hex

        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)

        # The db must hold the same info for do_watch to work with
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

    # Start watching in the background
    Thread(target=responder.do_watch, daemon=True).start()

    # Broadcast the first five penalty transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli().sendrawtransaction(tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    generate_block()

    # The broadcast txs should have left the unconfirmed list after one block
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Five more blocks should fully complete those five trackers
    generate_blocks(5)

    assert not set(broadcast_txs).issubset(responder.tx_tracker_map)

    # Now push the remaining fifteen through the same cycle
    broadcast_txs = []
    for tracker in trackers[5:]:
        bitcoin_cli().sendrawtransaction(tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    generate_blocks(6)

    # Nothing should be left to track
    assert len(responder.tx_tracker_map) == 0
|
||||
|
||||
|
||||
def test_check_confirmations(db_manager):
    """check_confirmations confirms txs seen in a block and penalizes the rest.

    Given the list of transactions in a block, any known unconfirmed penalty tx that
    appears is confirmed; any that does not gets a missed confirmation counted.
    """
    responder = Responder(db_manager)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue)
    chain_monitor.monitor_chain()

    # Simulate the txids contained in a new block
    block_txs = [get_random_value_hex(32) for _ in range(20)]

    # Half of the responder's unconfirmed txs will show up in the block, half will not
    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
    txs_in_block = random.sample(block_txs, k=10)
    responder.unconfirmed_txs.extend(txs_in_block)

    # Under normal operation those txs would also be in the tx_tracker_map
    responder.tx_tracker_map = {
        txid: TransactionTracker(txid[:LOCATOR_LEN_HEX], txid, None, None, None) for txid in responder.unconfirmed_txs
    }

    # No tx has missed a confirmation yet
    assert len(responder.missed_confirmations) == 0

    responder.check_confirmations(block_txs)

    # Txs seen in the block are confirmed now (gone from unconfirmed_txs)...
    for txid in txs_in_block:
        assert txid not in responder.unconfirmed_txs

    # ...and every tx still unconfirmed has one missed confirmation on record
    for txid in responder.unconfirmed_txs:
        assert responder.missed_confirmations[txid] == 1
|
||||
|
||||
|
||||
def test_get_txs_to_rebroadcast(responder):
    """Only txids that have missed too many confirmations are flagged for rebroadcast.

    Fixes the bug the old TODO warned about: results are now compared as sets (the
    return order of get_txs_to_rebroadcast is not part of its contract), and the
    mixed-dict case builds a fresh dict instead of mutating txs_missing_some_conf
    in place through an alias.
    """
    # Fake txids that have missed at least 6 confirmations each
    txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}

    # And some that have missed a few confirmations, but not enough to be rebroadcast
    txs_missing_some_conf = {get_random_value_hex(32): 3 for _ in range(10)}

    # Every tx in the first dict should be flagged for rebroadcast
    responder.missed_confirmations = txs_missing_too_many_conf
    assert set(responder.get_txs_to_rebroadcast()) == set(txs_missing_too_many_conf.keys())

    # None of the txs in the second dict should be flagged
    responder.missed_confirmations = txs_missing_some_conf
    assert responder.get_txs_to_rebroadcast() == []

    # A mixed dict should only flag the ones missing too many confirmations
    responder.missed_confirmations = {**txs_missing_some_conf, **txs_missing_too_many_conf}
    assert set(responder.get_txs_to_rebroadcast()) == set(txs_missing_too_many_conf.keys())
|
||||
|
||||
|
||||
def test_get_completed_trackers(db_manager):
    """A tracker is complete once its appointment end is reached with enough confirmations."""
    initial_height = bitcoin_cli().getblockcount()

    responder = Responder(db_manager)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue)
    chain_monitor.monitor_chain()

    # Three kinds of trackers: end reached with enough confs (> MIN_CONFIRMATIONS),
    # end reached without enough confs, and end not reached yet.
    trackers_end_conf = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }

    trackers_end_no_conf = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_end_no_conf[uuid4().hex] = tracker

    trackers_no_end = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        tracker.appointment_end += 10
        trackers_no_end[uuid4().hex] = tracker

    all_trackers = {**trackers_end_conf, **trackers_end_no_conf, **trackers_no_end}

    # Register every tracker with the responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }

    # Broadcast all the penalty transactions
    for tracker in all_trackers.values():
        bitcoin_cli().sendrawtransaction(tracker.penalty_rawtx)

    # The dummy appointments end at current height + 2, but trackers need at least 6 confs by default
    generate_blocks(6)

    # Only the first group should be reported as completed
    completed_trackers = responder.get_completed_trackers(initial_height + 6)
    expected_ids = set(trackers_end_conf.keys())
    assert set(completed_trackers.keys()) == expected_ids

    # Six additional blocks should also complete trackers_no_end
    generate_blocks(6)

    completed_trackers = responder.get_completed_trackers(initial_height + 12)
    expected_ids.update(trackers_no_end.keys())
    assert set(completed_trackers.keys()) == expected_ids
|
||||
|
||||
|
||||
def test_rebroadcast(db_manager):
    """rebroadcast resends flagged penalty txs and resets their missed-confirmation counters."""
    responder = Responder(db_manager)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue)
    chain_monitor.monitor_chain()

    flagged_txids = []

    # rebroadcast internally calls add_response with retry=True, so the tracker data
    # must already live in the responder (and in the db)
    for i in range(20):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            penalty_rawtx=create_dummy_transaction().hex()
        )

        tracker = TransactionTracker(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)

        responder.trackers[uuid] = {
            "locator": locator,
            "penalty_txid": penalty_txid,
            "appointment_end": appointment_end,
        }

        # Mirror the state in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

        responder.tx_tracker_map[penalty_txid] = [uuid]
        responder.unconfirmed_txs.append(penalty_txid)

        # Flag every other transaction for rebroadcast
        if (i % 2) == 0:
            flagged_txids.append(penalty_txid)

    # The block_hash is currently irrelevant for rebroadcast; it will matter once
    # error handling is built out
    receipts = responder.rebroadcast(flagged_txids)

    # Every flagged tx must have been delivered and its missed-confirmation counter reset
    for txid, receipt in receipts:
        assert txid in flagged_txids  # sanity check
        assert receipt.delivered is True
        assert responder.missed_confirmations[txid] == 0
|
||||
58
test/teos/unit/test_tools.py
Normal file
58
test/teos/unit/test_tools.py
Normal file
@@ -0,0 +1,58 @@
|
||||
from teos.tools import can_connect_to_bitcoind, in_correct_network, bitcoin_cli
|
||||
|
||||
from common.tools import check_sha256_hex_format
|
||||
|
||||
|
||||
def test_in_correct_network(run_bitcoind):
    """The bitcoind mock behaves as regtest, so any other network check must fail."""
    for network, expected in [("mainnet", False), ("testnet", False), ("regtest", True)]:
        assert in_correct_network(network) is expected
|
||||
|
||||
|
||||
def test_can_connect_to_bitcoind():
    """With the mock running, connecting to bitcoind must succeed."""
    assert can_connect_to_bitcoind() is True
|
||||
|
||||
|
||||
# def test_can_connect_to_bitcoind_bitcoin_not_running():
|
||||
# # Kill the simulator thread and test the check fails
|
||||
# bitcoind_process.kill()
|
||||
# assert can_connect_to_bitcoind() is False
|
||||
|
||||
|
||||
def test_bitcoin_cli():
    """bitcoin_cli must be able to reach bitcoind and run a command.

    If the call fails it raises, and pytest reports the actual exception — more
    informative than the old try/assert True/except Exception: assert False
    pattern, which swallowed the diagnostic.
    """
    bitcoin_cli().help()
|
||||
|
||||
|
||||
def test_check_sha256_hex_format():
    """check_sha256_hex_format accepts exactly 64-character hex strings, any case."""
    accepted = [
        "abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd",  # lowercase
        "ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCD",  # uppercase
        "0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF",  # mixed case
        "0123456789012345678901234567890123456789012345678901234567890123",  # digits only
    ]
    rejected = [
        None,  # empty value
        "",  # empty string
        0x0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF,  # wrong type (int)
        "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf",  # too short
        "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0",  # too long
        "g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",  # non-hex
    ]

    for value in accepted:
        assert check_sha256_hex_format(value) is True

    for value in rejected:
        assert check_sha256_hex_format(value) is False
|
||||
253
test/teos/unit/test_watcher.py
Normal file
253
test/teos/unit/test_watcher.py
Normal file
@@ -0,0 +1,253 @@
|
||||
import pytest
|
||||
from uuid import uuid4
|
||||
from shutil import rmtree
|
||||
from threading import Thread
|
||||
from coincurve import PrivateKey
|
||||
|
||||
from teos.watcher import Watcher
|
||||
from teos.responder import Responder
|
||||
from teos.tools import bitcoin_cli
|
||||
from teos.chain_monitor import ChainMonitor
|
||||
from teos.db_manager import DBManager
|
||||
|
||||
from test.teos.unit.conftest import (
|
||||
generate_blocks,
|
||||
generate_dummy_appointment,
|
||||
get_random_value_hex,
|
||||
generate_keypair,
|
||||
get_config,
|
||||
)
|
||||
from teos.conf import EXPIRY_DELTA, MAX_APPOINTMENTS
|
||||
|
||||
import common.cryptographer
|
||||
from teos import LOG_PREFIX
|
||||
from common.logger import Logger
|
||||
from common.tools import compute_locator
|
||||
from common.cryptographer import Cryptographer
|
||||
|
||||
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
|
||||
|
||||
|
||||
APPOINTMENTS = 5
|
||||
START_TIME_OFFSET = 1
|
||||
END_TIME_OFFSET = 1
|
||||
TEST_SET_SIZE = 200
|
||||
|
||||
|
||||
signing_key, public_key = generate_keypair()
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def temp_db_manager():
    """Session-scoped throwaway DBManager; the backing directory is removed on teardown."""
    db_name = get_random_value_hex(8)
    manager = DBManager(db_name)

    yield manager

    # Close the handle before wiping the on-disk database
    manager.db.close()
    rmtree(db_name)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def watcher(db_manager):
    """Module-scoped Watcher (with its own Responder) hooked up to a ChainMonitor."""
    w = Watcher(db_manager, Responder(db_manager), signing_key.to_der(), get_config())
    monitor = ChainMonitor(w.block_queue, w.responder.block_queue)
    monitor.monitor_chain()

    return w
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def txids():
    """A hundred random txids shared by the module's tests."""
    return [get_random_value_hex(32) for _ in range(100)]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def locator_uuid_map(txids):
    """Maps each txid's locator to a fresh uuid, mirroring the watcher's internal map."""
    return {compute_locator(txid): uuid4().hex for txid in txids}
|
||||
|
||||
|
||||
def create_appointments(n):
    """Builds n dummy appointments plus their locator map and dispute transactions."""
    appointments = {}
    locator_uuid_map = {}
    dispute_txs = []

    for _ in range(n):
        appointment, dispute_tx = generate_dummy_appointment(
            start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
        )
        uuid = uuid4().hex

        appointments[uuid] = appointment
        locator_uuid_map[appointment.locator] = [uuid]
        dispute_txs.append(dispute_tx)

    return appointments, locator_uuid_map, dispute_txs
|
||||
|
||||
|
||||
def test_init(run_bitcoind, watcher):
    """A freshly built Watcher starts empty and fully wired up."""
    assert isinstance(watcher.appointments, dict) and not watcher.appointments
    assert isinstance(watcher.locator_uuid_map, dict) and not watcher.locator_uuid_map
    assert watcher.block_queue.empty()
    assert isinstance(watcher.config, dict)
    assert isinstance(watcher.signing_key, PrivateKey)
    assert isinstance(watcher.responder, Responder)
|
||||
|
||||
|
||||
def test_add_appointment(watcher):
    """Appointments below the limit are accepted and the returned signature verifies."""
    for _ in range(10):
        appointment, _ = generate_dummy_appointment(
            start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
        )
        accepted, signature = watcher.add_appointment(appointment)

        assert accepted is True
        recovered_pk = Cryptographer.recover_pk(appointment.serialize(), signature)
        assert Cryptographer.verify_rpk(watcher.signing_key.public_key, recovered_pk)

    # Re-adding an already-known appointment (same locator) must also be accepted
    accepted, signature = watcher.add_appointment(appointment)

    assert accepted is True
    recovered_pk = Cryptographer.recover_pk(appointment.serialize(), signature)
    assert Cryptographer.verify_rpk(watcher.signing_key.public_key, recovered_pk)
|
||||
|
||||
|
||||
def test_add_too_many_appointments(watcher):
    """Once MAX_APPOINTMENTS is reached, any further appointment is rejected."""
    # Start from a clean slate so exactly MAX_APPOINTMENTS fit.
    watcher.appointments = {}

    def new_appointment():
        # Dispute tx is irrelevant here; only the appointment is needed.
        appointment, _ = generate_dummy_appointment(
            start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET
        )
        return appointment

    # Fill the watcher to capacity; each add must be accepted and signed.
    for _ in range(MAX_APPOINTMENTS):
        appointment = new_appointment()
        accepted, signature = watcher.add_appointment(appointment)

        assert accepted is True
        recovered_pk = Cryptographer.recover_pk(appointment.serialize(), signature)
        assert Cryptographer.verify_rpk(watcher.signing_key.public_key, recovered_pk)

    # The watcher is full: one more must be rejected, with no signature.
    accepted, signature = watcher.add_appointment(new_appointment())
    assert accepted is False
    assert signature is None
|
||||
|
||||
|
||||
def test_do_watch(watcher, temp_db_manager):
    """End-to-end: breached appointments are triggered, the rest expire."""
    watcher.db_manager = temp_db_manager

    # Wipe previous state and load a fresh batch of appointments.
    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

    # Mirror the data both in the Watcher and in the database.
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}

    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time}
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    Thread(target=watcher.do_watch, daemon=True).start()

    # Trigger the first two appointments by broadcasting their disputes.
    for dispute_tx in dispute_txs[:2]:
        bitcoin_cli().sendrawtransaction(dispute_tx)

    # Mining enough blocks should make the watcher hand those two off,
    # shrinking the appointment count accordingly.
    generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The remaining appointments time out END_TIME_OFFSET + EXPIRY_DELTA
    # blocks after their end; mine some extra blocks to be safe.
    generate_blocks(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == 0
|
||||
|
||||
|
||||
def test_get_breaches(watcher, txids, locator_uuid_map):
    """Every txid whose locator is being watched must be reported as a breach."""
    watcher.locator_uuid_map = locator_uuid_map
    breaches = watcher.get_breaches(txids)

    # The fixture maps exactly these txids' locators, so all must match.
    assert breaches.keys() == locator_uuid_map.keys()
|
||||
|
||||
|
||||
def test_get_breaches_random_data(watcher, locator_uuid_map):
    """Random txids should match watched locators with negligible probability."""
    watcher.locator_uuid_map = locator_uuid_map
    random_txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]

    # None of the random txids should produce a breach.
    assert not watcher.get_breaches(random_txids)
|
||||
|
||||
|
||||
def test_filter_valid_breaches_random_data(watcher):
    """Breaches built from random dispute txids must all be classed as invalid.

    Half of the generated appointments (every odd index) are "triggered" with a
    random dispute txid, which cannot decrypt their blob to a valid transaction,
    so ``filter_valid_breaches`` must reject every one of them.
    """
    appointments = {}
    locator_uuid_map = {}
    breaches = {}

    for i in range(TEST_SET_SIZE):
        dummy_appointment, _ = generate_dummy_appointment()
        uuid = uuid4().hex
        appointments[uuid] = {"locator": dummy_appointment.locator, "end_time": dummy_appointment.end_time}
        watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_json())
        watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)

        locator_uuid_map[dummy_appointment.locator] = [uuid]

        # Trigger every other appointment with random (undecryptable) data.
        if i % 2:
            dispute_txid = get_random_value_hex(32)
            breaches[dummy_appointment.locator] = dispute_txid

    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = appointments

    valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches)

    # We "triggered" TEST_SET_SIZE // 2 breaches, all of them invalid.
    # Fix: integer division (was the float TEST_SET_SIZE / 2) — the count of
    # odd indices in range(TEST_SET_SIZE) is floor(TEST_SET_SIZE / 2), so the
    # old float comparison would wrongly fail for odd set sizes.
    assert len(valid_breaches) == 0 and len(invalid_breaches) == TEST_SET_SIZE // 2
|
||||
|
||||
|
||||
def test_filter_valid_breaches(watcher):
    """A breach whose blob decrypts properly must be classed as valid."""
    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
    # Pre-computed blob; presumably encrypted under the key derived from
    # dispute_txid so that this breach decrypts correctly — do not alter.
    encrypted_blob = (
        "a62aa9bb3c8591e4d5de10f1bd49db92432ce2341af55762cdc9242c08662f97f5f47da0a1aa88373508cd6e67e87eefddeca0cee98c1"
        "967ec1c1ecbb4c5e8bf08aa26159214e6c0bc4b2c7c247f87e7601d15c746fc4e711be95ba0e363001280138ba9a65b06c4aa6f592b21"
        "3635ee763984d522a4c225814510c8f7ab0801f36d4a68f5ee7dd3930710005074121a172c29beba79ed647ebaf7e7fab1bbd9a208251"
        "ef5486feadf2c46e33a7d66adf9dbbc5f67b55a34b1b3c4909dd34a482d759b0bc25ecd2400f656db509466d7479b5b92a2fadabccc9e"
        "c8918da8979a9feadea27531643210368fee494d3aaa4983e05d6cf082a49105e2f8a7c7821899239ba7dee12940acd7d8a629894b5d31"
        "e94b439cfe8d2e9f21e974ae5342a70c91e8"
    )

    # Build one appointment wired to the dispute above.
    valid_appointment, _ = generate_dummy_appointment()
    valid_appointment.encrypted_blob.data = encrypted_blob
    valid_appointment.locator = compute_locator(dispute_txid)
    uuid = uuid4().hex

    breaches = {valid_appointment.locator: dispute_txid}

    # Load it into the Watcher and the database.
    watcher.appointments[uuid] = {"locator": valid_appointment.locator, "end_time": valid_appointment.end_time}
    watcher.db_manager.store_watcher_appointment(uuid, valid_appointment.to_json())
    watcher.db_manager.create_append_locator_map(valid_appointment.locator, uuid)
    watcher.locator_uuid_map = {valid_appointment.locator: [uuid]}

    valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches)

    # A single breach was "triggered" and it must be the valid one.
    assert len(invalid_breaches) == 0 and len(valid_breaches) == 1
|
||||
Reference in New Issue
Block a user