Merge branch 'master' into master

This commit is contained in:
Sergi Delgado Segura
2020-06-15 11:30:01 +02:00
committed by GitHub
15 changed files with 926 additions and 98 deletions

View File

@@ -11,6 +11,7 @@ from common.config_loader import ConfigLoader
getcontext().prec = 10
utxos = []
@pytest.fixture(scope="session")
@@ -37,11 +38,13 @@ def prng_seed():
def setup_node(bitcoin_cli):
# This method will create a new address and mine bitcoin so the node can be used for testing
new_addr = bitcoin_cli.getnewaddress()
-bitcoin_cli.generatetoaddress(106, new_addr)
+bitcoin_cli.generatetoaddress(200, new_addr)
def create_txs(bitcoin_cli, n=1):
-utxos = bitcoin_cli.listunspent()
+global utxos
+if not utxos:
+utxos = bitcoin_cli.listunspent()
if len(utxos) < n:
raise ValueError("There are not enough UTXOs.")
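A quick sanity check on the numbers used by setup_node (this rests on Bitcoin's regtest consensus rules, not on anything in this diff): coinbase outputs only become spendable after 100 confirmations, so bumping the mined blocks from 106 to 200 grows the pool of mature UTXOs that create_txs can draw from:

blocks_mined = 200
coinbase_maturity = 100  # consensus rule: a coinbase needs 100 confirmations
mature_utxos = blocks_mined - coinbase_maturity
assert mature_utxos == 100  # versus 106 - 100 = 6 before this change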

View File

@@ -40,6 +40,9 @@ teosd_process = run_teosd()
teos_id, user_sk, user_id = teos_cli.load_keys(cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY"))
appointments_in_watcher = 0
appointments_in_responder = 0
def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
# Broadcast the commitment transaction and mine a block
@@ -78,6 +81,8 @@ def test_commands_non_registered(bitcoin_cli):
def test_commands_registered(bitcoin_cli):
global appointments_in_watcher
# Test registering and trying again
teos_cli.register(user_id, teos_base_endpoint)
@@ -93,9 +98,12 @@ def test_commands_registered(bitcoin_cli):
r = get_appointment_info(appointment_data.get("locator"))
assert r.get("locator") == appointment.locator
assert r.get("appointment") == appointment.to_dict()
appointments_in_watcher += 1
def test_appointment_life_cycle(bitcoin_cli):
global appointments_in_watcher, appointments_in_responder
# First of all we need to register
response = teos_cli.register(user_id, teos_base_endpoint)
available_slots = response.get("available_slots")
@@ -106,6 +114,7 @@ def test_appointment_life_cycle(bitcoin_cli):
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
appointment, signature = add_appointment(appointment_data)
appointments_in_watcher += 1
# Get the information from the tower to check that it matches
appointment_info = get_appointment_info(locator)
@@ -117,7 +126,7 @@ def test_appointment_life_cycle(bitcoin_cli):
all_appointments = get_all_appointments()
watching = all_appointments.get("watcher_appointments")
responding = all_appointments.get("responder_trackers")
-assert len(watching) == 1 and len(responding) == 0
+assert len(watching) == appointments_in_watcher and len(responding) == 0
# Trigger a breach and check again
new_addr = bitcoin_cli.getnewaddress()
@@ -125,11 +134,13 @@ def test_appointment_life_cycle(bitcoin_cli):
appointment_info = get_appointment_info(locator)
assert appointment_info.get("status") == "dispute_responded"
assert appointment_info.get("locator") == locator
appointments_in_watcher -= 1
appointments_in_responder += 1
all_appointments = get_all_appointments()
watching = all_appointments.get("watcher_appointments")
responding = all_appointments.get("responder_trackers")
-assert len(watching) == 0 and len(responding) == 1
+assert len(watching) == appointments_in_watcher and len(responding) == appointments_in_responder
# It can also be checked by ensuring that the penalty transaction made it to the network
penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid")
@@ -144,6 +155,7 @@ def test_appointment_life_cycle(bitcoin_cli):
# Now let's mine some blocks so the appointment reaches its end. We need 100 + EXPIRY_DELTA - 1
bitcoin_cli.generatetoaddress(100 + teos_config.get("EXPIRY_DELTA") - 1, new_addr)
appointments_in_responder -= 1
# The appointment is no longer in the tower
with pytest.raises(TowerResponseError):
@@ -152,10 +164,14 @@ def test_appointment_life_cycle(bitcoin_cli):
# Check that the appointment is not in the Gatekeeper by checking the available slots (they should have increased by 1)
# We can do so by topping up the subscription (FIXME: find a better way to check this).
response = teos_cli.register(user_id, teos_base_endpoint)
-assert response.get("available_slots") == available_slots + teos_config.get("DEFAULT_SLOTS") + 1
+assert (
+response.get("available_slots")
+== available_slots + teos_config.get("DEFAULT_SLOTS") + 1 - appointments_in_watcher - appointments_in_responder
+)
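For reference, here is the slot accounting behind the rewritten assert, as a worked example with hypothetical numbers (available_slots = 100 and DEFAULT_SLOTS = 100 are assumptions, not values taken from this diff): every appointment still held by the Watcher or the Responder keeps one slot occupied, and the + 1 is the slot returned by the appointment that just expired:

available_slots = 100  # returned by the first register call (assumed)
DEFAULT_SLOTS = 100  # slots granted per registration (assumed)
appointments_in_watcher = 1  # e.g. one appointment left over from earlier tests
appointments_in_responder = 0
expected = available_slots + DEFAULT_SLOTS + 1 - appointments_in_watcher - appointments_in_responder
assert expected == 200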
def test_multiple_appointments_life_cycle(bitcoin_cli):
global appointments_in_watcher, appointments_in_responder
# Tests that get_all_appointments returns all the appointments the tower is storing at various stages in the
# appointment lifecycle.
appointments = []
@@ -180,6 +196,7 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
# Send all of them to watchtower.
for appt in appointments:
add_appointment(appt.get("appointment_data"))
appointments_in_watcher += 1
# Two of these appointments are breached, and the watchtower responds to them.
breached_appointments = []
@@ -188,13 +205,15 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
broadcast_transaction_and_mine_block(bitcoin_cli, appointments[i]["commitment_tx"], new_addr)
bitcoin_cli.generatetoaddress(1, new_addr)
breached_appointments.append(appointments[i]["locator"])
appointments_in_watcher -= 1
appointments_in_responder += 1
sleep(1)
# Test that they all show up in get_all_appointments at the correct stages.
all_appointments = get_all_appointments()
watching = all_appointments.get("watcher_appointments")
responding = all_appointments.get("responder_trackers")
-assert len(watching) == 3 and len(responding) == 2
+assert len(watching) == appointments_in_watcher and len(responding) == appointments_in_responder
responder_locators = [appointment["locator"] for uuid, appointment in responding.items()]
assert set(responder_locators) == set(breached_appointments)
@@ -389,6 +408,73 @@ def test_two_appointment_same_locator_different_penalty_different_users(bitcoin_
assert appointment_info.get("appointment").get("penalty_tx") == appointment1_data.get("penalty_tx")
def test_add_appointment_trigger_on_cache(bitcoin_cli):
# This tests sending an appointment whose trigger is in the cache
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
locator = compute_locator(commitment_tx_id)
# Let's send the commitment to the network and mine a block
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, bitcoin_cli.getnewaddress())
# Send the data to the tower and request it back. It should have gone straightaway to the Responder
add_appointment(appointment_data)
assert get_appointment_info(locator).get("status") == "dispute_responded"
def test_add_appointment_invalid_trigger_on_cache(bitcoin_cli):
# This tests sending an invalid appointment whose trigger is in the cache
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
# We can just flip the justice tx so it is invalid
appointment_data = build_appointment_data(commitment_tx_id, penalty_tx[::-1])
locator = compute_locator(commitment_tx_id)
# Let's send the commitment to the network and mine a block
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, bitcoin_cli.getnewaddress())
sleep(1)
# Send the data to the tower and request it back. It should get accepted but the data will be dropped.
add_appointment(appointment_data)
with pytest.raises(TowerResponseError):
get_appointment_info(locator)
def test_add_appointment_trigger_on_cache_cannot_decrypt(bitcoin_cli):
commitment_tx, penalty_tx = create_txs(bitcoin_cli)
# Let's send the commitment to the network and mine a block
broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, bitcoin_cli.getnewaddress())
sleep(1)
# The appointment data is built using a random 32-byte value.
appointment_data = build_appointment_data(get_random_value_hex(32), penalty_tx)
# We cannot use teos_cli.add_appointment here since it computes the locator internally, so let's do it manually.
appointment_data["locator"] = compute_locator(bitcoin_cli.decoderawtransaction(commitment_tx).get("txid"))
appointment_data["encrypted_blob"] = Cryptographer.encrypt(penalty_tx, get_random_value_hex(32))
appointment = Appointment.from_dict(appointment_data)
signature = Cryptographer.sign(appointment.serialize(), user_sk)
data = {"appointment": appointment.to_dict(), "signature": signature}
# Send appointment to the server.
response = teos_cli.post_request(data, teos_add_appointment_endpoint)
response_json = teos_cli.process_post_response(response)
# Check that the server has accepted the appointment
signature = response_json.get("signature")
rpk = Cryptographer.recover_pk(appointment.serialize(), signature)
assert teos_id == Cryptographer.get_compressed_pk(rpk)
assert response_json.get("locator") == appointment.locator
# The appointment should have been immediately dropped
with pytest.raises(TowerResponseError):
get_appointment_info(appointment_data["locator"])
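These cache tests all hinge on the tower's blinding convention: the locator is a prefix of the dispute txid, and the penalty blob is encrypted under a key derived from that same txid (the round trip Cryptographer.encrypt(penalty_rawtx, dispute_txid) == encrypted_blob is asserted in the watcher tests further down). A minimal sketch of that convention, where the exact prefix length and key derivation are assumptions rather than facts from this diff:

from hashlib import sha256

def sketch_compute_locator(dispute_txid_hex):
    # The locator is assumed to be a fixed-size prefix of the dispute txid.
    return dispute_txid_hex[:32]

def sketch_derive_key(dispute_txid_hex):
    # The symmetric key is assumed to be a hash of the dispute txid, so the
    # blob only becomes decryptable once the dispute is visible on-chain.
    return sha256(bytes.fromhex(dispute_txid_hex)).digest()

This is why encrypting with get_random_value_hex(32) above guarantees a decryption failure: the tower derives its key from the real commitment txid it saw on-chain, which cannot match a random one.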
def test_appointment_shutdown_teos_trigger_back_online(bitcoin_cli):
global teosd_process

View File

@@ -4,13 +4,21 @@ from binascii import hexlify
from teos.api import API
import common.errors as errors
from teos.watcher import Watcher
from teos.inspector import Inspector
from teos.gatekeeper import UserInfo
from teos.appointments_dbm import AppointmentsDBM
from teos.responder import Responder, TransactionTracker
from teos.extended_appointment import ExtendedAppointment
from teos.watcher import Watcher, AppointmentAlreadyTriggered
-from test.teos.unit.conftest import get_random_value_hex, generate_dummy_appointment, generate_keypair, get_config
+from test.teos.unit.conftest import (
+get_random_value_hex,
+generate_dummy_appointment,
+generate_keypair,
+get_config,
+create_dummy_transaction,
+compute_locator,
+)
from common.cryptographer import Cryptographer, hash_160
from common.constants import (
@@ -60,7 +68,15 @@ def api(db_manager, carrier, block_processor, gatekeeper, run_bitcoind):
sk, pk = generate_keypair()
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
-watcher = Watcher(db_manager, gatekeeper, block_processor, responder, sk.to_der(), MAX_APPOINTMENTS)
+watcher = Watcher(
+db_manager,
+gatekeeper,
+block_processor,
+responder,
+sk.to_der(),
+MAX_APPOINTMENTS,
+config.get("LOCATOR_CACHE_SIZE"),
+)
inspector = Inspector(block_processor, config.get("MIN_TO_SELF_DELAY"))
api = API(config.get("API_HOST"), config.get("API_PORT"), inspector, watcher)
@@ -157,6 +173,7 @@ def test_add_appointment(api, client, appointment):
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == 0
assert r.json.get("start_block") == api.watcher.last_known_block
def test_add_appointment_no_json(api, client, appointment):
@@ -242,6 +259,7 @@ def test_add_appointment_multiple_times_same_user(api, client, appointment, n=MU
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == n - 1
assert r.json.get("start_block") == api.watcher.last_known_block
# Since all updates came from the same user, only the last one is stored
assert len(api.watcher.locator_uuid_map[appointment.locator]) == 1
@@ -264,6 +282,7 @@ def test_add_appointment_multiple_times_different_users(api, client, appointment
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": signature}, compressed_pk)
assert r.status_code == HTTP_OK
assert r.json.get("available_slots") == 1
assert r.json.get("start_block") == api.watcher.last_known_block
# Check that all the appointments have been added and that there are no duplicates
assert len(set(api.watcher.locator_uuid_map[appointment.locator])) == n
@@ -275,14 +294,22 @@ def test_add_appointment_update_same_size(api, client, appointment):
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
-assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
+assert (
+r.status_code == HTTP_OK
+and r.json.get("available_slots") == 0
+and r.json.get("start_block") == api.watcher.last_known_block
+)
# The user has no additional slots, but should still be able to update
# Let's just reverse the encrypted blob for example
appointment.encrypted_blob = appointment.encrypted_blob[::-1]
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
-assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
+assert (
+r.status_code == HTTP_OK
+and r.json.get("available_slots") == 0
+and r.json.get("start_block") == api.watcher.last_known_block
+)
def test_add_appointment_update_bigger(api, client, appointment):
@@ -297,7 +324,11 @@ def test_add_appointment_update_bigger(api, client, appointment):
appointment.encrypted_blob = TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
-assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
+assert (
+r.status_code == HTTP_OK
+and r.json.get("available_slots") == 0
+and r.json.get("start_block") == api.watcher.last_known_block
+)
# Check that it'll fail if not enough slots are available
# Double the size from before
@@ -314,13 +345,101 @@ def test_add_appointment_update_smaller(api, client, appointment):
appointment.encrypted_blob = TWO_SLOTS_BLOTS
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
-assert r.status_code == HTTP_OK and r.json.get("available_slots") == 0
+assert (
+r.status_code == HTTP_OK
+and r.json.get("available_slots") == 0
+and r.json.get("start_block") == api.watcher.last_known_block
+)
# Let's update with one just small enough
appointment.encrypted_blob = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX - 2)
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
-assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
+assert (
+r.status_code == HTTP_OK
+and r.json.get("available_slots") == 1
+and r.json.get("start_block") == api.watcher.last_known_block
+)
def test_add_appointment_in_cache(api, client):
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
appointment, dispute_tx = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Add the data to the cache
dispute_txid = api.watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
api.watcher.locator_cache.cache[appointment.locator] = dispute_txid
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert (
r.status_code == HTTP_OK
and r.json.get("available_slots") == 0
and r.json.get("start_block") == api.watcher.last_known_block
)
# Trying to add it again should fail, since it is already in the Responder
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED
# The appointment would be rejected even if the data is not in the cache, provided it has been triggered
del api.watcher.locator_cache.cache[appointment.locator]
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED
def test_add_appointment_in_cache_cannot_decrypt(api, client):
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
appointment, dispute_tx = generate_dummy_appointment()
appointment.encrypted_blob = appointment.encrypted_blob[::-1]
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Add the data to the cache
dispute_txid = api.watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
api.watcher.locator_cache.cache[appointment.locator] = dispute_txid
# The appointment should be accepted
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert (
r.status_code == HTTP_OK
and r.json.get("available_slots") == 0
and r.json.get("start_block") == api.watcher.last_known_block
)
def test_add_appointment_in_cache_invalid_transaction(api, client):
api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
# We need to create the appointment manually
dispute_tx = create_dummy_transaction()
dispute_txid = dispute_tx.tx_id.hex()
penalty_tx = create_dummy_transaction(dispute_txid)
locator = compute_locator(dispute_txid)
dummy_appointment_data = {"tx": penalty_tx.hex(), "tx_id": dispute_txid, "to_self_delay": 20}
encrypted_blob = Cryptographer.encrypt(dummy_appointment_data.get("tx")[::-1], dummy_appointment_data.get("tx_id"))
appointment_data = {
"locator": locator,
"to_self_delay": dummy_appointment_data.get("to_self_delay"),
"encrypted_blob": encrypted_blob,
"user_id": get_random_value_hex(16),
}
appointment = ExtendedAppointment.from_dict(appointment_data)
api.watcher.locator_cache.cache[appointment.locator] = dispute_tx.tx_id.hex()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Add the data to the cache
api.watcher.locator_cache.cache[appointment.locator] = dispute_txid
# The appointment should be accepted
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert (
r.status_code == HTTP_OK
and r.json.get("available_slots") == 0
and r.json.get("start_block") == api.watcher.last_known_block
)
def test_add_too_many_appointment(api, client):
@@ -337,7 +456,7 @@ def test_add_too_many_appointment(api, client):
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
if i < free_appointment_slots:
-assert r.status_code == HTTP_OK
+assert r.status_code == HTTP_OK and r.json.get("start_block") == api.watcher.last_known_block
else:
assert r.status_code == HTTP_SERVICE_UNAVAILABLE

View File

@@ -1,3 +1,5 @@
import pytest
from teos.watcher import InvalidTransactionFormat
from test.teos.unit.conftest import get_random_value_hex, generate_block, generate_blocks, fork
@@ -46,7 +48,8 @@ def test_decode_raw_transaction(block_processor):
def test_decode_raw_transaction_invalid(block_processor):
# Same but with an invalid one
-assert block_processor.decode_raw_transaction(hex_tx[::-1]) is None
+with pytest.raises(InvalidTransactionFormat):
+block_processor.decode_raw_transaction(hex_tx[::-1])
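The change pinned down by this test: decode_raw_transaction used to signal a bad transaction by returning None, and now raises a typed exception. A sketch of the assumed shape (the RPC wrapper and the exact exception raised by the bitcoind client are assumptions, not taken from this diff):

class InvalidTransactionFormat(Exception):
    pass

def decode_raw_transaction_sketch(btc_client, raw_tx):
    try:
        # decoderawtransaction is a standard bitcoind RPC; btc_client is
        # assumed to be any JSON-RPC proxy that raises on an RPC error.
        return btc_client.decoderawtransaction(raw_tx)
    except Exception:
        raise InvalidTransactionFormat("The provided data does not represent a transaction")

Raising instead of returning None lets callers such as check_breach (see the watcher tests) distinguish undecryptable blobs from decryptable-but-malformed ones.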
def test_get_missed_blocks(block_processor):

View File

@@ -102,6 +102,7 @@ def test_update_states_empty_list(db_manager, gatekeeper, carrier, block_process
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
)
missed_blocks_watcher = []
@@ -123,6 +124,7 @@ def test_update_states_responder_misses_more(run_bitcoind, db_manager, gatekeepe
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
)
blocks = []
@@ -148,6 +150,7 @@ def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier, bloc
responder=Responder(db_manager, gatekeeper, carrier, block_processor),
sk_der=generate_keypair()[0].to_der(),
max_appointments=config.get("MAX_APPOINTMENTS"),
blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
)
blocks = []

View File

@@ -1,6 +1,7 @@
import pytest
from uuid import uuid4
from shutil import rmtree
from copy import deepcopy
from threading import Thread
from coincurve import PrivateKey
@@ -9,22 +10,32 @@ from teos.tools import bitcoin_cli
from teos.responder import Responder
from teos.gatekeeper import UserInfo
from teos.chain_monitor import ChainMonitor
+from teos.appointments_dbm import AppointmentsDBM
from teos.block_processor import BlockProcessor
-from teos.watcher import Watcher, AppointmentLimitReached
-from teos.appointments_dbm import AppointmentsDBM
from teos.extended_appointment import ExtendedAppointment
from teos.gatekeeper import Gatekeeper, AuthenticationFailure, NotEnoughSlots
+from teos.watcher import (
+Watcher,
+AppointmentLimitReached,
+LocatorCache,
+EncryptionError,
+InvalidTransactionFormat,
+AppointmentAlreadyTriggered,
+)
from common.tools import compute_locator
from common.cryptographer import Cryptographer
from test.teos.unit.conftest import (
generate_blocks_w_delay,
generate_blocks,
generate_dummy_appointment,
get_random_value_hex,
generate_keypair,
get_config,
bitcoind_feed_params,
bitcoind_connect_params,
create_dummy_transaction,
)
APPOINTMENTS = 5
@@ -55,7 +66,15 @@ def watcher(db_manager, gatekeeper):
carrier = Carrier(bitcoind_connect_params)
responder = Responder(db_manager, gatekeeper, carrier, block_processor)
-watcher = Watcher(db_manager, gatekeeper, block_processor, responder, signing_key.to_der(), MAX_APPOINTMENTS)
+watcher = Watcher(
+db_manager,
+gatekeeper,
+block_processor,
+responder,
+signing_key.to_der(),
+MAX_APPOINTMENTS,
+config.get("LOCATOR_CACHE_SIZE"),
+)
chain_monitor = ChainMonitor(
watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
@@ -91,7 +110,198 @@ def create_appointments(n):
return appointments, locator_uuid_map, dispute_txs
-def test_init(run_bitcoind, watcher):
+def test_locator_cache_init_not_enough_blocks(run_bitcoind, block_processor):
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
# Make sure there are at least 3 blocks
block_count = block_processor.get_block_count()
if block_count < 3:
generate_blocks_w_delay(3 - block_count)
# Simulate there are only 3 blocks
third_block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(2)
locator_cache.init(third_block_hash, block_processor)
assert len(locator_cache.blocks) == 3
for k, v in locator_cache.blocks.items():
assert block_processor.get_block(k)
def test_locator_cache_init(block_processor):
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
# Generate enough blocks so the cache can start full
generate_blocks(2 * locator_cache.cache_size)
locator_cache.init(block_processor.get_best_block_hash(), block_processor)
assert len(locator_cache.blocks) == locator_cache.cache_size
for k, v in locator_cache.blocks.items():
assert block_processor.get_block(k)
def test_get_txid():
# Not much to test here, this is shadowing dict.get
locator = get_random_value_hex(16)
txid = get_random_value_hex(32)
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
locator_cache.cache[locator] = txid
assert locator_cache.get_txid(locator) == txid
# A random locator should fail
assert locator_cache.get_txid(get_random_value_hex(16)) is None
def test_update_cache():
# Update should add data about a new block in the cache. If the cache is full, the oldest block is dropped.
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
block_hash = get_random_value_hex(32)
txs = [get_random_value_hex(32) for _ in range(10)]
locator_txid_map = {compute_locator(txid): txid for txid in txs}
# Cache is empty
assert block_hash not in locator_cache.blocks
for locator in locator_txid_map.keys():
assert locator not in locator_cache.cache
# The data has been added to the cache
locator_cache.update(block_hash, locator_txid_map)
assert block_hash in locator_cache.blocks
for locator in locator_txid_map.keys():
assert locator in locator_cache.cache
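A minimal sketch of the LocatorCache structure these tests pin down, inferred from the assertions rather than copied from teos: blocks keeps per-block locator maps in insertion order, cache flattens them into a single locator-to-txid lookup, and update evicts the oldest block once more than cache_size blocks are held:

from collections import OrderedDict

class LocatorCacheSketch:
    def __init__(self, blocks_in_cache):
        self.cache = {}  # locator -> dispute txid
        self.blocks = OrderedDict()  # block_hash -> {locator: txid}
        self.cache_size = blocks_in_cache

    def get_txid(self, locator):
        # Shadows dict.get, as test_get_txid notes.
        return self.cache.get(locator)

    def is_full(self):
        # Matches test_locator_cache_is_full: cache_size entries is not full yet.
        return len(self.blocks) > self.cache_size

    def remove_oldest_block(self):
        _, oldest_map = self.blocks.popitem(last=False)
        for locator in oldest_map:
            self.cache.pop(locator, None)

    def update(self, block_hash, locator_txid_map):
        self.blocks[block_hash] = locator_txid_map
        self.cache.update(locator_txid_map)
        if self.is_full():
            self.remove_oldest_block()

With this shape, test_update_cache_full behaves as asserted: the (cache_size + 1)-th update pushes the first block's data out while everything added later stays.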
def test_update_cache_full():
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
block_hashes = []
big_map = {}
for i in range(locator_cache.cache_size):
block_hash = get_random_value_hex(32)
txs = [get_random_value_hex(32) for _ in range(10)]
locator_txid_map = {compute_locator(txid): txid for txid in txs}
locator_cache.update(block_hash, locator_txid_map)
if i == 0:
first_block_hash = block_hash
first_locator_txid_map = locator_txid_map
else:
block_hashes.append(block_hash)
big_map.update(locator_txid_map)
# The cache is now full.
assert first_block_hash in locator_cache.blocks
for locator in first_locator_txid_map.keys():
assert locator in locator_cache.cache
# Add one more
block_hash = get_random_value_hex(32)
txs = [get_random_value_hex(32) for _ in range(10)]
locator_txid_map = {compute_locator(txid): txid for txid in txs}
locator_cache.update(block_hash, locator_txid_map)
# The first block is not there anymore, but the rest are there
assert first_block_hash not in locator_cache.blocks
for locator in first_locator_txid_map.keys():
assert locator not in locator_cache.cache
for block_hash in block_hashes:
assert block_hash in locator_cache.blocks
for locator in big_map.keys():
assert locator in locator_cache.cache
def test_locator_cache_is_full(block_processor):
# Empty cache
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
for _ in range(locator_cache.cache_size):
locator_cache.blocks[uuid4().hex] = 0
assert not locator_cache.is_full()
locator_cache.blocks[uuid4().hex] = 0
assert locator_cache.is_full()
def test_locator_remove_oldest_block(block_processor):
# Empty cache
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
# Add some blocks to the cache
for _ in range(locator_cache.cache_size):
txid = get_random_value_hex(32)
locator = txid[:16]
locator_cache.blocks[get_random_value_hex(32)] = {locator: txid}
locator_cache.cache[locator] = txid
blocks_in_cache = locator_cache.blocks
oldest_block_hash = list(blocks_in_cache.keys())[0]
oldest_block_data = blocks_in_cache.get(oldest_block_hash)
rest_of_blocks = list(blocks_in_cache.keys())[1:]
locator_cache.remove_oldest_block()
# Oldest block data is not in the cache
assert oldest_block_hash not in locator_cache.blocks
for locator in oldest_block_data:
assert locator not in locator_cache.cache
# The rest of data is in the cache
assert set(rest_of_blocks).issubset(locator_cache.blocks)
for block_hash in rest_of_blocks:
for locator in locator_cache.blocks[block_hash]:
assert locator in locator_cache.cache
def test_fix_cache(block_processor):
# This tests how a reorg will create a new version of the cache
# Let's start by setting up a full cache. We'll mine ``cache_size`` blocks to be sure it's full
generate_blocks_w_delay((config.get("LOCATOR_CACHE_SIZE")))
locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
locator_cache.init(block_processor.get_best_block_hash(), block_processor)
assert len(locator_cache.blocks) == locator_cache.cache_size
# Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
current_tip = block_processor.get_best_block_hash()
current_tip_locators = locator_cache.blocks[current_tip]
current_tip_parent = block_processor.get_block(current_tip).get("previousblockhash")
current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
fake_tip = block_processor.get_block(current_tip_parent).get("previousblockhash")
locator_cache.fix(fake_tip, block_processor)
# The last two blocks are not in the cache, nor are any of their locators
assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
for locator in current_tip_parent_locators + current_tip_locators:
assert locator not in locator_cache.cache
# The fake tip is the new tip, and two additional blocks are at the bottom
assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
assert len(locator_cache.blocks) == locator_cache.cache_size
# Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
# triggering a fix. We'll keep a copy of the old cache blocks to compare against.
old_cache_blocks = deepcopy(locator_cache.blocks)
generate_blocks_w_delay((config.get("LOCATOR_CACHE_SIZE") * 2))
locator_cache.fix(block_processor.get_best_block_hash(), block_processor)
# None of the data from the old cache is in the new cache
for block_hash, locators in old_cache_blocks.items():
assert block_hash not in locator_cache.blocks
for locator in locators:
assert locator not in locator_cache.cache
# The data in the new cache corresponds to the last ``cache_size`` blocks.
block_count = block_processor.get_block_count()
for i in range(block_count, block_count - locator_cache.cache_size, -1):
block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(i - 1)
assert block_hash in locator_cache.blocks
for locator in locator_cache.blocks[block_hash]:
assert locator in locator_cache.cache
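Taken together, the assertions in test_fix_cache constrain what fix must do after a reorg: rebuild the last cache_size blocks ending at the new tip, reusing locally cached data for shallow reorgs and falling back to the block processor for deep ones. A sketch under those assumptions (not the verbatim teos implementation; compute_locator and the get_block dict keys are the ones used elsewhere in these tests):

from collections import OrderedDict

def fix_sketch(cache, new_tip, block_processor):
    tmp = OrderedDict()
    current = new_tip
    for _ in range(cache.cache_size):
        block = block_processor.get_block(current)
        if current in cache.blocks:
            # Shallow reorg: this block was already cached.
            tmp[current] = cache.blocks[current]
        else:
            # Deep reorg: recompute the locator map from the block's txids.
            tmp[current] = {compute_locator(txid): txid for txid in block.get("tx")}
        current = block.get("previousblockhash")
    # Store oldest first so the new tip ends up last, matching the assertion
    # list(locator_cache.blocks.keys())[-1] == fake_tip above.
    cache.blocks = OrderedDict(reversed(list(tmp.items())))
    cache.cache = {loc: txid for data in cache.blocks.values() for loc, txid in data.items()}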
def test_watcher_init(watcher):
assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0
assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0
assert watcher.block_queue.empty()
@@ -101,6 +311,7 @@ def test_init(run_bitcoind, watcher):
assert isinstance(watcher.responder, Responder)
assert isinstance(watcher.max_appointments, int)
assert isinstance(watcher.signing_key, PrivateKey)
assert isinstance(watcher.locator_cache, LocatorCache)
def test_add_appointment_non_registered(watcher):
@@ -171,6 +382,102 @@ def test_add_appointment(watcher):
assert len(watcher.locator_uuid_map[appointment.locator]) == 2
def test_add_appointment_in_cache(watcher):
# Generate an appointment and add the dispute txid to the cache
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=10)
appointment, dispute_tx = generate_dummy_appointment()
dispute_txid = watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
watcher.locator_cache.cache[appointment.locator] = dispute_txid
# Try to add the appointment
response = watcher.add_appointment(appointment, Cryptographer.sign(appointment.serialize(), user_sk))
# The appointment is accepted but it's not in the Watcher
assert (
response
and response.get("locator") == appointment.locator
and Cryptographer.get_compressed_pk(watcher.signing_key.public_key)
== Cryptographer.get_compressed_pk(Cryptographer.recover_pk(appointment.serialize(), response.get("signature")))
)
assert not watcher.locator_uuid_map.get(appointment.locator)
# It went to the Responder straightaway
assert appointment.locator in [tracker.get("locator") for tracker in watcher.responder.trackers.values()]
# Trying to send it again should fail since it is already in the Responder
with pytest.raises(AppointmentAlreadyTriggered):
watcher.add_appointment(appointment, Cryptographer.sign(appointment.serialize(), user_sk))
def test_add_appointment_in_cache_invalid_blob(watcher):
# Generate an appointment with an invalid transaction and add the dispute txid to the cache
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=10)
# We need to create the appointment manually
dispute_tx = create_dummy_transaction()
dispute_txid = dispute_tx.tx_id.hex()
penalty_tx = create_dummy_transaction(dispute_txid)
locator = compute_locator(dispute_txid)
dummy_appointment_data = {"tx": penalty_tx.hex(), "tx_id": dispute_txid, "to_self_delay": 20}
encrypted_blob = Cryptographer.encrypt(dummy_appointment_data.get("tx")[::-1], dummy_appointment_data.get("tx_id"))
appointment_data = {
"locator": locator,
"to_self_delay": dummy_appointment_data.get("to_self_delay"),
"encrypted_blob": encrypted_blob,
"user_id": get_random_value_hex(16),
}
appointment = ExtendedAppointment.from_dict(appointment_data)
watcher.locator_cache.cache[appointment.locator] = dispute_tx.tx_id.hex()
# Try to add the appointment
response = watcher.add_appointment(appointment, Cryptographer.sign(appointment.serialize(), user_sk))
# The appointment is accepted but dropped (same as an invalid appointment that gets triggered)
assert (
response
and response.get("locator") == appointment.locator
and Cryptographer.get_compressed_pk(watcher.signing_key.public_key)
== Cryptographer.get_compressed_pk(Cryptographer.recover_pk(appointment.serialize(), response.get("signature")))
)
assert not watcher.locator_uuid_map.get(appointment.locator)
assert appointment.locator not in [tracker.get("locator") for tracker in watcher.responder.trackers.values()]
def test_add_appointment_in_cache_invalid_transaction(watcher):
# Generate an appointment that cannot be decrypted and add the dispute txid to the cache
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=10)
appointment, dispute_tx = generate_dummy_appointment()
appointment.encrypted_blob = appointment.encrypted_blob[::-1]
dispute_txid = watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
watcher.locator_cache.cache[appointment.locator] = dispute_txid
# Try to add the appointment
response = watcher.add_appointment(appointment, Cryptographer.sign(appointment.serialize(), user_sk))
# The appointment is accepted but dropped (same as an invalid appointment that gets triggered)
assert (
response
and response.get("locator") == appointment.locator
and Cryptographer.get_compressed_pk(watcher.signing_key.public_key)
== Cryptographer.get_compressed_pk(Cryptographer.recover_pk(appointment.serialize(), response.get("signature")))
)
assert not watcher.locator_uuid_map.get(appointment.locator)
assert appointment.locator not in [tracker.get("locator") for tracker in watcher.responder.trackers.values()]
def test_add_too_many_appointments(watcher):
# Simulate the user is registered
user_sk, user_pk = generate_keypair()
@@ -246,9 +553,37 @@ def test_do_watch(watcher, temp_db_manager):
# FIXME: We should also add cases where the transactions are invalid. bitcoind_mock needs to be extended for this.
def test_do_watch_cache_update(watcher):
# Test that data is properly added/removed to/from the cache
for _ in range(10):
blocks_in_cache = watcher.locator_cache.blocks
oldest_block_hash = list(blocks_in_cache.keys())[0]
oldest_block_data = blocks_in_cache.get(oldest_block_hash)
rest_of_blocks = list(blocks_in_cache.keys())[1:]
assert len(watcher.locator_cache.blocks) == watcher.locator_cache.cache_size
generate_blocks_w_delay(1)
# The oldest block is gone but the rest remain
assert oldest_block_hash not in watcher.locator_cache.blocks
assert set(rest_of_blocks).issubset(watcher.locator_cache.blocks.keys())
# The locators of the oldest block are gone but the rest remain
for locator in oldest_block_data:
assert locator not in watcher.locator_cache.cache
for block_hash in rest_of_blocks:
for locator in watcher.locator_cache.blocks[block_hash]:
assert locator in watcher.locator_cache.cache
# The size of the cache is the same
assert len(watcher.locator_cache.blocks) == watcher.locator_cache.cache_size
def test_get_breaches(watcher, txids, locator_uuid_map):
watcher.locator_uuid_map = locator_uuid_map
-potential_breaches = watcher.get_breaches(txids)
+locators_txid_map = {compute_locator(txid): txid for txid in txids}
+potential_breaches = watcher.get_breaches(locators_txid_map)
# All the txids must breach
assert locator_uuid_map.keys() == potential_breaches.keys()
@@ -258,38 +593,50 @@ def test_get_breaches_random_data(watcher, locator_uuid_map):
# The likelihood of finding a potential breach with random data should be negligible
watcher.locator_uuid_map = locator_uuid_map
txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
-potential_breaches = watcher.get_breaches(txids)
+locators_txid_map = {compute_locator(txid): txid for txid in txids}
+potential_breaches = watcher.get_breaches(locators_txid_map)
# None of the txids should breach
assert len(potential_breaches) == 0
-def test_filter_breaches_random_data(watcher):
-appointments = {}
-locator_uuid_map = {}
-breaches = {}
-for i in range(TEST_SET_SIZE):
-dummy_appointment, _ = generate_dummy_appointment()
-uuid = uuid4().hex
-appointments[uuid] = {"locator": dummy_appointment.locator, "user_id": dummy_appointment.user_id}
-watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
-watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
-locator_uuid_map[dummy_appointment.locator] = [uuid]
-if i % 2:
-dispute_txid = get_random_value_hex(32)
-breaches[dummy_appointment.locator] = dispute_txid
-watcher.locator_uuid_map = locator_uuid_map
-watcher.appointments = appointments
-valid_breaches, invalid_breaches = watcher.filter_breaches(breaches)
-# We have "triggered" TEST_SET_SIZE/2 breaches, all of them invalid.
-assert len(valid_breaches) == 0 and len(invalid_breaches) == TEST_SET_SIZE / 2
+def test_check_breach(watcher):
+# A breach will be flagged as valid only if the encrypted blob can be properly decrypted and the resulting data
+# matches a transaction format.
+uuid = uuid4().hex
+appointment, dispute_tx = generate_dummy_appointment()
+dispute_txid = watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
+penalty_txid, penalty_rawtx = watcher.check_breach(uuid, appointment, dispute_txid)
+assert Cryptographer.encrypt(penalty_rawtx, dispute_txid) == appointment.encrypted_blob
+def test_check_breach_random_data(watcher):
+# If a breach triggers an appointment with random data as encrypted blob, the check should fail.
+uuid = uuid4().hex
+appointment, dispute_tx = generate_dummy_appointment()
+dispute_txid = watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
+# Set the blob to something "random"
+appointment.encrypted_blob = get_random_value_hex(200)
+with pytest.raises(EncryptionError):
+watcher.check_breach(uuid, appointment, dispute_txid)
+def test_check_breach_invalid_transaction(watcher):
+# If the breach triggers an appointment with data that can be decrypted but does not match a transaction, it should
+# fail
+uuid = uuid4().hex
+appointment, dispute_tx = generate_dummy_appointment()
+dispute_txid = watcher.block_processor.decode_raw_transaction(dispute_tx).get("txid")
+# Set the blob to something "random"
+appointment.encrypted_blob = Cryptographer.encrypt(get_random_value_hex(200), dispute_txid)
+with pytest.raises(InvalidTransactionFormat):
+watcher.check_breach(uuid, appointment, dispute_txid)
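Read together, these three tests imply the shape of check_breach: decrypt the blob with the dispute txid, decode the plaintext, and map the two failure modes to typed exceptions. A hedged sketch (how the decryption failure is detected internally is an assumption):

def check_breach_sketch(watcher, uuid, appointment, dispute_txid):
    try:
        # Assumed to raise ValueError (or similar) when the blob was not
        # encrypted under a key derived from this dispute txid.
        penalty_rawtx = Cryptographer.decrypt(appointment.encrypted_blob, dispute_txid)
    except ValueError:
        raise EncryptionError(f"Cannot decrypt the blob of appointment {uuid}")
    # decode_raw_transaction itself raises InvalidTransactionFormat (see the
    # block_processor tests above) if the plaintext is not a transaction.
    penalty_tx = watcher.block_processor.decode_raw_transaction(penalty_rawtx)
    return penalty_tx.get("txid"), penalty_rawtx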
def test_filter_valid_breaches(watcher):
@@ -323,3 +670,30 @@ def test_filter_valid_breaches(watcher):
# We have "triggered" a single breach and it was valid.
assert len(invalid_breaches) == 0 and len(valid_breaches) == 1
+def test_filter_breaches_random_data(watcher):
+appointments = {}
+locator_uuid_map = {}
+breaches = {}
+for i in range(TEST_SET_SIZE):
+dummy_appointment, _ = generate_dummy_appointment()
+uuid = uuid4().hex
+appointments[uuid] = {"locator": dummy_appointment.locator, "user_id": dummy_appointment.user_id}
+watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_dict())
+watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid)
+locator_uuid_map[dummy_appointment.locator] = [uuid]
+if i % 2:
+dispute_txid = get_random_value_hex(32)
+breaches[dummy_appointment.locator] = dispute_txid
+watcher.locator_uuid_map = locator_uuid_map
+watcher.appointments = appointments
+valid_breaches, invalid_breaches = watcher.filter_breaches(breaches)
+# We have "triggered" TEST_SET_SIZE/2 breaches, all of them invalid.
+assert len(valid_breaches) == 0 and len(invalid_breaches) == TEST_SET_SIZE / 2