Update tests to remove the asleep flags

Sergi Delgado Segura
2020-02-10 16:21:31 +01:00
parent a4f7548804
commit 6913d1cd18
6 changed files with 85 additions and 250 deletions
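The updated fixtures all follow the same pattern: the ChainMonitor receives both subscriber queues in its constructor, and the Watcher/Responder are woken explicitly instead of toggling asleep flags. A minimal sketch of that pattern, assuming the constructor signatures shown in the hunks below (build_test_stack is a hypothetical helper, not part of the codebase):

from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.chain_monitor import ChainMonitor


def build_test_stack(db_manager, sk_der, config):
    # The Watcher now takes a Responder directly instead of a ChainMonitor.
    watcher = Watcher(db_manager, Responder(db_manager), sk_der, config)

    # Both block queues are handed over at construction time, so there is no
    # attach_watcher / attach_responder step and no shared asleep flag.
    chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue)

    # Subscribers are started explicitly before the chain is monitored.
    watcher.awake()
    chain_monitor.monitor_chain()

    return watcher, chain_monitor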

View File

@@ -15,7 +15,6 @@ from apps.cli.blob import Blob
from pisa.responder import TransactionTracker
from pisa.tools import bitcoin_cli
from pisa.db_manager import DBManager
from pisa.chain_monitor import ChainMonitor
from common.appointment import Appointment
from common.tools import compute_locator
@@ -46,23 +45,13 @@ def prng_seed():
def db_manager():
manager = DBManager("test_db")
# Add last known block for the Responder in the db
yield manager
manager.db.close()
rmtree("test_db")
@pytest.fixture(scope="module")
def chain_monitor():
chain_monitor = ChainMonitor()
chain_monitor.monitor_chain()
yield chain_monitor
chain_monitor.terminate = True
generate_block()
def generate_keypair():
client_sk = ec.generate_private_key(ec.SECP256K1, default_backend())
client_pk = client_sk.public_key()

View File

@@ -7,8 +7,10 @@ from cryptography.hazmat.primitives import serialization
from pisa.api import API
from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.tools import bitcoin_cli
from pisa import HOST, PORT
from pisa.chain_monitor import ChainMonitor
from test.pisa.unit.conftest import (
generate_block,
@@ -32,7 +34,7 @@ config = get_config()
@pytest.fixture(scope="module")
def run_api(db_manager, chain_monitor):
def run_api(db_manager):
sk, pk = generate_keypair()
sk_der = sk.private_bytes(
encoding=serialization.Encoding.DER,
@@ -40,9 +42,10 @@ def run_api(db_manager, chain_monitor):
encryption_algorithm=serialization.NoEncryption(),
)
watcher = Watcher(db_manager, chain_monitor, sk_der, get_config())
chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)
chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep)
watcher = Watcher(db_manager, Responder(db_manager), sk_der, get_config())
chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue)
watcher.awake()
chain_monitor.monitor_chain()
api_thread = Thread(target=API(watcher, config).start)
api_thread.daemon = True

View File

@@ -4,6 +4,7 @@ from queue import Queue
from pisa.builder import Builder
from pisa.watcher import Watcher
from pisa.responder import Responder
from test.pisa.unit.conftest import (
get_random_value_hex,
generate_dummy_appointment,
@@ -89,7 +90,7 @@ def test_populate_block_queue():
def test_update_states_empty_list(db_manager):
w = Watcher(db_manager=db_manager, chain_monitor=None, sk_der=None, config=None)
w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=None)
missed_blocks_watcher = []
missed_blocks_responder = [get_random_value_hex(32)]
@@ -102,121 +103,35 @@ def test_update_states_empty_list(db_manager):
Builder.update_states(w, missed_blocks_responder, missed_blocks_watcher)
def test_update_states_different_sizes(run_bitcoind, db_manager, chain_monitor):
w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
chain_monitor.attach_watcher(w.responder, True)
chain_monitor.attach_responder(w.responder, True)
# For the states to be updated, data needs to be present in the actors (either appointments or trackers).
# Let's start with the Watcher. We add one appointment and mine some blocks that both of them are going to miss.
w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200}
def test_update_states_responder_misses_more(run_bitcoind, db_manager):
w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config())
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
# Updating the states should bring both to the same last known block. The Watcher's is stored in the db since it has
# gone over do_watch, whereas the Responder's is only updated by update_states.
# Updating the states should bring both to the same last known block.
w.awake()
w.responder.awake()
Builder.update_states(w, blocks, blocks[1:])
assert db_manager.load_last_block_hash_watcher() == blocks[-1]
assert w.responder.last_known_block == blocks[-1]
# If both have work, both last known blocks are updated
w.sleep()
w.responder.sleep()
w.responder.trackers[uuid4().hex] = {
"penalty_txid": get_random_value_hex(32),
"locator": get_random_value_hex(16),
"appointment_end": 200,
}
def test_update_states_watcher_misses_more(run_bitcoind, db_manager):
# Same as before, but data is now in the Responder
w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config())
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
w.awake()
w.responder.awake()
Builder.update_states(w, blocks[1:], blocks)
assert db_manager.load_last_block_hash_watcher() == blocks[-1]
assert db_manager.load_last_block_hash_responder() == blocks[-1]
# Let's try the opposite of the first test (Responder with data, Watcher without)
w.sleep()
w.responder.sleep()
w.appointments = {}
last_block_prev = blocks[-1]
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
# The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hasn't
# changed. The Watcher does not keep track of reorgs, so if it has no work to do it does not even update the last
# known block.
Builder.update_states(w, blocks[1:], blocks)
assert db_manager.load_last_block_hash_watcher() == last_block_prev
assert db_manager.load_last_block_hash_responder() == blocks[-1]
def test_update_states_same_sizes(db_manager, chain_monitor):
# The exact same behaviour as in the last test is expected here, since lists of different sizes are evened out using
# populate_block_queue and then processed with the same list size.
w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
chain_monitor.attach_watcher(w.responder, True)
chain_monitor.attach_responder(w.responder, True)
# For the states to be updated, data needs to be present in the actors (either appointments or trackers).
# Let's start with the Watcher. We add one appointment and mine some blocks that both of them are going to miss.
w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200}
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
Builder.update_states(w, blocks, blocks)
assert db_manager.load_last_block_hash_watcher() == blocks[-1]
assert w.responder.last_known_block == blocks[-1]
# If both have work, both last known blocks are updated
w.sleep()
w.responder.sleep()
w.responder.trackers[uuid4().hex] = {
"penalty_txid": get_random_value_hex(32),
"locator": get_random_value_hex(16),
"appointment_end": 200,
}
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
Builder.update_states(w, blocks, blocks)
assert db_manager.load_last_block_hash_watcher() == blocks[-1]
assert db_manager.load_last_block_hash_responder() == blocks[-1]
# Let's try the opposite of the first test (Responder with data, Watcher without)
w.sleep()
w.responder.sleep()
w.appointments = {}
last_block_prev = blocks[-1]
blocks = []
for _ in range(5):
generate_block()
blocks.append(bitcoin_cli().getbestblockhash())
# The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hasn't
# changed. The Watcher does not keep track of reorgs, so if it has no work to do it does not even update the last
# known block.
Builder.update_states(w, blocks, blocks)
assert db_manager.load_last_block_hash_watcher() == last_block_prev
assert db_manager.load_last_block_hash_responder() == blocks[-1]

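As the comments in these tests note, Builder.update_states replays the blocks each subscriber missed so that the Watcher and the Responder converge on the same last known block. A condensed sketch of that flow, reusing the names and call order from this hunk (the meaning of each positional argument is inferred from the diff, not verified against Builder's signature):

blocks = []
for _ in range(5):
    generate_block()
    blocks.append(bitcoin_cli().getbestblockhash())

w.awake()
w.responder.awake()

# One missed-block list is one element shorter; update_states evens them out and
# processes both, leaving the two subscribers on the same tip.
Builder.update_states(w, blocks, blocks[1:])

assert db_manager.load_last_block_hash_watcher() == blocks[-1]
assert w.responder.last_known_block == blocks[-1]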
View File

@@ -1,20 +1,19 @@
import zmq
import time
from queue import Queue
from threading import Thread, Event, Condition
from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.block_processor import BlockProcessor
from pisa.chain_monitor import ChainMonitor
from test.pisa.unit.conftest import get_random_value_hex, generate_block, get_config
from test.pisa.unit.conftest import get_random_value_hex, generate_block
def test_init(run_bitcoind):
# run_bitcoind is started here instead of later on to avoid race conditions while it initializes
# Not much to test here, just sanity checks to make sure nothing goes south in the future
chain_monitor = ChainMonitor()
chain_monitor = ChainMonitor(Queue(), Queue())
assert chain_monitor.best_tip is None
assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0
@@ -24,41 +23,12 @@ def test_init(run_bitcoind):
assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)
# The Queues and asleep flags are initialized when attaching the corresponding subscriber
assert chain_monitor.watcher_queue is None
assert chain_monitor.responder_queue is None
assert chain_monitor.watcher_asleep and chain_monitor.responder_asleep
assert isinstance(chain_monitor.watcher_queue, Queue)
assert isinstance(chain_monitor.responder_queue, Queue)
def test_attach_watcher(chain_monitor, db_manager):
watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)
# booleans are not passed by reference in Python, so the flags need to be set separately
assert watcher.asleep == chain_monitor.watcher_asleep
watcher.asleep = False
assert chain_monitor.watcher_asleep != watcher.asleep
# Test that the Queue works
r_hash = get_random_value_hex(32)
chain_monitor.watcher_queue.put(r_hash)
assert watcher.block_queue.get() == r_hash
def test_attach_responder(chain_monitor, db_manager):
responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor)
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
# Same kind of testing as with attach_watcher
assert responder.asleep == chain_monitor.watcher_asleep
responder.asleep = False
assert chain_monitor.watcher_asleep != responder.asleep
r_hash = get_random_value_hex(32)
chain_monitor.responder_queue.put(r_hash)
assert responder.block_queue.get() == r_hash
def test_notify_subscribers(chain_monitor):
def test_notify_subscribers():
chain_monitor = ChainMonitor(Queue(), Queue())
# Subscribers are only notified as long as they are awake
new_block = get_random_value_hex(32)
@@ -66,27 +36,17 @@ def test_notify_subscribers(chain_monitor):
assert chain_monitor.watcher_queue.empty()
assert chain_monitor.responder_queue.empty()
chain_monitor.watcher_asleep = True
chain_monitor.responder_asleep = True
chain_monitor.notify_subscribers(new_block)
# And remain empty afterwards since both subscribers were asleep
assert chain_monitor.watcher_queue.empty()
assert chain_monitor.responder_queue.empty()
# Let's flag them as awake and try again
chain_monitor.watcher_asleep = False
chain_monitor.responder_asleep = False
chain_monitor.notify_subscribers(new_block)
assert chain_monitor.watcher_queue.get() == new_block
assert chain_monitor.responder_queue.get() == new_block
def test_update_state(chain_monitor):
def test_update_state():
# The state is updated after receiving a new block (and only if the block is not already known).
# Let's start by setting a best_tip and a couple of old tips
new_block_hash = get_random_value_hex(32)
chain_monitor = ChainMonitor(Queue(), Queue())
chain_monitor.best_tip = new_block_hash
chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]
@@ -105,12 +65,10 @@ def test_update_state(chain_monitor):
def test_monitor_chain_polling(db_manager):
# Try polling with the Watcher
chain_monitor = ChainMonitor()
wq = Queue()
chain_monitor = ChainMonitor(wq, Queue())
chain_monitor.best_tip = BlockProcessor.get_best_block_hash()
watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
chain_monitor.attach_watcher(watcher.block_queue, asleep=False)
# monitor_chain_polling runs until terminate is set
polling_thread = Thread(target=chain_monitor.monitor_chain_polling, kwargs={"polling_delta": 0.1}, daemon=True)
polling_thread.start()
@@ -131,13 +89,10 @@ def test_monitor_chain_polling(db_manager):
def test_monitor_chain_zmq(db_manager):
# Try zmq with the Responder
chain_monitor = ChainMonitor()
rq = Queue()
chain_monitor = ChainMonitor(Queue(), rq)
chain_monitor.best_tip = BlockProcessor.get_best_block_hash()
responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor)
chain_monitor.attach_responder(responder.block_queue, asleep=False)
zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
zmq_thread.start()
@@ -150,28 +105,10 @@ def test_monitor_chain_zmq(db_manager):
chain_monitor.responder_queue.get()
assert chain_monitor.responder_queue.empty()
# If we flag it as asleep, no notifications are sent
chain_monitor.responder_asleep = True
for _ in range(3):
generate_block()
assert chain_monitor.responder_queue.empty()
chain_monitor.terminate = True
# The zmq thread needs a block generation to release from the recv method.
generate_block()
zmq_thread.join()
def test_monitor_chain(db_manager):
# Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
chain_monitor = ChainMonitor()
watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor)
chain_monitor.attach_responder(responder.block_queue, asleep=False)
chain_monitor.attach_watcher(watcher.block_queue, asleep=False)
chain_monitor = ChainMonitor(Queue(), Queue())
chain_monitor.best_tip = None
chain_monitor.monitor_chain()
@@ -196,12 +133,7 @@ def test_monitor_chain(db_manager):
def test_monitor_chain_single_update(db_manager):
# This test checks that if both threads try to add the same block to the queue, only the first one will make it
chain_monitor = ChainMonitor()
watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config())
responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor)
chain_monitor.attach_responder(responder.block_queue, asleep=False)
chain_monitor.attach_watcher(watcher.block_queue, asleep=False)
chain_monitor = ChainMonitor(Queue(), Queue())
chain_monitor.best_tip = None

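With the asleep flags gone, notify_subscribers simply pushes every new block hash to both queues, which is what the rewritten test above checks. A condensed sketch (constructor signature taken from this hunk; like the test, it assumes a local bitcoind/zmq endpoint is available when ChainMonitor is instantiated):

from queue import Queue

from pisa.chain_monitor import ChainMonitor

chain_monitor = ChainMonitor(Queue(), Queue())
new_block = "00" * 32  # illustrative block hash

chain_monitor.notify_subscribers(new_block)

# Without asleep flags there is nothing to gate the notification: both queues get the hash.
assert chain_monitor.watcher_queue.get() == new_block
assert chain_monitor.responder_queue.get() == new_block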
View File

@@ -1,6 +1,7 @@
import json
import pytest
import random
from queue import Queue
from uuid import uuid4
from shutil import rmtree
from copy import deepcopy
@@ -18,17 +19,19 @@ from test.pisa.unit.conftest import generate_block, generate_blocks, get_random_
@pytest.fixture(scope="module")
def responder(db_manager, chain_monitor):
responder = Responder(db_manager, chain_monitor)
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
def responder(db_manager):
responder = Responder(db_manager)
chain_monitor = ChainMonitor(Queue(), responder.block_queue)
chain_monitor.monitor_chain()
return responder
@pytest.fixture()
@pytest.fixture(scope="session")
def temp_db_manager():
db_name = get_random_value_hex(8)
db_manager = DBManager(db_name)
yield db_manager
db_manager.db.close()
@@ -144,19 +147,17 @@ def test_tracker_from_dict_invalid_data():
assert True
def test_init_responder(responder):
def test_init_responder(temp_db_manager):
responder = Responder(temp_db_manager)
assert isinstance(responder.trackers, dict) and len(responder.trackers) == 0
assert isinstance(responder.tx_tracker_map, dict) and len(responder.tx_tracker_map) == 0
assert isinstance(responder.unconfirmed_txs, list) and len(responder.unconfirmed_txs) == 0
assert isinstance(responder.missed_confirmations, dict) and len(responder.missed_confirmations) == 0
assert isinstance(responder.chain_monitor, ChainMonitor)
assert responder.block_queue.empty()
assert responder.asleep is True
def test_handle_breach(db_manager, chain_monitor):
responder = Responder(db_manager, chain_monitor)
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
def test_handle_breach(db_manager):
responder = Responder(db_manager)
uuid = uuid4().hex
tracker = create_dummy_tracker()
@@ -174,20 +175,11 @@ def test_handle_breach(db_manager, chain_monitor):
assert receipt.delivered is True
# The responder automatically fires add_tracker on adding a tracker if it is asleep. We need to stop the processes
# now. To do so we delete all the trackers, and generate a new block.
responder.trackers = dict()
generate_block()
def test_add_bad_response(responder):
def test_handle_breach_bad_response(responder):
uuid = uuid4().hex
tracker = create_dummy_tracker()
# Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by setting
# it to awake. That will prevent the chain_monitor thread from being launched again.
responder.asleep = False
# A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, better tests are needed though.
tracker.penalty_rawtx = tracker.penalty_txid
@@ -206,8 +198,6 @@ def test_add_bad_response(responder):
def test_add_tracker(responder):
# Responder is asleep
for _ in range(20):
uuid = uuid4().hex
confirmations = 0
@@ -236,8 +226,6 @@ def test_add_tracker(responder):
def test_add_tracker_same_penalty_txid(responder):
# Responder is asleep
confirmations = 0
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(random_txid=True)
uuid_1 = uuid4().hex
@@ -262,8 +250,6 @@ def test_add_tracker_same_penalty_txid(responder):
def test_add_tracker_already_confirmed(responder):
# Responder is asleep
for i in range(20):
uuid = uuid4().hex
confirmations = i + 1
@@ -276,10 +262,11 @@ def test_add_tracker_already_confirmed(responder):
assert penalty_txid not in responder.unconfirmed_txs
def test_do_watch(temp_db_manager, chain_monitor):
def test_do_watch(temp_db_manager):
# Create a fresh responder to simplify the test
responder = Responder(temp_db_manager, chain_monitor)
chain_monitor.attach_responder(responder.block_queue, False)
responder = Responder(temp_db_manager)
chain_monitor = ChainMonitor(Queue(), responder.block_queue)
chain_monitor.monitor_chain()
trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]
@@ -332,12 +319,12 @@ def test_do_watch(temp_db_manager, chain_monitor):
generate_blocks(6)
assert len(responder.tx_tracker_map) == 0
assert responder.asleep is True
def test_check_confirmations(temp_db_manager, chain_monitor):
responder = Responder(temp_db_manager, chain_monitor)
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
def test_check_confirmations(db_manager):
responder = Responder(db_manager)
chain_monitor = ChainMonitor(Queue(), responder.block_queue)
chain_monitor.monitor_chain()
# check_confirmations checks, given the list of transactions in a block, which of the known penalty transactions have
# been confirmed. To test this we need to create a list of transactions and the state of the responder
@@ -391,11 +378,12 @@ def test_get_txs_to_rebroadcast(responder):
assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())
def test_get_completed_trackers(db_manager, chain_monitor):
def test_get_completed_trackers(db_manager):
initial_height = bitcoin_cli().getblockcount()
responder = Responder(db_manager, chain_monitor)
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
responder = Responder(db_manager)
chain_monitor = ChainMonitor(Queue(), responder.block_queue)
chain_monitor.monitor_chain()
# A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS).
# We'll create three types of transactions: end reached + enough conf, end reached + not enough conf, end not reached
@@ -450,10 +438,10 @@ def test_get_completed_trackers(db_manager, chain_monitor):
assert set(completed_trackers_ids) == set(ended_trackers_keys)
def test_rebroadcast(db_manager, chain_monitor):
responder = Responder(db_manager, chain_monitor)
responder.asleep = False
chain_monitor.attach_responder(responder.block_queue, responder.asleep)
def test_rebroadcast(db_manager):
responder = Responder(db_manager)
chain_monitor = ChainMonitor(Queue(), responder.block_queue)
chain_monitor.monitor_chain()
txs_to_rebroadcast = []

View File

@@ -1,5 +1,6 @@
import pytest
from uuid import uuid4
from shutil import rmtree
from threading import Thread
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
@@ -8,6 +9,7 @@ from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.tools import bitcoin_cli
from pisa.chain_monitor import ChainMonitor
from pisa.db_manager import DBManager
from test.pisa.unit.conftest import (
generate_blocks,
@@ -36,11 +38,22 @@ sk_der = signing_key.private_bytes(
)
@pytest.fixture(scope="session")
def temp_db_manager():
db_name = get_random_value_hex(8)
db_manager = DBManager(db_name)
yield db_manager
db_manager.db.close()
rmtree(db_name)
@pytest.fixture(scope="module")
def watcher(db_manager, chain_monitor):
watcher = Watcher(db_manager, chain_monitor, sk_der, get_config())
chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)
chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep)
def watcher(db_manager):
watcher = Watcher(db_manager, Responder(db_manager), sk_der, get_config())
chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue)
chain_monitor.monitor_chain()
return watcher
@@ -76,19 +89,13 @@ def create_appointments(n):
def test_init(run_bitcoind, watcher):
assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0
assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0
assert watcher.asleep is True
assert watcher.block_queue.empty()
assert isinstance(watcher.chain_monitor, ChainMonitor)
assert isinstance(watcher.config, dict)
assert isinstance(watcher.signing_key, ec.EllipticCurvePrivateKey)
assert isinstance(watcher.responder, Responder)
def test_add_appointment(watcher):
# The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state)
# Avoid this by setting the state to awake.
watcher.asleep = False
# We should be able to add appointments up to the limit
for _ in range(10):
appointment, dispute_tx = generate_dummy_appointment(
@@ -128,10 +135,11 @@ def test_add_too_many_appointments(watcher):
assert sig is None
def test_do_watch(watcher):
def test_do_watch(watcher, temp_db_manager):
watcher.db_manager = temp_db_manager
# We will wipe all the previous data and add 5 appointments
appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)
watcher.chain_monitor.watcher_asleep = False
# Set the data into the Watcher and in the db
watcher.locator_uuid_map = locator_uuid_map
@@ -142,7 +150,8 @@ def test_do_watch(watcher):
watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
watcher.db_manager.create_append_locator_map(appointment.locator, uuid)
Thread(target=watcher.do_watch, daemon=True).start()
do_watch_thread = Thread(target=watcher.do_watch, daemon=True)
do_watch_thread.start()
# Broadcast the first two
for dispute_tx in dispute_txs[:2]:
@@ -158,7 +167,6 @@ def test_do_watch(watcher):
generate_blocks(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET)
assert len(watcher.appointments) == 0
assert watcher.asleep is True
def test_get_breaches(watcher, txids, locator_uuid_map):