Mirror of https://github.com/aljazceru/python-teos.git, synced 2025-12-17 14:14:22 +01:00
Adds some missing Responder unit tests
Excluding the basic reorg handling, we need a better bitcoind mock or a different testing approach to reach higher code coverage. This is as far as it can get for the Responder at the moment.
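As a rough illustration of the kind of bitcoind mock that could push coverage further, the Responder's RPC calls could be stubbed out at the bitcoin_cli boundary instead of going through the simulator. The sketch below is only an assumption about how such a fixture might look: FakeBitcoinCli, its canned getrawtransaction reply, and the commented-out patch target are hypothetical and not part of this commit.

# A minimal sketch, assuming pytest's monkeypatch fixture. FakeBitcoinCli and the
# patch target mentioned below are hypothetical, not part of python-teos.
import pytest


class FakeBitcoinCli:
    # Dict-backed stand-in for the RPC interface the Responder queries.
    def __init__(self, known_txs=None):
        self.known_txs = known_txs or {}  # txid -> confirmation count

    def getrawtransaction(self, txid, verbose=1):
        # Mimic bitcoind loosely: fail for unknown txids, report confirmations otherwise.
        if txid not in self.known_txs:
            raise ValueError("No such mempool or blockchain transaction")
        return {"txid": txid, "confirmations": self.known_txs[txid]}


@pytest.fixture
def fake_bitcoin_cli(monkeypatch):
    fake = FakeBitcoinCli()
    # Hypothetical patch target; the real attribute path would need to be checked:
    # monkeypatch.setattr("pisa.responder.bitcoin_cli", lambda *a, **kw: fake)
    return fake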
@@ -1,14 +1,17 @@
import json
import pytest
import random
from uuid import uuid4
from copy import deepcopy
from threading import Thread
from queue import Queue, Empty

from pisa import c_logger
from pisa.tools import check_txid_format, bitcoin_cli
from test.simulator.utils import sha256d
from pisa.responder import Responder, Job
from test.simulator.bitcoind_sim import TX
from pisa.block_processor import BlockProcessor
from pisa.tools import check_txid_format, bitcoin_cli
from test.unit.conftest import generate_block, generate_blocks, get_random_value_hex

c_logger.disabled = True
@@ -64,6 +67,23 @@ def test_job_init(run_bitcoind):
    )


def test_on_sync(run_bitcoind, responder):
    # We're on sync if we're one block or less behind the tip
    chain_tip = BlockProcessor.get_best_block_hash()
    assert Responder.on_sync(chain_tip) is True

    generate_block()
    assert Responder.on_sync(chain_tip) is True


def test_on_sync_fail(responder):
    # This should fail if we're more than one block behind the tip
    chain_tip = BlockProcessor.get_best_block_hash()
    generate_blocks(2)

    assert Responder.on_sync(chain_tip) is False


def test_job_to_dict():
    job = create_dummy_job()
    job_dict = job.to_dict()
@@ -86,6 +106,28 @@ def test_job_to_json():
    )


def test_job_from_dict():
    job_dict = create_dummy_job().to_dict()
    new_job = Job.from_dict(job_dict)

    assert job_dict == new_job.to_dict()


def test_job_from_dict_invalid_data():
    job_dict = create_dummy_job().to_dict()

    for value in ["dispute_txid", "justice_txid", "justice_rawtx", "appointment_end"]:
        job_dict_copy = deepcopy(job_dict)
        job_dict_copy[value] = None

        try:
            Job.from_dict(job_dict_copy)
            assert False

        except ValueError:
            assert True


def test_init_responder(responder):
    assert type(responder.jobs) is dict and len(responder.jobs) == 0
    assert type(responder.tx_job_map) is dict and len(responder.tx_job_map) == 0
@@ -96,14 +138,11 @@ def test_init_responder(responder):
    assert responder.zmq_subscriber is None


def test_add_response(responder):
def test_add_response(db_manager):
    responder = Responder(db_manager)
    uuid = uuid4().hex
    job = create_dummy_job()

    # The responder automatically fires create_job on adding a job if it is asleep (initial state). Avoid this by
    # setting the state to awake.
    responder.asleep = False

    # The block_hash passed to add_response does not matter much for now. It will in the future, to deal with errors.
    receipt = responder.add_response(
        uuid,
@@ -116,6 +155,36 @@ def test_add_response(responder):
    assert receipt.delivered is True

    # The responder automatically fires create_job on adding a job if it is asleep. We need to stop the processes now.
    # To do so we delete all the jobs, stop the zmq subscriber and feed a fake block to unblock the queue.get call.
    responder.jobs = dict()
    responder.zmq_subscriber.terminate = True
    responder.block_queue.put(get_random_value_hex(32))


def test_add_bad_response(responder):
    uuid = uuid4().hex
    job = create_dummy_job()

    # Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by setting
    # it to awake. That prevents the zmq thread from being launched again.
    responder.asleep = False

    # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, although better tests are needed.
    job.justice_rawtx = job.justice_txid

    # The block_hash passed to add_response does not matter much for now. It will in the future, to deal with errors.
    receipt = responder.add_response(
        uuid,
        job.dispute_txid,
        job.justice_txid,
        job.justice_rawtx,
        job.appointment_end,
        block_hash=get_random_value_hex(32),
    )

    assert receipt.delivered is False


def test_create_job(responder):
    responder.asleep = False
@@ -147,6 +216,33 @@ def test_create_job(responder):
    )


def test_create_job_same_justice_txid(responder):
    # Create the same job using two different uuids
    confirmations = 0
    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
    uuid_1 = uuid4().hex
    uuid_2 = uuid4().hex

    responder.create_job(uuid_1, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
    responder.create_job(uuid_2, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)

    # Check that both jobs have been added
    assert uuid_1 in responder.jobs and uuid_2 in responder.jobs
    assert justice_txid in responder.tx_job_map
    assert justice_txid in responder.unconfirmed_txs

    # Check that the rest of the job data also matches
    for uuid in [uuid_1, uuid_2]:
        job = responder.jobs[uuid]
        assert (
            job.dispute_txid == dispute_txid
            and job.justice_txid == justice_txid
            and job.justice_rawtx == justice_rawtx
            and job.appointment_end == appointment_end
        )


def test_create_job_already_confirmed(responder):
    responder.asleep = False
@@ -233,6 +329,39 @@ def test_do_watch(responder):
    assert responder.asleep is True


def test_check_confirmations(responder):
    # Reinitialize the responder (but keep the subscriber)
    responder.jobs = dict()
    responder.tx_job_map = dict()
    responder.unconfirmed_txs = []
    responder.missed_confirmations = dict()

    # check_confirmations takes the list of transactions in a block and checks which of the known justice transactions
    # have been confirmed. To test this we need to create a list of transactions and set up the responder state.
    txs = [get_random_value_hex(32) for _ in range(20)]

    # The responder keeps a list of unconfirmed transactions; let's make some of them be the ones we've just received
    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
    txs_subset = random.sample(txs, k=10)
    responder.unconfirmed_txs.extend(txs_subset)

    # We also need to add them to the tx_job_map, since they would be there under normal conditions
    responder.tx_job_map = {txid: Job(txid, None, None, None) for txid in responder.unconfirmed_txs}

    # Let's make sure that there are no txs with missed confirmations yet
    assert len(responder.missed_confirmations) == 0

    responder.check_confirmations(txs)

    # After checking confirmations, the txs in txs_subset should be confirmed (not part of unconfirmed_txs anymore)
    # and the rest should have one missed confirmation
    for tx in txs_subset:
        assert tx not in responder.unconfirmed_txs

    for tx in responder.unconfirmed_txs:
        assert responder.missed_confirmations[tx] == 1


def test_get_txs_to_rebroadcast(responder):
    # Let's create a few fake txids and assign at least 6 missed confirmations to each
    txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}