Merge branch 'testing' into 13-appointment-signature

Salvatore Ingala
2019-10-23 09:19:44 +08:00
16 changed files with 442 additions and 61 deletions

View File

@@ -1,6 +1,7 @@
 [run]
 omit =
     pisa/pisad.py
+    pisa/logger.py
     pisa/sample_conf.py
     pisa/time_traveler.py
     pisa/utils/auth_proxy.py

View File

@@ -93,7 +93,7 @@ def get_appointment():
         response.append(job_data)

     if not response:
-        response.append({"locator": locator, "status": "not found"})
+        response.append({"locator": locator, "status": "not_found"})

     response = jsonify(response)

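The normalized miss status can be exercised directly against a running API instance. A minimal sketch, assuming a hypothetical local base URL for PISA_API (the value below is an assumption; the test suite gets it from its own configuration):

    import json
    from os import urandom

    import requests

    PISA_API = "http://localhost:9814"  # assumption: point this at your PISA API instance

    # A random locator will not match any appointment; the API answers 200 with "not_found"
    r = requests.get(url=PISA_API + "/get_appointment?locator=" + urandom(32).hex())
    print(json.loads(r.content))  # e.g. [{"locator": "<hex>", "status": "not_found"}]
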
View File

@@ -65,6 +65,7 @@ class BlockProcessor:
         return potential_matches

     @staticmethod
+    # NOTCOVERED
     def get_matches(potential_matches, locator_uuid_map, appointments):
         matches = []
@@ -89,6 +90,7 @@ class BlockProcessor:
     # DISCUSS: This method comes from the Responder and seems like it could go back there.
     @staticmethod
+    # NOTCOVERED
     def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations):
         for tx in txs:

View File

@@ -2,7 +2,7 @@ from pisa.rpc_errors import *
 from pisa.logger import Logger
 from pisa.tools import bitcoin_cli
 from pisa.utils.auth_proxy import JSONRPCException
-from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION
+from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION, RPC_TX_REORGED_AFTER_BROADCAST

 logger = Logger("Carrier")
@@ -17,6 +17,7 @@ class Receipt:
 class Carrier:
+    # NOTCOVERED
     def send_transaction(self, rawtx, txid):
         try:
             logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx)
@@ -50,8 +51,9 @@ class Carrier:
                 else:
                     # There's a really unlikely edge case where a transaction can be reorged between receiving the
-                    # notification and querying the data. In such a case we just resend
-                    self.send_transaction(rawtx, txid)
+                    # notification and querying the data. Notice that this implies the tx being also kicked off the
+                    # mempool, which again is really unlikely.
+                    receipt = Receipt(delivered=False, reason=RPC_TX_REORGED_AFTER_BROADCAST)

             elif errno == RPC_DESERIALIZATION_ERROR:
                 # Adding this here just for completeness. We should never end up here. The Carrier only sends txs

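With this change, send_transaction no longer recurses on the reorged-after-broadcast edge case; it surfaces the failure through the Receipt and lets the caller choose a policy. A minimal caller-side sketch, assuming the Receipt exposes the delivered/reason fields it is constructed with (an assumption) and an illustrative one-shot retry policy; deliver_with_retry is a hypothetical helper:

    from pisa.carrier import Carrier
    from pisa.errors import RPC_TX_REORGED_AFTER_BROADCAST

    def deliver_with_retry(rawtx, txid):
        carrier = Carrier()
        receipt = carrier.send_transaction(rawtx, txid)

        if not receipt.delivered and receipt.reason == RPC_TX_REORGED_AFTER_BROADCAST:
            # The tx was reorged (and kicked off the mempool) between broadcast and query.
            # Illustrative policy: retry once at the call site instead of recursing inside the Carrier.
            receipt = carrier.send_transaction(rawtx, txid)

        return receipt
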
View File

@@ -9,6 +9,9 @@ APPOINTMENT_WRONG_FIELD = -7
 APPOINTMENT_CIPHER_NOT_SUPPORTED = -8
 APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED = -9

+# Custom RPC errors
+RPC_TX_REORGED_AFTER_BROADCAST = -98
+
 # UNHANDLED
 UNKNOWN_JSON_RPC_EXCEPTION = -99

View File

@@ -18,14 +18,12 @@ logger = Logger("Responder")
 class Job:
-    def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry_counter=0):
+    def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end):
         self.dispute_txid = dispute_txid
         self.justice_txid = justice_txid
         self.justice_rawtx = justice_rawtx
         self.appointment_end = appointment_end
-        self.retry_counter = retry_counter

         # FIXME: locator is here so we can give info about jobs for now. It can be either passed from watcher or info
         # can be directly got from DB
         self.locator = sha256(unhexlify(dispute_txid)).hexdigest()
@@ -56,11 +54,11 @@ class Responder:
         carrier = Carrier()
         receipt = carrier.send_transaction(justice_rawtx, justice_txid)

+        # do_watch can call add_response recursively if a broadcast transaction does not get confirmations
+        # retry holds that information. If retry is true the job already exists
         if receipt.delivered:
-            # do_watch can call add_response recursively if a broadcast transaction does not get confirmations
-            # retry holds such information.
-            self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry,
-                            confirmations=receipt.confirmations)
+            if not retry:
+                self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, receipt.confirmations)

         else:
             # TODO: Add the missing reasons (e.g. RPC_VERIFY_REJECTED)
@@ -68,25 +66,17 @@ class Responder:
         return receipt

-    def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0,
-                   retry=False):
-        # ToDo: #23-define-behaviour-approaching-end
-        if retry:
-            self.jobs[uuid].retry_counter += 1
-            self.missed_confirmations[justice_txid] = 0
-        else:
-            self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
-            if justice_txid in self.tx_job_map:
-                self.tx_job_map[justice_txid].append(uuid)
-            else:
-                self.tx_job_map[justice_txid] = [uuid]
-            if confirmations == 0:
-                self.unconfirmed_txs.append(justice_txid)
+    def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0):
+        self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)
+
+        if justice_txid in self.tx_job_map:
+            self.tx_job_map[justice_txid].append(uuid)
+        else:
+            self.tx_job_map[justice_txid] = [uuid]
+
+        if confirmations == 0:
+            self.unconfirmed_txs.append(justice_txid)

         logger.info("New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid,
                     appointment_end=appointment_end)
@@ -106,7 +96,7 @@ class Responder:
     def do_watch(self):
         # ToDo: #9-add-data-persistence
         # change prev_block_hash to the last known tip when bootstrapping
-        prev_block_hash = 0
+        prev_block_hash = BlockProcessor.get_best_block_hash()

         while len(self.jobs) > 0:
             # We get notified for every new received block
@@ -121,21 +111,22 @@ class Responder:
                         block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)

             # ToDo: #9-add-data-persistence
-            # change prev_block_hash condition
-            if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0:
+            if prev_block_hash == block.get('previousblockhash'):
                 self.unconfirmed_txs, self.missed_confirmations = BlockProcessor.check_confirmations(
                     txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations)

                 txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs)
-                Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, self.get_completed_jobs(height), height)
+                completed_jobs = self.get_completed_jobs(height)
+
+                Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, completed_jobs, height)
                 self.rebroadcast(txs_to_rebroadcast)

+            # NOTCOVERED
             else:
-                logger.warning("Reorg found",
-                               local_prev_block_hash=prev_block_hash,
+                logger.warning("Reorg found", local_prev_block_hash=prev_block_hash,
                                remote_prev_block_hash=block.get('previousblockhash'))

+                # ToDo: #24-properly-handle-reorgs
                 self.handle_reorgs()

             prev_block_hash = block.get('hash')
@@ -160,31 +151,42 @@ class Responder:
         completed_jobs = []

         for uuid, job in self.jobs.items():
-            if job.appointment_end <= height:
+            if job.appointment_end <= height and job.justice_txid not in self.unconfirmed_txs:
                 tx = Carrier.get_transaction(job.justice_txid)

                 # FIXME: Should be improved with the librarian
-                confirmations = tx.get('confirmations')
-
-                if tx is not None and confirmations > MIN_CONFIRMATIONS:
-                    # The end of the appointment has been reached
-                    completed_jobs.append((uuid, confirmations))
+                if tx is not None:
+                    confirmations = tx.get('confirmations')
+
+                    if confirmations >= MIN_CONFIRMATIONS:
+                        # The end of the appointment has been reached
+                        completed_jobs.append((uuid, confirmations))

         return completed_jobs

-    def rebroadcast(self, jobs_to_rebroadcast):
+    def rebroadcast(self, txs_to_rebroadcast):
         # DISCUSS: #22-discuss-confirmations-before-retry
         # ToDo: #23-define-behaviour-approaching-end
-        for tx in jobs_to_rebroadcast:
-            for uuid in self.tx_job_map[tx]:
-                self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid,
-                                  self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True)
+        receipts = []
+
+        for txid in txs_to_rebroadcast:
+            self.missed_confirmations[txid] = 0
+
+            for uuid in self.tx_job_map[txid]:
+                job = self.jobs[uuid]
+                receipt = self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx,
+                                            job.appointment_end, retry=True)

                 logger.warning("Transaction has missed many confirmations. Rebroadcasting.",
-                               justice_txid=self.jobs[uuid].justice_txid,
-                               confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)
+                               justice_txid=job.justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)
+
+                receipts.append((txid, receipt))
+
+        return receipts

     # FIXME: Legacy code, must be checked and updated/fixed
+    # NOTCOVERED
     def handle_reorgs(self):
         for uuid, job in self.jobs.items():
             # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be

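Taken together, the Responder changes make the retry pipeline explicit: check_confirmations counts the blocks in which an unconfirmed tx failed to appear, get_txs_to_rebroadcast flags the ones that missed too many, and rebroadcast resets their counters and re-sends through add_response(..., retry=True) without creating a duplicate Job. A simplified sketch of that bookkeeping (the real check_confirmations also takes tx_job_map, and CONFIRMATIONS_BEFORE_RETRY comes from pisa.conf; the value and helper names below are illustrative):

    CONFIRMATIONS_BEFORE_RETRY = 6  # assumption: the real value is read from pisa.conf

    def check_confirmations(txs, unconfirmed_txs, missed_confirmations):
        # A tx still waiting for its first confirmation misses one confirmation per block
        # that does not include it; once it shows up, it leaves the unconfirmed list.
        still_unconfirmed = []
        for txid in unconfirmed_txs:
            if txid in txs:
                missed_confirmations[txid] = 0
            else:
                missed_confirmations[txid] = missed_confirmations.get(txid, 0) + 1
                still_unconfirmed.append(txid)

        return still_unconfirmed, missed_confirmations

    def get_txs_to_rebroadcast(missed_confirmations):
        # Flag every tx that has missed too many confirmations
        return [txid for txid, missed in missed_confirmations.items()
                if missed >= CONFIRMATIONS_BEFORE_RETRY]
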
View File

@@ -7,12 +7,14 @@ from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY
 from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException

+# NOTCOVERED
 def bitcoin_cli():
     return AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST,
                                                     conf.BTC_RPC_PORT))

 # TODO: currently only used in the Responder; might move there or in the BlockProcessor
+# NOTCOVERED
 def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'):
     tx_in_chain = False
     confirmations = 0
@@ -39,6 +41,7 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'):
     return tx_in_chain, confirmations

+# NOTCOVERED
 def can_connect_to_bitcoind():
     can_connect = True

View File

@@ -1,6 +1,6 @@
 # Porting some functionality from https://github.com/sr-gi/bitcoin_tools with some modifications <3
 from hashlib import sha256
-from binascii import unhexlify
+from binascii import unhexlify, hexlify

 def change_endianness(x):
@@ -15,9 +15,9 @@ def change_endianness(x):
     if (len(x) % 2) == 1:
         x += "0"

-    y = bytes(x, 'utf-8')
+    y = unhexlify(x)
     z = y[::-1]

-    return z.decode('utf-8')
+    return hexlify(z).decode('utf-8')

 def parse_varint(tx):

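The old implementation reversed the characters of the string (bytes(x, 'utf-8')[::-1] turns "0437cd7f" into "f7dc7340"), which mangles hex because each byte is two characters. The fixed version decodes the hex first, reverses the bytes, and re-encodes. A quick standalone check of the fixed behaviour (expected values computed by hand):

    from binascii import hexlify, unhexlify

    def change_endianness(x):
        # Reverse the byte order of a hex string, padding odd-length inputs first
        if (len(x) % 2) == 1:
            x += "0"

        return hexlify(unhexlify(x)[::-1]).decode('utf-8')

    assert change_endianness("0437cd7f") == "7fcd3704"  # bytes, not characters, are reversed
    assert change_endianness("badc0de") == "e00ddcba"   # odd length is padded to "badc0de0" first
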
View File

@@ -32,3 +32,8 @@ def generate_block():
     sleep(0.5)

+def generate_blocks(n):
+    for _ in range(n):
+        generate_block()

View File

@@ -1,6 +1,7 @@
 import json
 import pytest
 import requests
+from os import urandom
 from hashlib import sha256
 from binascii import unhexlify
@@ -99,6 +100,16 @@ def test_request_appointment(new_appointment):
     assert (all([status == "being_watched" for status in appointment_status]))

+def test_request_random_appointment():
+    r = requests.get(url=PISA_API + "/get_appointment?locator=" + urandom(32).hex())
+    assert (r.status_code == 200)
+
+    received_appointments = json.loads(r.content)
+    appointment_status = [appointment.pop("status") for appointment in received_appointments]
+
+    assert (all([status == "not_found" for status in appointment_status]))

 def test_add_appointment_multiple_times(new_appointment, n=MULTIPLE_APPOINTMENTS):
     # Multiple appointments with the same locator should be valid
     # DISCUSS: #34-store-identical-appointments

View File

@@ -43,6 +43,12 @@ def test_get_block(best_block_hash):
     assert block.get('hash') == best_block_hash and 'height' in block and 'previousblockhash' in block and 'tx' in block

+def test_get_random_block():
+    block = BlockProcessor.get_block(urandom(32).hex())
+
+    assert block is None

 def test_get_block_count():
     block_count = BlockProcessor.get_block_count()
     assert isinstance(block_count, int) and block_count >= 0
@@ -55,6 +61,15 @@ def test_potential_matches(txids, locator_uuid_map):
     assert locator_uuid_map.keys() == potential_matches.keys()

+def test_potential_matches_random(locator_uuid_map):
+    txids = [urandom(32).hex() for _ in range(len(locator_uuid_map))]
+
+    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
+
+    # None of the ids should match
+    assert len(potential_matches) == 0

 def test_potential_matches_random_data(locator_uuid_map):
     # The likelihood of finding a potential match with random data should be negligible
     txids = [urandom(32).hex() for _ in range(TEST_SET_SIZE)]

View File

@@ -6,7 +6,7 @@ from os import urandom
 from pisa.carrier import Carrier
 from test.simulator.utils import sha256d
 from test.simulator.transaction import TX
-from test.unit.conftest import generate_block
+from test.unit.conftest import generate_blocks
 from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR

 logging.getLogger().disabled = True
@@ -25,7 +25,6 @@ def carrier():
 def test_send_transaction(run_bitcoind, carrier):
-    # We are mocking bitcoind and in our simulator txid == tx
     tx = TX.create_dummy_transaction()
     txid = sha256d(tx)
@@ -43,8 +42,7 @@ def test_send_double_spending_transaction(carrier):
     sent_txs.append(txid)

     # Wait for a block to be mined
-    for _ in range(2):
-        generate_block()
+    generate_blocks(2)

     # Try to send it again
     receipt2 = carrier.send_transaction(tx, txid)

View File

@@ -45,14 +45,14 @@ def set_up_jobs(total_jobs):
         txid = urandom(32).hex()

         # Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
-        jobs[uuid] = Job(txid, txid, None, None, None)
+        jobs[uuid] = Job(txid, txid, None, None)
         tx_job_map[txid] = [uuid]

         # Each justice_txid can have more than one uuid assigned to it. Do a coin toss to add multiple ones
         while random.randint(0, 1):
             uuid = uuid4().hex
-            jobs[uuid] = Job(txid, txid, None, None, None)
+            jobs[uuid] = Job(txid, txid, None, None)
             tx_job_map[txid].append(uuid)

     return jobs, tx_job_map

test/unit/test_responder.py (new file, 333 lines)
View File

@@ -0,0 +1,333 @@
import json
import pytest
from os import urandom
from uuid import uuid4
from threading import Thread
from queue import Queue, Empty

from pisa.tools import check_txid_format
from test.simulator.utils import sha256d
from pisa.responder import Responder, Job
from test.simulator.bitcoind_sim import TX
from test.unit.conftest import generate_block, generate_blocks
from pisa.utils.auth_proxy import AuthServiceProxy
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT


@pytest.fixture(scope="module")
def responder():
    return Responder()
def create_dummy_job_data(random_txid=False, justice_rawtx=None):
    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))

    # The following transaction data corresponds to a valid transaction. For some tests it may be interesting to have
    # some valid data, but for others we may need multiple different justice_txids.
    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
    justice_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"

    if justice_rawtx is None:
        justice_rawtx = "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402" \
                        "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4" \
                        "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b" \
                        "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba" \
                        "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e" \
                        "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
    else:
        justice_txid = sha256d(justice_rawtx)

    if random_txid is True:
        justice_txid = urandom(32).hex()

    appointment_end = bitcoin_cli.getblockcount() + 2

    return dispute_txid, justice_txid, justice_rawtx, appointment_end


def create_dummy_job(random_txid=False, justice_rawtx=None):
    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid, justice_rawtx)

    return Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)
def test_job_init(run_bitcoind):
    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data()
    job = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)

    assert job.dispute_txid == dispute_txid and job.justice_txid == justice_txid \
        and job.justice_rawtx == justice_rawtx and job.appointment_end == appointment_end


def test_job_to_dict():
    job = create_dummy_job()
    job_dict = job.to_dict()

    assert job.locator == job_dict["locator"] and job.justice_rawtx == job_dict["justice_rawtx"] \
        and job.appointment_end == job_dict["appointment_end"]


def test_job_to_json():
    job = create_dummy_job()
    job_dict = json.loads(job.to_json())

    assert job.locator == job_dict["locator"] and job.justice_rawtx == job_dict["justice_rawtx"] \
        and job.appointment_end == job_dict["appointment_end"]


def test_init_responder(responder):
    assert type(responder.jobs) is dict and len(responder.jobs) == 0
    assert type(responder.tx_job_map) is dict and len(responder.tx_job_map) == 0
    assert type(responder.unconfirmed_txs) is list and len(responder.unconfirmed_txs) == 0
    assert type(responder.missed_confirmations) is dict and len(responder.missed_confirmations) == 0
    assert responder.block_queue is None
    assert responder.asleep is True
    assert responder.zmq_subscriber is None
def test_add_response(responder):
    uuid = uuid4().hex
    job = create_dummy_job()

    # The responder automatically fires create_job on adding a job if it is asleep (initial state). Avoid this by
    # setting the state to awake.
    responder.asleep = False
    receipt = responder.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end)

    assert receipt.delivered is True


def test_create_job(responder):
    responder.asleep = False

    for _ in range(20):
        uuid = uuid4().hex
        confirmations = 0
        dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)

        # Check the job is not within the responder jobs before adding it
        assert uuid not in responder.jobs
        assert justice_txid not in responder.tx_job_map
        assert justice_txid not in responder.unconfirmed_txs

        # And that it is afterwards
        responder.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)

        assert uuid in responder.jobs
        assert justice_txid in responder.tx_job_map
        assert justice_txid in responder.unconfirmed_txs

        # Check that the rest of job data also matches
        job = responder.jobs[uuid]
        assert job.dispute_txid == dispute_txid and job.justice_txid == justice_txid \
            and job.justice_rawtx == justice_rawtx and job.appointment_end == appointment_end


def test_create_job_already_confirmed(responder):
    responder.asleep = False

    for i in range(20):
        uuid = uuid4().hex
        confirmations = i + 1
        dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
            justice_rawtx=TX.create_dummy_transaction())

        responder.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)

        assert justice_txid not in responder.unconfirmed_txs
def test_do_subscribe(responder):
    responder.block_queue = Queue()

    zmq_thread = Thread(target=responder.do_subscribe)
    zmq_thread.daemon = True
    zmq_thread.start()

    try:
        generate_block()
        block_hash = responder.block_queue.get()
        assert check_txid_format(block_hash)

    except Empty:
        assert False
def test_do_watch(responder):
    # Reinitializing responder (but keeping the subscriber)
    responder.jobs = dict()
    responder.tx_job_map = dict()
    responder.unconfirmed_txs = []
    responder.missed_confirmations = dict()

    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
    jobs = [create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(20)]

    # Let's set up the jobs first
    for job in jobs:
        uuid = uuid4().hex

        responder.jobs[uuid] = job
        responder.tx_job_map[job.justice_txid] = [uuid]
        responder.missed_confirmations[job.justice_txid] = 0
        responder.unconfirmed_txs.append(job.justice_txid)

    # Let's start to watch
    watch_thread = Thread(target=responder.do_watch)
    watch_thread.daemon = True
    watch_thread.start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for job in jobs[:5]:
        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
        broadcast_txs.append(job.justice_txid)

    # Mine a block
    generate_block()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Generating 5 additional blocks should complete the 5 jobs
    generate_blocks(5)

    assert not set(broadcast_txs).issubset(responder.tx_job_map)

    # Do the rest
    broadcast_txs = []
    for job in jobs[5:]:
        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
        broadcast_txs.append(job.justice_txid)

    # Mine enough blocks to complete the remaining jobs
    generate_blocks(6)

    assert len(responder.tx_job_map) == 0
    assert responder.asleep is True
def test_get_txs_to_rebroadcast(responder):
    # Let's create a few fake txids and assign at least 6 missed confirmations to each
    txs_missing_too_many_conf = {urandom(32).hex(): 6 + i for i in range(10)}

    # Let's create some other transactions that have missed some confirmations, but not that many
    txs_missing_some_conf = {urandom(32).hex(): 3 for _ in range(10)}

    # All the txs in the first dict should be flagged as to_rebroadcast
    responder.missed_confirmations = txs_missing_too_many_conf
    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_too_many_conf)
    assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())

    # None of the txs in the second dict should be flagged
    responder.missed_confirmations = txs_missing_some_conf
    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_some_conf)
    assert txs_to_rebroadcast == []

    # Let's check that it also works with a mixed dict
    responder.missed_confirmations.update(txs_missing_too_many_conf)
    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_some_conf)
    assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())
def test_get_completed_jobs():
    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
    initial_height = bitcoin_cli.getblockcount()

    # Let's use a fresh responder for this to make it easier to compare the results
    responder = Responder()

    # A completed job is one that has reached the appointment end with enough confirmations (>= MIN_CONFIRMATIONS)
    # We'll create three types of transactions: end reached + enough conf, end reached + not enough conf, end not reached
    jobs_end_conf = {uuid4().hex: create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(10)}

    jobs_end_no_conf = {}
    for _ in range(10):
        job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
        responder.unconfirmed_txs.append(job.justice_txid)
        jobs_end_no_conf[uuid4().hex] = job

    jobs_no_end = {}
    for _ in range(10):
        job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
        job.appointment_end += 10
        jobs_no_end[uuid4().hex] = job

    # Let's add them all to the responder
    responder.jobs.update(jobs_end_conf)
    responder.jobs.update(jobs_end_no_conf)
    responder.jobs.update(jobs_no_end)

    for uuid, job in responder.jobs.items():
        bitcoin_cli.sendrawtransaction(job.justice_rawtx)

    # The dummy appointments have an end_appointment time of current + 2, but jobs need at least 6 confs by default
    generate_blocks(6)

    # And now let's check
    completed_jobs = responder.get_completed_jobs(initial_height + 6)
    completed_jobs_ids = [job_id for job_id, confirmations in completed_jobs]
    ended_jobs_keys = list(jobs_end_conf.keys())

    assert set(completed_jobs_ids) == set(ended_jobs_keys)

    # Generating 6 additional blocks should also confirm jobs_no_end
    generate_blocks(6)

    completed_jobs = responder.get_completed_jobs(initial_height + 12)
    completed_jobs_ids = [job_id for job_id, confirmations in completed_jobs]
    ended_jobs_keys.extend(list(jobs_no_end.keys()))

    assert set(completed_jobs_ids) == set(ended_jobs_keys)
def test_rebroadcast():
    responder = Responder()
    responder.asleep = False

    txs_to_rebroadcast = []

    # Rebroadcast calls add_response with retry=True. The job data is already in jobs.
    for i in range(20):
        uuid = uuid4().hex
        dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
            justice_rawtx=TX.create_dummy_transaction())

        responder.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)
        responder.tx_job_map[justice_txid] = [uuid]
        responder.unconfirmed_txs.append(justice_txid)

        # Let's add some of the txs to the rebroadcast list
        if (i % 2) == 0:
            txs_to_rebroadcast.append(justice_txid)

    receipts = responder.rebroadcast(txs_to_rebroadcast)

    # All txs should have been delivered and the missed confirmations reset
    for txid, receipt in receipts:
        # Sanity check
        assert txid in txs_to_rebroadcast

        assert receipt.delivered is True
        assert responder.missed_confirmations[txid] == 0

View File

@@ -1,6 +1,6 @@
 from pisa import logging
 from pisa.tools import check_txid_format
-from pisa.tools import can_connect_to_bitcoind, in_correct_network
+from pisa.tools import can_connect_to_bitcoind, in_correct_network, bitcoin_cli

 logging.getLogger().disabled = True
@@ -22,6 +22,15 @@ def test_can_connect_to_bitcoind():
     # assert can_connect_to_bitcoind() is False

+def test_bitcoin_cli():
+    try:
+        bitcoin_cli().help()
+        assert True
+
+    except Exception:
+        assert False

 def test_check_txid_format():
     assert(check_txid_format(None) is False)
     assert(check_txid_format("") is False)

View File

@@ -20,8 +20,8 @@ from pisa.appointment import Appointment
 from pisa.tools import check_txid_format
 from test.simulator.utils import sha256d
 from test.simulator.transaction import TX
-from test.unit.conftest import generate_block
 from pisa.utils.auth_proxy import AuthServiceProxy
+from test.unit.conftest import generate_block, generate_blocks
 from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, PISA_SECRET_KEY

 logging.getLogger().disabled = True
@@ -156,20 +156,17 @@ def test_do_watch(watcher):
     # Broadcast the first two
     for dispute_tx in dispute_txs[:2]:
-        r = bitcoin_cli.sendrawtransaction(dispute_tx)
+        bitcoin_cli.sendrawtransaction(dispute_tx)

     # After leaving some time for the block to be mined and processed, the number of appointments should have reduced
     # by two
-    for _ in range(START_TIME_OFFSET + END_TIME_OFFSET):
-        generate_block()
+    generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)

     assert len(watcher.appointments) == APPOINTMENTS - 2

     # The rest of appointments will timeout after the end (2) + EXPIRY_DELTA
     # Wait for an additional block to be safe
-    for _ in range(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET):
-        generate_block()
+    generate_blocks(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET)

     assert len(watcher.appointments) == 0
     assert watcher.asleep is True
assert watcher.asleep is True assert watcher.asleep is True