Mirror of https://github.com/aljazceru/python-teos.git (synced 2025-12-18 06:34:19 +01:00)
Merge branch '59-uncovered-tests'
@@ -3,6 +3,7 @@ import time
 import json
 import logging
 import binascii
+from itertools import islice
 from threading import Thread, Event
 from flask import Flask, request, Response, abort
 
@@ -19,7 +20,7 @@ PORT = "18443"
 blockchain = []
 blocks = {}
 mined_transactions = {}
-mempool = []
+mempool = {}
 
 mine_new_block = Event()
 
@@ -138,7 +139,7 @@ def process_request():
 
         if TX.deserialize(rawtx) is not None:
             if txid not in list(mined_transactions.keys()):
-                mempool.append(rawtx)
+                mempool[txid] = rawtx
                 response["result"] = {"txid": txid}
 
             else:
@@ -164,7 +165,7 @@ def process_request():
             response["result"] = {"hex": rawtx, "confirmations": len(blockchain) - block.get("height")}
 
         elif txid in mempool:
-            response["result"] = {"confirmations": 0}
+            response["result"] = {"confirmations": None}
 
         else:
             response["error"] = {
@@ -260,11 +261,9 @@ def simulate_mining(mode, time_between_blocks):
 
         if len(mempool) != 0:
             # We'll mine up to 100 txs per block
-            for rawtx in mempool[:99]:
-                txid = sha256d(rawtx)
+            for txid, rawtx in dict(islice(mempool.items(), 99)).items():
                 txs_to_mine[txid] = rawtx
-
-            mempool = mempool[99:]
+                mempool.pop(txid)
 
             # Keep track of the mined transaction (to respond to getrawtransaction)
             for txid, tx in txs_to_mine.items():
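The change above swaps the simulator's mempool from a list of raw transactions to a dict keyed by txid, so lookups and removals are O(1) and islice can take a bounded batch without re-hashing. A minimal standalone sketch of the batching pattern (standard library only, names are illustrative):

    from itertools import islice

    mempool = {f"txid{i}": f"rawtx{i}" for i in range(5)}

    # Materialize up to 3 entries for the next "block", then drop them from the pool
    batch = dict(islice(mempool.items(), 3))
    for txid in batch:
        mempool.pop(txid)

    print(batch)    # the first three txid -> rawtx pairs
    print(mempool)  # the two remaining entries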
@@ -31,9 +31,14 @@ def parse_varint(tx):
     # First of all, the offset of the hex transaction is moved to the proper position (i.e. where the varint should be
     # located) and the length and format of the data to be analyzed is checked.
     data = tx.hex[tx.offset :]
-    assert len(data) > 0
-    size = int(data[:2], 16)
-    assert size <= 255
+    if len(data) > 0:
+        size = int(data[:2], 16)
+
+    else:
+        raise ValueError("No data to be parsed")
+
+    if size > 255:
+        raise ValueError("Wrong value (varint size > 255)")
 
     # Then, the integer is encoded as a varint using the proper prefix, if needed.
     if size <= 252:  # No prefix
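For context on what parse_varint validates: Bitcoin varints use the first byte as a size marker, so a leading byte above 0xff is impossible and an empty string has nothing to parse. A hypothetical minimal encoder for the same format (values up to 0xfc are encoded as-is; 0xfd, 0xfe and 0xff prefix 2-, 4- and 8-byte little-endian integers):

    import struct

    def encode_varint(n):
        # Bitcoin-style variable-length integer encoding
        if n <= 0xFC:
            return struct.pack("<B", n)
        elif n <= 0xFFFF:
            return b"\xfd" + struct.pack("<H", n)
        elif n <= 0xFFFFFFFF:
            return b"\xfe" + struct.pack("<I", n)
        else:
            return b"\xff" + struct.pack("<Q", n)

    assert encode_varint(252).hex() == "fc"
    assert encode_varint(253).hex() == "fdfd00"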
@@ -13,11 +13,8 @@ from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives.asymmetric import ec
 from cryptography.hazmat.primitives import serialization
 
-from pisa.conf import DB_PATH
 from apps.cli.blob import Blob
-from pisa.api import start_api
 from pisa.responder import Job
-from pisa.watcher import Watcher
 from pisa.tools import bitcoin_cli
 from pisa.db_manager import DBManager
 from pisa.appointment import Appointment
@@ -36,19 +33,6 @@ def run_bitcoind():
     sleep(0.1)
 
 
-@pytest.fixture(scope="session")
-def run_api():
-    db_manager = DBManager(DB_PATH)
-    watcher = Watcher(db_manager)
-
-    api_thread = Thread(target=start_api, args=[watcher])
-    api_thread.daemon = True
-    api_thread.start()
-
-    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
-    sleep(0.1)
-
-
 @pytest.fixture(scope="session", autouse=True)
 def prng_seed():
     random.seed(0)
@@ -66,7 +50,7 @@ def generate_keypair():
     return client_sk, client_pk
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="session")
 def db_manager():
     manager = DBManager("test_db")
     yield manager
@@ -96,8 +80,12 @@ def sign_appointment(sk, appointment):
     return hexlify(sk.sign(data, ec.ECDSA(hashes.SHA256()))).decode("utf-8")
 
 
-def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
-    current_height = bitcoin_cli().getblockcount()
+def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_time_offset=30):
+    if real_height:
+        current_height = bitcoin_cli().getblockcount()
+
+    else:
+        current_height = 10
 
     dispute_tx = TX.create_dummy_transaction()
     dispute_txid = sha256d(dispute_tx)
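The new real_height flag lets callers that run without bitcoind (the builder and db_manager tests later in this commit pass real_height=False) skip the getblockcount RPC and fall back to a hardcoded height. Expected usage, per the tests below:

    # With bitcoind (or the simulator) running, use the actual chain height
    data, dispute_tx = generate_dummy_appointment_data(real_height=True)

    # Without a node, fall back to the hardcoded height of 10
    data, dispute_tx = generate_dummy_appointment_data(real_height=False)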
@@ -135,7 +123,6 @@ def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
         "encrypted_blob": encrypted_blob,
         "cipher": cipher,
         "hash_function": hash_function,
-        "triggered": False,
     }
 
     signature = sign_appointment(client_sk, appointment_data)
@@ -145,9 +132,9 @@ def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
     return data, dispute_tx
 
 
-def generate_dummy_appointment(start_time_offset=5, end_time_offset=30):
+def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
     appointment_data, dispute_tx = generate_dummy_appointment_data(
-        start_time_offset=start_time_offset, end_time_offset=end_time_offset
+        real_height=real_height, start_time_offset=start_time_offset, end_time_offset=end_time_offset
     )
 
     return Appointment.from_dict(appointment_data["appointment"]), dispute_tx
@@ -1,11 +1,15 @@
 import json
+import pytest
 import requests
 from time import sleep
+from threading import Thread
 
+from pisa.api import start_api
+from pisa.watcher import Watcher
+from pisa.tools import bitcoin_cli
 from pisa import HOST, PORT, c_logger
-from pisa.utils.auth_proxy import AuthServiceProxy
-from test.unit.conftest import generate_blocks, get_random_value_hex, generate_dummy_appointment_data
-from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS
+from pisa.conf import MAX_APPOINTMENTS
+from test.unit.conftest import generate_block, generate_blocks, get_random_value_hex, generate_dummy_appointment_data
 
 c_logger.disabled = True
@@ -16,6 +20,18 @@ appointments = []
 locator_dispute_tx_map = {}
 
 
+@pytest.fixture(scope="module")
+def run_api(db_manager):
+    watcher = Watcher(db_manager)
+
+    api_thread = Thread(target=start_api, args=[watcher])
+    api_thread.daemon = True
+    api_thread.start()
+
+    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
+    sleep(0.1)
+
+
 @pytest.fixture
 def new_appt_data():
     appt_data, dispute_tx = generate_dummy_appointment_data()
@@ -44,28 +60,6 @@ def test_add_appointment(run_api, run_bitcoind, new_appt_data):
     assert r.status_code == 400
 
 
-def test_request_appointment(new_appt_data):
-    # First we need to add an appointment
-    r = add_appointment(new_appt_data)
-    assert r.status_code == 200
-
-    # Next we can request it
-    r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
-    assert r.status_code == 200
-
-    # Each locator may point to multiple appointments, check them all
-    received_appointments = json.loads(r.content)
-
-    # Take the status out and leave the received appointments ready to compare
-    appointment_status = [appointment.pop("status") for appointment in received_appointments]
-
-    # Check that the appointment is within the received appoints
-    assert new_appt_data["appointment"] in received_appointments
-
-    # Check that all the appointments are being watched
-    assert all([status == "being_watched" for status in appointment_status])
-
-
 def test_request_random_appointment():
     r = requests.get(url=PISA_API + "/get_appointment?locator=" + get_random_value_hex(32))
     assert r.status_code == 200
@@ -89,7 +83,7 @@ def test_request_multiple_appointments_same_locator(new_appt_data, n=MULTIPLE_AP
         r = add_appointment(new_appt_data)
         assert r.status_code == 200
 
-    test_request_appointment(new_appt_data)
+    test_request_appointment_watcher(new_appt_data)
 
 
 def test_add_too_many_appointment(new_appt_data):
@@ -117,12 +111,10 @@ def test_get_all_appointments_watcher():
 
 def test_get_all_appointments_responder():
     # Trigger all disputes
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
-
     locators = [appointment["locator"] for appointment in appointments]
     for locator, dispute_tx in locator_dispute_tx_map.items():
         if locator in locators:
-            bitcoin_cli.sendrawtransaction(dispute_tx)
+            bitcoin_cli().sendrawtransaction(dispute_tx)
 
     # Confirm transactions
     generate_blocks(6)
@@ -135,8 +127,49 @@ def test_get_all_appointments_responder():
     responder_jobs = [v["locator"] for k, v in received_appointments["responder_jobs"].items()]
     local_locators = [appointment["locator"] for appointment in appointments]
 
-    watcher_appointments = [v["locator"] for k, v in received_appointments["watcher_appointments"].items()]
-    print(set(watcher_appointments) == set(local_locators))
-
     assert set(responder_jobs) == set(local_locators)
     assert len(received_appointments["watcher_appointments"]) == 0
+
+
+def test_request_appointment_watcher(new_appt_data):
+    # First we need to add an appointment
+    r = add_appointment(new_appt_data)
+    assert r.status_code == 200
+
+    # Next we can request it
+    r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
+    assert r.status_code == 200
+
+    # Each locator may point to multiple appointments, check them all
+    received_appointments = json.loads(r.content)
+
+    # Take the status out and leave the received appointments ready to compare
+    appointment_status = [appointment.pop("status") for appointment in received_appointments]
+
+    # Check that the appointment is within the received appointments
+    assert new_appt_data["appointment"] in received_appointments
+
+    # Check that all the appointments are being watched
+    assert all([status == "being_watched" for status in appointment_status])
+
+
+def test_request_appointment_responder(new_appt_data):
+    # Let's do something similar to what we did with the watcher but now we'll send the dispute tx to the network
+    dispute_tx = locator_dispute_tx_map[new_appt_data["appointment"]["locator"]]
+    bitcoin_cli().sendrawtransaction(dispute_tx)
+
+    r = add_appointment(new_appt_data)
+    assert r.status_code == 200
+
+    # Generate a block to trigger the watcher
+    generate_block()
+
+    r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
+    assert r.status_code == 200
+
+    received_appointments = json.loads(r.content)
+    appointment_status = [appointment.pop("status") for appointment in received_appointments]
+    appointment_locators = [appointment["locator"] for appointment in received_appointments]
+
+    assert new_appt_data["appointment"]["locator"] in appointment_locators and len(received_appointments) == 1
+    assert all([status == "dispute_responded" for status in appointment_status]) and len(appointment_status) == 1
@@ -20,7 +20,15 @@ def appointment_data():
     cipher = "AES-GCM-128"
     hash_function = "SHA256"
 
-    return locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function
+    return {
+        "locator": locator,
+        "start_time": start_time,
+        "end_time": end_time,
+        "dispute_delta": dispute_delta,
+        "encrypted_blob": encrypted_blob_data,
+        "cipher": cipher,
+        "hash_function": hash_function,
+    }
 
 
 def test_init_appointment(appointment_data):
@@ -28,51 +36,98 @@ def test_init_appointment(appointment_data):
     # creating appointments.
     # DISCUSS: whether this makes sense by design or checks should be ported from the inspector to the appointment
     # 35-appointment-checks
 
-    locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data
-
-    appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
+    appointment = Appointment(
+        appointment_data["locator"],
+        appointment_data["start_time"],
+        appointment_data["end_time"],
+        appointment_data["dispute_delta"],
+        appointment_data["encrypted_blob"],
+        appointment_data["cipher"],
+        appointment_data["hash_function"],
+    )
 
     assert (
-        locator == appointment.locator
-        and start_time == appointment.start_time
-        and end_time == appointment.end_time
-        and EncryptedBlob(encrypted_blob_data) == appointment.encrypted_blob
-        and cipher == appointment.cipher
-        and dispute_delta == appointment.dispute_delta
-        and hash_function == appointment.hash_function
+        appointment_data["locator"] == appointment.locator
+        and appointment_data["start_time"] == appointment.start_time
+        and appointment_data["end_time"] == appointment.end_time
+        and appointment_data["dispute_delta"] == appointment.dispute_delta
+        and EncryptedBlob(appointment_data["encrypted_blob"]) == appointment.encrypted_blob
+        and appointment_data["cipher"] == appointment.cipher
+        and appointment_data["hash_function"] == appointment.hash_function
     )
 
 
 def test_to_dict(appointment_data):
-    locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data
-    appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
+    appointment = Appointment(
+        appointment_data["locator"],
+        appointment_data["start_time"],
+        appointment_data["end_time"],
+        appointment_data["dispute_delta"],
+        appointment_data["encrypted_blob"],
+        appointment_data["cipher"],
+        appointment_data["hash_function"],
+    )
 
     dict_appointment = appointment.to_dict()
 
     assert (
-        locator == dict_appointment.get("locator")
-        and start_time == dict_appointment.get("start_time")
-        and end_time == dict_appointment.get("end_time")
-        and dispute_delta == dict_appointment.get("dispute_delta")
-        and cipher == dict_appointment.get("cipher")
-        and hash_function == dict_appointment.get("hash_function")
-        and encrypted_blob_data == dict_appointment.get("encrypted_blob")
+        appointment_data["locator"] == dict_appointment["locator"]
+        and appointment_data["start_time"] == dict_appointment["start_time"]
+        and appointment_data["end_time"] == dict_appointment["end_time"]
+        and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
+        and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
+        and appointment_data["cipher"] == dict_appointment["cipher"]
+        and appointment_data["hash_function"] == dict_appointment["hash_function"]
     )
 
 
 def test_to_json(appointment_data):
-    locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data
-    appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
+    appointment = Appointment(
+        appointment_data["locator"],
+        appointment_data["start_time"],
+        appointment_data["end_time"],
+        appointment_data["dispute_delta"],
+        appointment_data["encrypted_blob"],
+        appointment_data["cipher"],
+        appointment_data["hash_function"],
+    )
 
     dict_appointment = json.loads(appointment.to_json())
 
     assert (
-        locator == dict_appointment.get("locator")
-        and start_time == dict_appointment.get("start_time")
-        and end_time == dict_appointment.get("end_time")
-        and dispute_delta == dict_appointment.get("dispute_delta")
-        and cipher == dict_appointment.get("cipher")
-        and hash_function == dict_appointment.get("hash_function")
-        and encrypted_blob_data == dict_appointment.get("encrypted_blob")
+        appointment_data["locator"] == dict_appointment["locator"]
+        and appointment_data["start_time"] == dict_appointment["start_time"]
+        and appointment_data["end_time"] == dict_appointment["end_time"]
+        and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
+        and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
+        and appointment_data["cipher"] == dict_appointment["cipher"]
+        and appointment_data["hash_function"] == dict_appointment["hash_function"]
     )
+
+
+def test_from_dict(appointment_data):
+    # The appointment should be built if we don't miss any field
+    appointment = Appointment.from_dict(appointment_data)
+    assert isinstance(appointment, Appointment)
+
+    # Otherwise it should fail
+    for key in appointment_data.keys():
+        prev_val = appointment_data[key]
+        appointment_data[key] = None
+
+        try:
+            Appointment.from_dict(appointment_data)
+            assert False
+
+        except ValueError:
+            appointment_data[key] = prev_val
+            assert True
+
+
+def test_serialize(appointment_data):
+    appointment = Appointment.from_dict(appointment_data)
+
+    assert appointment.triggered is False
+    ser_appointment = appointment.serialize()
+
+    assert ser_appointment == json.dumps(appointment_data, sort_keys=True, separators=(",", ":")).encode("utf-8")
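test_serialize pins down that Appointment.serialize() is canonical JSON: sorted keys and compact separators make the byte string deterministic, which matters because signatures are computed over it. A self-contained illustration of the same encoding:

    import json

    appointment = {"locator": "ab" * 16, "start_time": 100, "end_time": 130}

    # Sorted keys plus compact separators give a reproducible byte representation
    ser = json.dumps(appointment, sort_keys=True, separators=(",", ":")).encode("utf-8")
    print(ser)  # b'{"end_time":130,"locator":"abab...","start_time":100}'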
@@ -1,26 +1,19 @@
 import pytest
-from uuid import uuid4
-from hashlib import sha256
-from binascii import unhexlify
 
 from pisa import c_logger
 from pisa.block_processor import BlockProcessor
-from test.unit.conftest import get_random_value_hex
+from test.unit.conftest import get_random_value_hex, generate_block, generate_blocks
 
 c_logger.disabled = True
 
-APPOINTMENT_COUNT = 100
-TEST_SET_SIZE = 200
-
-
-@pytest.fixture(scope="module")
-def txids():
-    return [get_random_value_hex(32) for _ in range(APPOINTMENT_COUNT)]
-
-
-@pytest.fixture(scope="module")
-def locator_uuid_map(txids):
-    return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids}
+hex_tx = (
+    "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
+    "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
+    "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
+    "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba"
+    "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e"
+    "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
+)
 
 
 @pytest.fixture
@@ -54,27 +47,45 @@ def test_get_block_count():
     assert isinstance(block_count, int) and block_count >= 0
 
 
-def test_potential_matches(txids, locator_uuid_map):
-    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
-
-    # All the txids must match
-    assert locator_uuid_map.keys() == potential_matches.keys()
+def test_decode_raw_transaction():
+    # We cannot exhaustively test this (we rely on bitcoind for this) but we can try to decode a correct transaction
+    assert BlockProcessor.decode_raw_transaction(hex_tx) is not None
 
 
-def test_potential_matches_random(locator_uuid_map):
-    txids = [get_random_value_hex(32) for _ in range(len(locator_uuid_map))]
-
-    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
-
-    # None of the ids should match
-    assert len(potential_matches) == 0
+def test_decode_raw_transaction_invalid():
+    # Same but with an invalid one
+    assert BlockProcessor.decode_raw_transaction(hex_tx[::-1]) is None
 
 
-def test_potential_matches_random_data(locator_uuid_map):
-    # The likelihood of finding a potential match with random data should be negligible
-    txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
+def test_get_missed_blocks():
+    block_processor = BlockProcessor()
+    target_block = block_processor.get_best_block_hash()
 
-    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
+    # Generate some blocks and store the hash in a list
+    missed_blocks = []
+    for _ in range(5):
+        generate_block()
+        missed_blocks.append(BlockProcessor.get_best_block_hash())
 
-    # None of the txids should match
-    assert len(potential_matches) == 0
+    # Check what we've missed
+    assert block_processor.get_missed_blocks(target_block) == missed_blocks
+
+    # We can see how it does not work if we replace the target by the first element in the list
+    block_tip = missed_blocks[0]
+    assert block_processor.get_missed_blocks(block_tip) != missed_blocks
+
+    # But it does again if we skip that block
+    assert block_processor.get_missed_blocks(block_tip) == missed_blocks[1:]
+
+
+def test_get_distance_to_tip():
+    target_distance = 5
+
+    block_processor = BlockProcessor()
+    target_block = block_processor.get_best_block_hash()
+
+    # Mine some blocks up to the target distance
+    generate_blocks(target_distance)
+
+    # Check if the distance is properly computed
+    assert block_processor.get_distance_to_tip(target_block) == target_distance
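The contract test_get_missed_blocks relies on: given the last block hash we processed, get_missed_blocks returns every hash mined after it, in order. A toy model of that contract (a plain list stands in for the real chain):

    chain = ["h0", "h1", "h2", "h3", "h4", "h5"]

    def get_missed_blocks(last_known_hash):
        # Everything after the last block we processed counts as missed
        return chain[chain.index(last_known_hash) + 1:]

    assert get_missed_blocks("h0") == ["h1", "h2", "h3", "h4", "h5"]
    assert get_missed_blocks("h4") == ["h5"]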
@@ -9,7 +9,7 @@ def test_build_appointments():
 
     # Create some appointment data
     for i in range(10):
-        appointment, _ = generate_dummy_appointment()
+        appointment, _ = generate_dummy_appointment(real_height=False)
        uuid = uuid4().hex
 
         appointments_data[uuid] = appointment.to_dict()
@@ -17,7 +17,7 @@ def test_build_appointments():
         # Add some additional appointments that share the same locator to test all the builder's cases
         if i % 2 == 0:
             locator = appointment.locator
-            appointment, _ = generate_dummy_appointment()
+            appointment, _ = generate_dummy_appointment(real_height=False)
             uuid = uuid4().hex
             appointment.locator = locator
 
@@ -4,7 +4,7 @@ from pisa import c_logger
 from pisa.carrier import Carrier
 from test.simulator.utils import sha256d
 from test.simulator.transaction import TX
-from test.unit.conftest import generate_blocks, get_random_value_hex
+from test.unit.conftest import generate_blocks, generate_block, get_random_value_hex
 from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR
 
 c_logger.disabled = True
@@ -5,63 +5,81 @@ from pisa import c_logger
 from pisa.responder import Job
 from pisa.cleaner import Cleaner
 from pisa.appointment import Appointment
+from pisa.db_manager import WATCHER_PREFIX
 from test.unit.conftest import get_random_value_hex
 
 CONFIRMATIONS = 6
 ITEMS = 10
 MAX_ITEMS = 100
-ITERATIONS = 1000
+ITERATIONS = 10
 
 c_logger.disabled = True
 
 
-def set_up_appointments(total_appointments):
+# WIP: FIX CLEANER TESTS AFTER ADDING delete_complete_appointment
+def set_up_appointments(db_manager, total_appointments):
     appointments = dict()
     locator_uuid_map = dict()
 
-    for _ in range(total_appointments):
+    for i in range(total_appointments):
         uuid = uuid4().hex
         locator = get_random_value_hex(32)
 
-        appointments[uuid] = Appointment(locator, None, None, None, None, None, None)
+        appointment = Appointment(locator, None, None, None, None, None, None)
+        appointments[uuid] = appointment
         locator_uuid_map[locator] = [uuid]
 
-        # Each locator can have more than one uuid assigned to it. Do a coin toss to add multiple ones
-        while random.randint(0, 1):
+        db_manager.store_watcher_appointment(uuid, appointment.to_json())
+        db_manager.store_update_locator_map(locator, uuid)
+
+        # Each locator can have more than one uuid assigned to it.
+        if i % 2:
             uuid = uuid4().hex
 
-            appointments[uuid] = Appointment(locator, None, None, None, None, None, None)
+            appointments[uuid] = appointment
             locator_uuid_map[locator].append(uuid)
 
+            db_manager.store_watcher_appointment(uuid, appointment.to_json())
+            db_manager.store_update_locator_map(locator, uuid)
+
     return appointments, locator_uuid_map
 
 
-def set_up_jobs(total_jobs):
+def set_up_jobs(db_manager, total_jobs):
     jobs = dict()
     tx_job_map = dict()
 
-    for _ in range(total_jobs):
+    for i in range(total_jobs):
         uuid = uuid4().hex
-        txid = get_random_value_hex(32)
 
-        # We use the same txid for justice and dispute here, it shouldn't matter
+        justice_txid = get_random_value_hex(32)
+        dispute_txid = get_random_value_hex(32)
 
-        jobs[uuid] = Job(txid, txid, None, None)
-        tx_job_map[txid] = [uuid]
+        # Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
+        job = Job(dispute_txid, justice_txid, None, None)
+        jobs[uuid] = job
+        tx_job_map[justice_txid] = [uuid]
 
-        # Each justice_txid can have more than one uuid assigned to it. Do a coin toss to add multiple ones
-        while random.randint(0, 1):
+        db_manager.store_responder_job(uuid, job.to_json())
+        db_manager.store_update_locator_map(job.locator, uuid)
+
+        # Each justice_txid can have more than one uuid assigned to it.
+        if i % 2:
             uuid = uuid4().hex
 
-            jobs[uuid] = Job(txid, txid, None, None)
-            tx_job_map[txid].append(uuid)
+            jobs[uuid] = job
+            tx_job_map[justice_txid].append(uuid)
 
+            db_manager.store_responder_job(uuid, job.to_json())
+            db_manager.store_update_locator_map(job.locator, uuid)
+
     return jobs, tx_job_map
 
 
 def test_delete_expired_appointment(db_manager):
 
     for _ in range(ITERATIONS):
-        appointments, locator_uuid_map = set_up_appointments(MAX_ITEMS)
+        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
         expired_appointments = random.sample(list(appointments.keys()), k=ITEMS)
 
         Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map, db_manager)
@@ -69,11 +87,29 @@ def test_delete_expired_appointment(db_manager):
         assert not set(expired_appointments).issubset(appointments.keys())
 
 
-def test_delete_completed_jobs(db_manager):
+def test_delete_completed_appointments(db_manager):
+    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+    uuids = list(appointments.keys())
+
+    for uuid in uuids:
+        Cleaner.delete_completed_appointment(
+            appointments[uuid].locator, uuid, appointments, locator_uuid_map, db_manager
+        )
+
+    # All appointments should have been deleted
+    assert len(appointments) == 0
+
+    # Make sure that all appointments are flagged as triggered in the db
+    db_appointments = db_manager.load_appointments_db(prefix=WATCHER_PREFIX)
+    for uuid in uuids:
+        assert db_appointments[uuid]["triggered"] is True
+
+
+def test_delete_completed_jobs_db_match(db_manager):
     height = 0
 
     for _ in range(ITERATIONS):
-        jobs, tx_job_map = set_up_jobs(MAX_ITEMS)
+        jobs, tx_job_map = set_up_jobs(db_manager, MAX_ITEMS)
         selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)
 
         completed_jobs = [(job, 6) for job in selected_jobs]
@@ -81,3 +117,38 @@ def test_delete_completed_jobs(db_manager):
         Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, height, db_manager)
 
         assert not set(completed_jobs).issubset(jobs.keys())
+
+
+def test_delete_completed_jobs_no_db_match(db_manager):
+    height = 0
+
+    for _ in range(ITERATIONS):
+        jobs, tx_job_map = set_up_jobs(db_manager, MAX_ITEMS)
+        selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)
+
+        # Let's change some uuid's by creating new jobs that are not included in the db and share a justice_txid with
+        # another job that is stored in the db.
+        for uuid in selected_jobs[: ITEMS // 2]:
+            justice_txid = jobs[uuid].justice_txid
+            dispute_txid = get_random_value_hex(32)
+            new_uuid = uuid4().hex
+
+            jobs[new_uuid] = Job(dispute_txid, justice_txid, None, None)
+            tx_job_map[justice_txid].append(new_uuid)
+            selected_jobs.append(new_uuid)
+
+        # Let's add some random data
+        for i in range(ITEMS // 2):
+            uuid = uuid4().hex
+            justice_txid = get_random_value_hex(32)
+            dispute_txid = get_random_value_hex(32)
+
+            jobs[uuid] = Job(dispute_txid, justice_txid, None, None)
+            tx_job_map[justice_txid] = [uuid]
+            selected_jobs.append(uuid)
+
+        completed_jobs = [(job, 6) for job in selected_jobs]
+
+        # We should be able to delete the correct ones and not fail in the others
+        Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, height, db_manager)
+        assert not set(completed_jobs).issubset(jobs.keys())
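A recurring change in these cleaner tests is replacing the coin-toss loop with an `if i % 2` branch. The point is reproducibility: the old setup created a random number of shared uuids per locator, the new one creates exactly one for every other item. A quick illustration of the difference:

    import random

    # Old: the number of extra uuids per locator varied between runs
    extra = 0
    while random.randint(0, 1):
        extra += 1

    # New: deterministic, every other item gets exactly one extra uuid
    extras = [1 if i % 2 else 0 for i in range(10)]
    print(extra, extras)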
test/unit/test_cryptographer.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+import binascii
+
+from pisa.cryptographer import Cryptographer
+from pisa.encrypted_blob import EncryptedBlob
+from test.unit.conftest import get_random_value_hex
+
+data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777"
+key = "b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659"
+encrypted_data = "092e93d4a34aac4367075506f2c050ddfa1a201ee6669b65058572904dcea642aeb01ea4b57293618e8c46809dfadadc"
+encrypted_blob = EncryptedBlob(encrypted_data)
+
+
+# TODO: The decryption tests are assuming the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
+def test_decrypt_wrong_data():
+    random_key = get_random_value_hex(32)
+    random_encrypted_data = get_random_value_hex(64)
+    random_encrypted_blob = EncryptedBlob(random_encrypted_data)
+
+    # Trying to decrypt random data (in AES-GCM-128) should result in an InvalidTag exception. Our decrypt function
+    # returns None
+    hex_tx = Cryptographer.decrypt(random_encrypted_blob, random_key)
+    assert hex_tx is None
+
+
+def test_decrypt_odd_length():
+    random_key = get_random_value_hex(32)
+    random_encrypted_data_odd = get_random_value_hex(64)[:-1]
+    random_encrypted_blob_odd = EncryptedBlob(random_encrypted_data_odd)
+
+    assert Cryptographer.decrypt(random_encrypted_blob_odd, random_key) is None
+
+
+def test_decrypt_hex():
+    # Valid data should run with no InvalidTag and verify
+    assert Cryptographer.decrypt(encrypted_blob, key) == data
+
+
+def test_decrypt_bytes():
+    # We can also get the decryption in bytes
+    byte_blob = Cryptographer.decrypt(encrypted_blob, key, rtype="bytes")
+    assert isinstance(byte_blob, bytes) and byte_blob == binascii.unhexlify(data)
+
+
+def test_decrypt_wrong_return():
+    # Any other type but "hex" (default) or "bytes" should fail
+    try:
+        Cryptographer.decrypt(encrypted_blob, key, rtype="random_value")
+        assert False
+
+    except ValueError:
+        assert True
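The wrong-data tests above lean on AES-GCM being authenticated: decrypting anything that was not produced with the right key and nonce raises InvalidTag, which Cryptographer.decrypt is expected to translate into None. A standalone sketch of that failure mode using the cryptography package directly (the blob layout here is illustrative, not pisa's actual format):

    from os import urandom
    from cryptography.exceptions import InvalidTag
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = AESGCM.generate_key(bit_length=128)
    nonce = urandom(12)
    ciphertext = AESGCM(key).encrypt(nonce, b"rawtx bytes", None)

    try:
        # Flipping one byte breaks the authentication tag check
        tampered = bytes([ciphertext[0] ^ 1]) + ciphertext[1:]
        AESGCM(key).decrypt(nonce, tampered, None)
    except InvalidTag:
        print("tampered ciphertext rejected")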
@@ -11,7 +11,7 @@ from pisa.db_manager import WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY, LO
 
 @pytest.fixture(scope="module")
 def watcher_appointments():
-    return {uuid4().hex: generate_dummy_appointment()[0] for _ in range(10)}
+    return {uuid4().hex: generate_dummy_appointment(real_height=False)[0] for _ in range(10)}
 
 
 @pytest.fixture(scope="module")
@@ -80,8 +80,18 @@ def test_load_appointments_db(db_manager):
     assert set(values) == set(local_appointments.values()) and (len(values) == len(local_appointments))
 
 
-def test_get_last_known_block(db_manager):
+def test_get_last_known_block():
+    db_path = "empty_db"
+
+    # First we check if the db exists, and if so we delete it
+    if os.path.isdir(db_path):
+        shutil.rmtree(db_path)
+
+    # Check that the db can be created if it does not exist
+    db_manager = open_create_db(db_path)
+
     # Trying to get any last block for either the watcher or the responder should return None for an empty db
-
     for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
         assert db_manager.get_last_known_block(key) is None
 
@@ -91,6 +101,9 @@ def test_get_last_known_block(db_manager):
     db_manager.db.put(key.encode("utf-8"), block_hash.encode("utf-8"))
     assert db_manager.get_last_known_block(key) == block_hash
 
+    # Removing test db
+    shutil.rmtree(db_path)
+
 
 def test_create_entry(db_manager):
     key = get_random_value_hex(32)
@@ -11,21 +11,27 @@ def test_init_encrypted_blob():
     assert EncryptedBlob(data).data == data
 
 
-def test_decrypt():
-    # TODO: The decryption tests are assuming the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
-    key = get_random_value_hex(32)
-    encrypted_data = get_random_value_hex(64)
-    encrypted_blob = EncryptedBlob(encrypted_data)
+def test_init_encrypted_blob_wrong_cipher():
+    try:
+        EncryptedBlob(get_random_value_hex(64), cipher="")
+        assert False
 
-    # Trying to decrypt random data (in AES-GCM-128) should result in an InvalidTag exception. Our decrypt function
-    # returns None
-    hex_tx = encrypted_blob.decrypt(key)
-    assert hex_tx is None
+    except ValueError:
+        assert True
 
-    # Valid data should run with no InvalidTag and verify
-    data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777"
-    key = "b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659"
-    encrypted_data = "092e93d4a34aac4367075506f2c050ddfa1a201ee6669b65058572904dcea642aeb01ea4b57293618e8c46809dfadadc"
-    encrypted_blob = EncryptedBlob(encrypted_data)
 
-    assert encrypted_blob.decrypt(key) == data
+def test_init_encrypted_blob_wrong_hash_function():
+    try:
+        EncryptedBlob(get_random_value_hex(64), hash_function="")
+        assert False
+
+    except ValueError:
+        assert True
+
+
+def test_equal():
+    data = get_random_value_hex(64)
+    e_blob1 = EncryptedBlob(data)
+    e_blob2 = EncryptedBlob(data)
+
+    assert e_blob1 == e_blob2 and id(e_blob1) != id(e_blob2)
@@ -1,17 +1,20 @@
 import json
 import pytest
 import random
 from uuid import uuid4
+from shutil import rmtree
 from copy import deepcopy
 from threading import Thread
 from queue import Queue, Empty
 
 from pisa import c_logger
-from pisa.tools import check_txid_format
+from pisa.db_manager import DBManager
 from test.simulator.utils import sha256d
 from pisa.responder import Responder, Job
 from test.simulator.bitcoind_sim import TX
-from pisa.utils.auth_proxy import AuthServiceProxy
-from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
+from pisa.block_processor import BlockProcessor
+from pisa.tools import check_txid_format, bitcoin_cli
+from test.unit.conftest import generate_block, generate_blocks, get_random_value_hex
 
 c_logger.disabled = True
@@ -21,9 +24,17 @@ def responder(db_manager):
     return Responder(db_manager)
 
 
-def create_dummy_job_data(random_txid=False, justice_rawtx=None):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+@pytest.fixture()
+def temp_db_manager():
+    db_name = get_random_value_hex(8)
+    db_manager = DBManager(db_name)
+    yield db_manager
 
+    db_manager.db.close()
+    rmtree(db_name)
+
+
+def create_dummy_job_data(random_txid=False, justice_rawtx=None):
     # The following transaction data corresponds to a valid transaction. For some tests it may be interesting to have
     # some valid data, but for others we may need multiple different justice_txids.
 
@@ -46,7 +57,7 @@ def create_dummy_job_data(random_txid=False, justice_rawtx=None):
     if random_txid is True:
         justice_txid = get_random_value_hex(32)
 
-    appointment_end = bitcoin_cli.getblockcount() + 2
+    appointment_end = bitcoin_cli().getblockcount() + 2
 
     return dispute_txid, justice_txid, justice_rawtx, appointment_end
 
@@ -68,6 +79,23 @@ def test_job_init(run_bitcoind):
     )
 
 
+def test_on_sync(run_bitcoind, responder):
+    # We're on sync if we're one block or less behind the tip
+    chain_tip = BlockProcessor.get_best_block_hash()
+    assert Responder.on_sync(chain_tip) is True
+
+    generate_block()
+    assert Responder.on_sync(chain_tip) is True
+
+
+def test_on_sync_fail(responder):
+    # This should fail if we're more than 1 block behind the tip
+    chain_tip = BlockProcessor.get_best_block_hash()
+    generate_blocks(2)
+
+    assert Responder.on_sync(chain_tip) is False
+
+
 def test_job_to_dict():
     job = create_dummy_job()
     job_dict = job.to_dict()
@@ -90,6 +118,28 @@ def test_job_to_json():
     )
 
 
+def test_job_from_dict():
+    job_dict = create_dummy_job().to_dict()
+    new_job = Job.from_dict(job_dict)
+
+    assert job_dict == new_job.to_dict()
+
+
+def test_job_from_dict_invalid_data():
+    job_dict = create_dummy_job().to_dict()
+
+    for value in ["dispute_txid", "justice_txid", "justice_rawtx", "appointment_end"]:
+        job_dict_copy = deepcopy(job_dict)
+        job_dict_copy[value] = None
+
+        try:
+            Job.from_dict(job_dict_copy)
+            assert False
+
+        except ValueError:
+            assert True
+
+
 def test_init_responder(responder):
     assert type(responder.jobs) is dict and len(responder.jobs) == 0
     assert type(responder.tx_job_map) is dict and len(responder.tx_job_map) == 0
@@ -100,14 +150,11 @@ def test_init_responder(responder):
     assert responder.zmq_subscriber is None
 
 
-def test_add_response(responder):
+def test_add_response(db_manager):
+    responder = Responder(db_manager)
     uuid = uuid4().hex
     job = create_dummy_job()
 
-    # The responder automatically fires create_job on adding a job if it is asleep (initial state). Avoid this by
-    # setting the state to awake.
-    responder.asleep = False
-
     # The block_hash passed to add_response does not matter much now. It will be used in the future to deal with errors
     receipt = responder.add_response(
         uuid,
@@ -120,6 +167,36 @@ def test_add_response(responder):
 
     assert receipt.delivered is True
 
+    # The responder automatically fires create_job on adding a job if it is asleep. We need to stop the processes now.
+    # To do so we delete all the jobs, stop the zmq and create a new fake block to unblock the queue.get method
+    responder.jobs = dict()
+    responder.zmq_subscriber.terminate = True
+    responder.block_queue.put(get_random_value_hex(32))
+
+
+def test_add_bad_response(responder):
+    uuid = uuid4().hex
+    job = create_dummy_job()
+
+    # Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by setting
+    # it to awake. That will prevent the zmq thread from being launched again.
+    responder.asleep = False
+
+    # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, better tests are needed though.
+    job.justice_rawtx = job.justice_txid
+
+    # The block_hash passed to add_response does not matter much now. It will be used in the future to deal with errors
+    receipt = responder.add_response(
+        uuid,
+        job.dispute_txid,
+        job.justice_txid,
+        job.justice_rawtx,
+        job.appointment_end,
+        block_hash=get_random_value_hex(32),
+    )
+
+    assert receipt.delivered is False
+
 
 def test_create_job(responder):
     responder.asleep = False
@@ -151,6 +228,33 @@ def test_create_job(responder):
     )
 
 
+def test_create_job_same_justice_txid(responder):
+    # Create the same job using two different uuids
+    confirmations = 0
+    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
+    uuid_1 = uuid4().hex
+    uuid_2 = uuid4().hex
+
+    responder.create_job(uuid_1, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+    responder.create_job(uuid_2, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+
+    # Check that both jobs have been added
+    assert uuid_1 in responder.jobs and uuid_2 in responder.jobs
+    assert justice_txid in responder.tx_job_map
+    assert justice_txid in responder.unconfirmed_txs
+
+    # Check that the rest of the job data also matches
+    for uuid in [uuid_1, uuid_2]:
+        job = responder.jobs[uuid]
+        assert (
+            job.dispute_txid == dispute_txid
+            and job.justice_txid == justice_txid
+            and job.justice_rawtx == justice_rawtx
+            and job.appointment_end == appointment_end
+        )
+
+
 def test_create_job_already_confirmed(responder):
     responder.asleep = False
 
@@ -182,14 +286,13 @@ def test_do_subscribe(responder):
         assert False
 
 
-def test_do_watch(responder):
-    # Reinitializing responder (but keeping the subscriber)
-    responder.jobs = dict()
-    responder.tx_job_map = dict()
-    responder.unconfirmed_txs = []
-    responder.missed_confirmations = dict()
+def test_do_watch(temp_db_manager):
+    responder = Responder(temp_db_manager)
+    responder.block_queue = Queue()
 
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+    zmq_thread = Thread(target=responder.do_subscribe)
+    zmq_thread.daemon = True
+    zmq_thread.start()
 
     jobs = [create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(20)]
 
@@ -210,7 +313,7 @@ def test_do_watch(responder):
     # And broadcast some of the transactions
     broadcast_txs = []
     for job in jobs[:5]:
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)
         broadcast_txs.append(job.justice_txid)
 
     # Mine a block
@@ -229,7 +332,7 @@ def test_do_watch(responder):
     # Do the rest
     broadcast_txs = []
     for job in jobs[5:]:
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)
         broadcast_txs.append(job.justice_txid)
 
     # Mine a block
@@ -239,6 +342,40 @@ def test_do_watch(responder):
     assert responder.asleep is True
 
 
+def test_check_confirmations(temp_db_manager):
+    responder = Responder(temp_db_manager)
+    responder.block_queue = Queue()
+
+    zmq_thread = Thread(target=responder.do_subscribe)
+    zmq_thread.daemon = True
+    zmq_thread.start()
+
+    # check_confirmations checks, given a list of transactions for a block, which of the known justice transactions
+    # have been confirmed. To test this we need to create a list of transactions and the state of the responder
+    txs = [get_random_value_hex(32) for _ in range(20)]
+
+    # The responder has a list of unconfirmed transactions, let's make some of them be the ones we've received
+    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
+    txs_subset = random.sample(txs, k=10)
+    responder.unconfirmed_txs.extend(txs_subset)
+
+    # We also need to add them to the tx_job_map since they would be there in normal conditions
+    responder.tx_job_map = {txid: Job(txid, None, None, None) for txid in responder.unconfirmed_txs}
+
+    # Let's make sure that there are no txs with missed confirmations yet
+    assert len(responder.missed_confirmations) == 0
+
+    responder.check_confirmations(txs)
+
+    # After checking confirmations the txs in txs_subset should be confirmed (not part of unconfirmed_txs anymore)
+    # and the rest should have a missed confirmation
+    for tx in txs_subset:
+        assert tx not in responder.unconfirmed_txs
+
+    for tx in responder.unconfirmed_txs:
+        assert responder.missed_confirmations[tx] == 1
+
+
 def test_get_txs_to_rebroadcast(responder):
     # Let's create a few fake txids and assign at least 6 missing confirmations to each
     txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}
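A compact model of the bookkeeping test_check_confirmations exercises (assumed semantics, matching the assertions above): transactions seen in the block leave unconfirmed_txs, and everything still unconfirmed gets one more missed confirmation.

    def check_confirmations(block_txs, unconfirmed_txs, missed_confirmations):
        for txid in list(unconfirmed_txs):
            if txid in block_txs:
                unconfirmed_txs.remove(txid)
            else:
                missed_confirmations[txid] = missed_confirmations.get(txid, 0) + 1

    unconfirmed = ["a", "b", "c"]
    missed = {}
    check_confirmations(["b"], unconfirmed, missed)
    assert unconfirmed == ["a", "c"] and missed == {"a": 1, "c": 1}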
@@ -263,8 +400,7 @@ def test_get_txs_to_rebroadcast(responder):
 
 
 def test_get_completed_jobs(db_manager):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
-    initial_height = bitcoin_cli.getblockcount()
+    initial_height = bitcoin_cli().getblockcount()
 
     # Let's use a fresh responder for this to make it easier to compare the results
     responder = Responder(db_manager)
@@ -291,7 +427,7 @@ def test_get_completed_jobs(db_manager):
     responder.jobs.update(jobs_no_end)
 
     for uuid, job in responder.jobs.items():
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)
 
     # The dummy appointments have an end_appointment time of current + 2, but jobs need at least 6 confs by default
     generate_blocks(6)
@@ -1,6 +1,8 @@
 import pytest
 from uuid import uuid4
+from hashlib import sha256
 from threading import Thread
+from binascii import unhexlify
 from queue import Queue, Empty
 
 from cryptography.hazmat.backends import default_backend
@@ -12,24 +14,16 @@ from cryptography.exceptions import InvalidSignature
 from pisa import c_logger
 from pisa.watcher import Watcher
 from pisa.responder import Responder
-from pisa.tools import check_txid_format
-from pisa.utils.auth_proxy import AuthServiceProxy
-from test.unit.conftest import generate_block, generate_blocks, generate_dummy_appointment
-from pisa.conf import (
-    EXPIRY_DELTA,
-    BTC_RPC_USER,
-    BTC_RPC_PASSWD,
-    BTC_RPC_HOST,
-    BTC_RPC_PORT,
-    PISA_SECRET_KEY,
-    MAX_APPOINTMENTS,
-)
+from pisa.tools import check_txid_format, bitcoin_cli
+from test.unit.conftest import generate_block, generate_blocks, generate_dummy_appointment, get_random_value_hex
+from pisa.conf import EXPIRY_DELTA, PISA_SECRET_KEY, MAX_APPOINTMENTS
 
 c_logger.disabled = True
 
 APPOINTMENTS = 5
 START_TIME_OFFSET = 1
 END_TIME_OFFSET = 1
+TEST_SET_SIZE = 200
 
 with open(PISA_SECRET_KEY, "r") as key_file:
     pubkey_pem = key_file.read().encode("utf-8")
@@ -43,6 +37,16 @@ def watcher(db_manager):
     return Watcher(db_manager)
 
 
+@pytest.fixture(scope="module")
+def txids():
+    return [get_random_value_hex(32) for _ in range(100)]
+
+
+@pytest.fixture(scope="module")
+def locator_uuid_map(txids):
+    return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids}
+
+
 def create_appointments(n):
     locator_uuid_map = dict()
     appointments = dict()
@@ -81,6 +85,15 @@ def test_init(watcher):
     assert type(watcher.responder) is Responder
 
 
+def test_init_no_key(db_manager):
+    try:
+        Watcher(db_manager, pisa_sk_file=None)
+        assert False
+
+    except ValueError:
+        assert True
+
+
 def test_add_appointment(run_bitcoind, watcher):
     # The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state)
     # Avoid this by setting the state to awake.
@@ -96,6 +109,12 @@ def test_add_appointment(run_bitcoind, watcher):
     assert added_appointment is True
     assert is_signature_valid(appointment, sig, public_key)
 
+    # Check that we can also add an already added appointment (same locator)
+    added_appointment, sig = watcher.add_appointment(appointment)
+
+    assert added_appointment is True
+    assert is_signature_valid(appointment, sig, public_key)
+
 
 def test_sign_appointment(watcher):
     appointment, _ = generate_dummy_appointment(start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET)
@@ -142,8 +161,6 @@ def test_do_subscribe(watcher):
 
 
 def test_do_watch(watcher):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
-
     # We will wipe all the previous data and add 5 appointments
     watcher.appointments, watcher.locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)
 
@@ -153,7 +170,7 @@ def test_do_watch(watcher):
 
     # Broadcast the first two
     for dispute_tx in dispute_txs[:2]:
-        bitcoin_cli.sendrawtransaction(dispute_tx)
+        bitcoin_cli().sendrawtransaction(dispute_tx)
 
     # After leaving some time for the block to be mined and processed, the number of appointments should have reduced
     # by two
@@ -167,3 +184,75 @@ def test_do_watch(watcher):
 
     assert len(watcher.appointments) == 0
     assert watcher.asleep is True
+
+
+def test_get_matches(watcher, txids, locator_uuid_map):
+    watcher.locator_uuid_map = locator_uuid_map
+    potential_matches = watcher.get_matches(txids)
+
+    # All the txids must match
+    assert locator_uuid_map.keys() == potential_matches.keys()
+
+
+def test_get_matches_random_data(watcher, locator_uuid_map):
+    # The likelihood of finding a potential match with random data should be negligible
+    watcher.locator_uuid_map = locator_uuid_map
+    txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
+
+    potential_matches = watcher.get_matches(txids)
+
+    # None of the txids should match
+    assert len(potential_matches) == 0
+
+
+def test_filter_valid_matches_random_data(watcher):
+    appointments = {}
+    locator_uuid_map = {}
+    matches = {}
+
+    for i in range(TEST_SET_SIZE):
+        dummy_appointment, _ = generate_dummy_appointment()
+        uuid = uuid4().hex
+        appointments[uuid] = dummy_appointment
+
+        locator_uuid_map[dummy_appointment.locator] = [uuid]
+
+        if i % 2:
+            dispute_txid = get_random_value_hex(32)
+            matches[dummy_appointment.locator] = dispute_txid
+
+    watcher.locator_uuid_map = locator_uuid_map
+    watcher.appointments = appointments
+
+    filtered_valid_matches = watcher.filter_valid_matches(matches)
+
+    assert not any([fil_match["valid_match"] for uuid, fil_match in filtered_valid_matches.items()])
+
+
+def test_filter_valid_matches(watcher):
+    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
+    encrypted_blob = (
+        "29f55518945408f567bb7feb4d7bb15ba88b7d8ca0223a44d5c67dfe32d038caee7613e35736025d95ad4ecd6538a50"
+        "74cbe8d7739705697a5dc4d19b8a6e4459ed2d1b0d0a9b18c49bc2187dcbfb4046b14d58a1add83235fc632efc398d5"
+        "0abcb7738f1a04b3783d025c1828b4e8a8dc8f13f2843e6bc3bf08eade02fc7e2c4dce7d2f83b055652e944ac114e0b"
+        "72a9abcd98fd1d785a5d976c05ed780e033e125fa083c6591b6029aa68dbc099f148a2bc2e0cb63733e68af717d48d5"
+        "a312b5f5b2fcca9561b2ff4191f9cdff936a43f6efef4ee45fbaf1f18d0a4b006f3fc8399dd8ecb21f709d4583bba14"
+        "4af6d49fa99d7be2ca21059a997475aa8642b66b921dc7fc0321b6a2f6927f6f9bab55c75e17a19dc3b2ae895b6d4a4"
+        "f64f8eb21b1e"
+    )
+
+    dummy_appointment, _ = generate_dummy_appointment()
+    dummy_appointment.encrypted_blob.data = encrypted_blob
+    dummy_appointment.locator = sha256(unhexlify(dispute_txid)).hexdigest()
+    uuid = uuid4().hex
+
+    appointments = {uuid: dummy_appointment}
+    locator_uuid_map = {dummy_appointment.locator: [uuid]}
+    matches = {dummy_appointment.locator: dispute_txid}
+
+    watcher.appointments = appointments
+    watcher.locator_uuid_map = locator_uuid_map
+
+    filtered_valid_matches = watcher.filter_valid_matches(matches)
+
+    assert all([fil_match["valid_match"] for uuid, fil_match in filtered_valid_matches.items()])
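test_filter_valid_matches encodes the watcher's matching rule: an appointment locator is the sha256 of the dispute txid, so a broadcast dispute transaction can be linked back to its stored appointment. Reproducing the locator used in the test:

    from hashlib import sha256
    from binascii import unhexlify

    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
    locator = sha256(unhexlify(dispute_txid)).hexdigest()
    print(locator)  # the locator the watcher would look up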