Merge branch 'master' into 64-data-to-disk
@@ -1,23 +1,41 @@
import responses
import json
import os
import shutil
from binascii import hexlify

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

from common.appointment import Appointment
from common.cryptographer import Cryptographer

import apps.cli.pisa_cli as pisa_cli
from test.apps.cli.unit.conftest import get_random_value_hex

# TODO: should find a way of doing without this
from apps.cli.pisa_cli import build_appointment

# dummy keys for the tests
pisa_sk = ec.generate_private_key(ec.SECP256K1, default_backend())
pisa_pk = pisa_sk.public_key()

other_sk = ec.generate_private_key(ec.SECP256K1, default_backend())

pisa_sk_der = pisa_sk.private_bytes(
    encoding=serialization.Encoding.DER,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption(),
)
pisa_pk_der = pisa_pk.public_bytes(
    encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo
)

other_sk_der = other_sk.private_bytes(
    encoding=serialization.Encoding.DER,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption(),
)


# Replace the key in the module with a key we control for the tests
pisa_cli.pisa_public_key = pisa_pk
# Replace endpoint with dummy one
@@ -32,18 +50,47 @@ dummy_appointment_request = {
    "end_time": 50000,
    "to_self_delay": 200,
}
dummy_appointment = build_appointment(**dummy_appointment_request)

# FIXME: USE CRYPTOGRAPHER
# This is the format appointment turns into once it hits "add_appointment"
dummy_appointment_full = {
    "locator": get_random_value_hex(32),
    "start_time": 1500,
    "end_time": 50000,
    "to_self_delay": 200,
    "encrypted_blob": get_random_value_hex(120),
}

dummy_appointment = Appointment.from_dict(dummy_appointment_full)


def sign_appointment(sk, appointment):
    data = json.dumps(appointment, sort_keys=True, separators=(",", ":")).encode("utf-8")
    return hexlify(sk.sign(data, ec.ECDSA(hashes.SHA256()))).decode("utf-8")
def get_dummy_pisa_sk_der(*args):
    return pisa_sk_der


def get_dummy_pisa_pk(der_data):
    return pisa_pk
def get_dummy_pisa_pk_der(*args):
    return pisa_pk_der


def get_dummy_hex_pk_der(*args):
    return hexlify(get_dummy_pisa_pk_der())


def get_dummy_signature(*args):
    sk = Cryptographer.load_private_key_der(pisa_sk_der)
    return Cryptographer.sign(dummy_appointment.serialize(), sk)


def get_bad_signature(*args):
    sk = Cryptographer.load_private_key_der(other_sk_der)
    return Cryptographer.sign(dummy_appointment.serialize(), sk)


def valid_sig(*args):
    return True


def invalid_sig(*args):
    return False


@responses.activate
@@ -51,10 +98,12 @@ def test_add_appointment(monkeypatch):
    # Simulate a request to add_appointment for dummy_appointment, make sure that the right endpoint is requested
    # and the return value is True

    # make sure the test uses the right dummy key instead of loading it from disk
    monkeypatch.setattr(pisa_cli, "load_public_key", get_dummy_pisa_pk)
    # Make sure the test uses the dummy signature
    monkeypatch.setattr(pisa_cli, "get_appointment_signature", get_dummy_signature)
    monkeypatch.setattr(pisa_cli, "get_pk", get_dummy_hex_pk_der)
    monkeypatch.setattr(pisa_cli, "check_signature", valid_sig)

    response = {"locator": dummy_appointment["locator"], "signature": sign_appointment(pisa_sk, dummy_appointment)}
    response = {"locator": dummy_appointment.to_dict()["locator"], "signature": get_dummy_signature()}

    request_url = "http://{}/".format(pisa_endpoint)
    responses.add(responses.POST, request_url, json=response, status=200)
@@ -72,12 +121,14 @@ def test_add_appointment_with_invalid_signature(monkeypatch):
    # Simulate a request to add_appointment for dummy_appointment, but sign with a different key,
    # make sure that the right endpoint is requested, but the return value is False

    # make sure the test uses the right dummy key instead of loading it from disk
    monkeypatch.setattr(pisa_cli, "load_public_key", get_dummy_pisa_pk)
    # Make sure the test uses the bad dummy signature
    monkeypatch.setattr(pisa_cli, "get_appointment_signature", get_bad_signature)
    monkeypatch.setattr(pisa_cli, "get_pk", get_dummy_hex_pk_der)
    monkeypatch.setattr(pisa_cli, "check_signature", invalid_sig)

    response = {
        "locator": dummy_appointment["locator"],
        "signature": sign_appointment(other_sk, dummy_appointment),  # signing with a different key
        "locator": dummy_appointment.to_dict()["locator"],
        "signature": get_bad_signature(),  # Sign with a bad key
    }

    request_url = "http://{}/".format(pisa_endpoint)
@@ -85,4 +136,141 @@ def test_add_appointment_with_invalid_signature(monkeypatch):

    result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)])

    assert not result
    assert result is False


def test_load_key_file_data():
    # If file exists and has data in it, function should work.
    with open("key_test_file", "w+b") as f:
        f.write(pisa_sk_der)

    appt_data = pisa_cli.load_key_file_data("key_test_file")
    assert appt_data

    os.remove("key_test_file")

    # If file doesn't exist, function should fail.
    appt_data = pisa_cli.load_key_file_data("nonexistent_file")
    assert not appt_data


def test_save_signed_appointment(monkeypatch):
    monkeypatch.setattr(pisa_cli, "APPOINTMENTS_FOLDER_NAME", "test_appointments")

    pisa_cli.save_signed_appointment(dummy_appointment.to_dict(), get_dummy_signature())

    # In folder "Appointments," grab all files and print them.
    files = os.listdir("test_appointments")

    found = False
    for f in files:
        if dummy_appointment.to_dict().get("locator") in f:
            found = True

    assert found

    # If "appointments" directory doesn't exist, function should create it.
    assert os.path.exists("test_appointments")

    # Delete test directory once we're done.
    shutil.rmtree("test_appointments")


def test_parse_add_appointment_args():
    # If no args are passed, function should fail.
    appt_data = pisa_cli.parse_add_appointment_args(None)
    assert not appt_data

    # If file doesn't exist, function should fail.
    appt_data = pisa_cli.parse_add_appointment_args(["-f", "nonexistent_file"])
    assert not appt_data

    # If file exists and has data in it, function should work.
    with open("appt_test_file", "w") as f:
        json.dump(dummy_appointment_request, f)

    appt_data = pisa_cli.parse_add_appointment_args(["-f", "appt_test_file"])
    assert appt_data

    os.remove("appt_test_file")

    # If appointment json is passed in, function should work.
    appt_data = pisa_cli.parse_add_appointment_args([json.dumps(dummy_appointment_request)])
    assert appt_data


@responses.activate
def test_post_data_to_add_appointment_endpoint():
    response = {
        "locator": dummy_appointment.to_dict()["locator"],
        "signature": Cryptographer.sign(dummy_appointment.serialize(), pisa_sk),
    }

    request_url = "http://{}/".format(pisa_endpoint)
    responses.add(responses.POST, request_url, json=response, status=200)

    response = pisa_cli.post_data_to_add_appointment_endpoint(request_url, json.dumps(dummy_appointment_request))

    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == request_url

    assert response


def test_check_signature(monkeypatch):
    # Make sure the test uses the right dummy key instead of loading it from disk
    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_pk_der)

    valid = pisa_cli.check_signature(get_dummy_signature(), dummy_appointment)

    assert valid

    valid = pisa_cli.check_signature(get_bad_signature(), dummy_appointment)

    assert not valid


@responses.activate
def test_get_appointment():
    # Response of get_appointment endpoint is an appointment with status added to it.
    dummy_appointment_full["status"] = "being_watched"
    response = dummy_appointment_full

    request_url = "http://{}/".format(pisa_endpoint) + "get_appointment?locator={}".format(response.get("locator"))
    responses.add(responses.GET, request_url, json=response, status=200)

    result = pisa_cli.get_appointment([response.get("locator")])

    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == request_url

    assert result


@responses.activate
def test_get_appointment_err():
    locator = get_random_value_hex(32)

    # Test that get_appointment handles a connection error appropriately.
    request_url = "http://{}/".format(pisa_endpoint) + "get_appointment?locator=".format(locator)
    responses.add(responses.GET, request_url, body=ConnectionError())

    assert not pisa_cli.get_appointment([locator])


def test_get_appointment_signature(monkeypatch):
    # Make sure the test uses the right dummy key instead of loading it from disk
    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_sk_der)

    signature = pisa_cli.get_appointment_signature(dummy_appointment)

    assert isinstance(signature, str)


def test_get_pk(monkeypatch):
    # Make sure the test uses the right dummy key instead of loading it from disk
    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_pk_der)

    pk = pisa_cli.get_pk()

    assert isinstance(pk, bytes)

@@ -12,10 +12,11 @@ from cryptography.hazmat.primitives import serialization

from apps.cli.blob import Blob
from pisa.responder import TransactionTracker
from pisa.watcher import Watcher
from pisa.tools import bitcoin_cli
from pisa.db_manager import DBManager
from pisa.chain_monitor import ChainMonitor
from common.appointment import Appointment
from common.tools import compute_locator

from bitcoind_mock.utils import sha256d
from bitcoind_mock.transaction import TX
@@ -50,6 +51,17 @@ def db_manager():
    rmtree("test_db")


@pytest.fixture(scope="module")
def chain_monitor():
    chain_monitor = ChainMonitor()
    chain_monitor.monitor_chain()

    yield chain_monitor

    chain_monitor.terminate = True
    generate_block()


def generate_keypair():
    client_sk = ec.generate_private_key(ec.SECP256K1, default_backend())
    client_pk = client_sk.public_key()
@@ -103,7 +115,7 @@ def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_t
        encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo
    )

    locator = Watcher.compute_locator(dispute_txid)
    locator = compute_locator(dispute_txid)
    blob = Blob(dummy_appointment_data.get("tx"))

    encrypted_blob = Cryptographer.encrypt(blob, dummy_appointment_data.get("tx_id"))
@@ -147,3 +159,26 @@ def generate_dummy_tracker():
    )

    return TransactionTracker.from_dict(tracker_data)


def get_config():
    config = {
        "BTC_RPC_USER": "username",
        "BTC_RPC_PASSWD": "password",
        "BTC_RPC_HOST": "localhost",
        "BTC_RPC_PORT": 8332,
        "BTC_NETWORK": "regtest",
        "FEED_PROTOCOL": "tcp",
        "FEED_ADDR": "127.0.0.1",
        "FEED_PORT": 28332,
        "MAX_APPOINTMENTS": 100,
        "EXPIRY_DELTA": 6,
        "MIN_TO_SELF_DELAY": 20,
        "SERVER_LOG_FILE": "pisa.log",
        "PISA_SECRET_KEY": "pisa_sk.der",
        "CLIENT_LOG_FILE": "pisa.log",
        "TEST_LOG_FILE": "test.log",
        "DB_PATH": "appointments",
    }

    return config

@@ -9,7 +9,6 @@ from pisa.api import API
from pisa.watcher import Watcher
from pisa.tools import bitcoin_cli
from pisa import HOST, PORT
from pisa.conf import MAX_APPOINTMENTS

from test.pisa.unit.conftest import (
    generate_block,
@@ -17,6 +16,7 @@ from test.pisa.unit.conftest import (
    get_random_value_hex,
    generate_dummy_appointment_data,
    generate_keypair,
    get_config,
)

from common.constants import LOCATOR_LEN_BYTES
@@ -28,18 +28,23 @@ MULTIPLE_APPOINTMENTS = 10
appointments = []
locator_dispute_tx_map = {}

config = get_config()


@pytest.fixture(scope="module")
def run_api(db_manager):
def run_api(db_manager, chain_monitor):
    sk, pk = generate_keypair()
    sk_der = sk.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    watcher = Watcher(db_manager, sk_der)

    api_thread = Thread(target=API(watcher).start)
    watcher = Watcher(db_manager, chain_monitor, sk_der, get_config())
    chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)
    chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep)

    api_thread = Thread(target=API(watcher, config).start)
    api_thread.daemon = True
    api_thread.start()

@@ -102,7 +107,7 @@ def test_request_multiple_appointments_same_locator(new_appt_data, n=MULTIPLE_AP


def test_add_too_many_appointment(new_appt_data):
    for _ in range(MAX_APPOINTMENTS - len(appointments)):
    for _ in range(config.get("MAX_APPOINTMENTS") - len(appointments)):
        r = add_appointment(new_appt_data)
        assert r.status_code == 200


225  test/pisa/unit/test_chain_monitor.py  Normal file
@@ -0,0 +1,225 @@
import zmq
import time
from threading import Thread, Event, Condition

from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.block_processor import BlockProcessor
from pisa.chain_monitor import ChainMonitor

from test.pisa.unit.conftest import get_random_value_hex, generate_block, get_config


def test_init(run_bitcoind):
    # run_bitcoind is started here instead of later on to avoid race conditions while it initializes

    # Not much to test here, just sanity checks to make sure nothing goes south in the future
    chain_monitor = ChainMonitor()

    assert chain_monitor.best_tip is None
    assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0
    assert chain_monitor.terminate is False
    assert isinstance(chain_monitor.check_tip, Event)
    assert isinstance(chain_monitor.lock, Condition)
    assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)

    # The Queues and asleep flags are initialized when attaching the corresponding subscriber
    assert chain_monitor.watcher_queue is None
    assert chain_monitor.responder_queue is None
    assert chain_monitor.watcher_asleep and chain_monitor.responder_asleep


def test_attach_watcher(chain_monitor):
    watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config())
    chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)

    # booleans are not passed as reference in Python, so the flags need to be set separately
    assert watcher.asleep == chain_monitor.watcher_asleep
    watcher.asleep = False
    assert chain_monitor.watcher_asleep != watcher.asleep

    # Test that the Queue work
    r_hash = get_random_value_hex(32)
    chain_monitor.watcher_queue.put(r_hash)
    assert watcher.block_queue.get() == r_hash


def test_attach_responder(chain_monitor):
    responder = Responder(db_manager=None, chain_monitor=chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    # Same kind of testing as with the attach watcher
    assert responder.asleep == chain_monitor.watcher_asleep
    responder.asleep = False
    assert chain_monitor.watcher_asleep != responder.asleep

    r_hash = get_random_value_hex(32)
    chain_monitor.responder_queue.put(r_hash)
    assert responder.block_queue.get() == r_hash

def test_notify_subscribers(chain_monitor):
    # Subscribers are only notified as long as they are awake
    new_block = get_random_value_hex(32)

    # Queues should be empty to start with
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    chain_monitor.watcher_asleep = True
    chain_monitor.responder_asleep = True
    chain_monitor.notify_subscribers(new_block)

    # And remain empty afterwards since both subscribers were asleep
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # Let's flag them as awake and try again
    chain_monitor.watcher_asleep = False
    chain_monitor.responder_asleep = False
    chain_monitor.notify_subscribers(new_block)

    assert chain_monitor.watcher_queue.get() == new_block
    assert chain_monitor.responder_queue.get() == new_block


def test_update_state(chain_monitor):
    # The state is updated after receiving a new block (and only if the block is not already known).
    # Let's start by setting a best_tip and a couple of old tips
    new_block_hash = get_random_value_hex(32)
    chain_monitor.best_tip = new_block_hash
    chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]

    # Now we can try to update the state with an old best_tip and see how it doesn't work
    assert chain_monitor.update_state(chain_monitor.last_tips[0]) is False

    # Same should happen with the current tip
    assert chain_monitor.update_state(chain_monitor.best_tip) is False

    # The state should be correctly updated with a new block hash, the chain tip should change and the old tip should
    # have been added to the last_tips
    another_block_hash = get_random_value_hex(32)
    assert chain_monitor.update_state(another_block_hash) is True
    assert chain_monitor.best_tip == another_block_hash and new_block_hash == chain_monitor.last_tips[-1]


def test_monitor_chain_polling():
    # Try polling with the Watcher
    chain_monitor = ChainMonitor()
    chain_monitor.best_tip = BlockProcessor.get_best_block_hash()

    watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config())
    chain_monitor.attach_watcher(watcher.block_queue, asleep=False)

    # monitor_chain_polling runs until terminate if set
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, kwargs={"polling_delta": 0.1}, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.watcher_queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_block()

    chain_monitor.watcher_queue.get()
    assert chain_monitor.watcher_queue.empty()

    chain_monitor.terminate = True
    polling_thread.join()

def test_monitor_chain_zmq():
    # Try zmq with the Responder
    chain_monitor = ChainMonitor()
    chain_monitor.best_tip = BlockProcessor.get_best_block_hash()

    responder = Responder(db_manager=None, chain_monitor=chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, asleep=False)

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # Queues should start empty
    assert chain_monitor.responder_queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_block()
        chain_monitor.responder_queue.get()
        assert chain_monitor.responder_queue.empty()

    # If we flag it to sleep no notification is sent
    chain_monitor.responder_asleep = True

    for _ in range(3):
        generate_block()
        assert chain_monitor.responder_queue.empty()

    chain_monitor.terminate = True
    # The zmq thread needs a block generation to release from the recv method.
    generate_block()

    zmq_thread.join()


def test_monitor_chain():
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor()

    watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config())
    responder = Responder(db_manager=None, chain_monitor=chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, asleep=False)
    chain_monitor.attach_watcher(watcher.block_queue, asleep=False)

    chain_monitor.best_tip = None
    chain_monitor.monitor_chain()

    # The tip is updated before starting the threads, so it should have changed.
    assert chain_monitor.best_tip is not None

    # Blocks should be received
    for _ in range(5):
        generate_block()
        watcher_block = chain_monitor.watcher_queue.get()
        responder_block = chain_monitor.responder_queue.get()
        assert watcher_block == responder_block
        assert chain_monitor.watcher_queue.empty()
        assert chain_monitor.responder_queue.empty()

    # And the thread be terminated on terminate
    chain_monitor.terminate = True
    # The zmq thread needs a block generation to release from the recv method.
    generate_block()

def test_monitor_chain_single_update():
    # This test tests that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor()

    watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config())
    responder = Responder(db_manager=None, chain_monitor=chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, asleep=False)
    chain_monitor.attach_watcher(watcher.block_queue, asleep=False)

    chain_monitor.best_tip = None

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain(polling_delta=2)
    generate_block()

    watcher_block = chain_monitor.watcher_queue.get()
    responder_block = chain_monitor.responder_queue.get()
    assert watcher_block == responder_block
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.update_state(watcher_block) is False
@@ -10,13 +10,13 @@ from common.appointment import Appointment
from pisa.block_processor import BlockProcessor
from pisa.conf import MIN_TO_SELF_DELAY

from test.pisa.unit.conftest import get_random_value_hex, generate_dummy_appointment_data, generate_keypair
from test.pisa.unit.conftest import get_random_value_hex, generate_dummy_appointment_data, generate_keypair, get_config

from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
from common.cryptographer import Cryptographer


inspector = Inspector()
inspector = Inspector(get_config())
APPOINTMENT_OK = (0, None)

NO_HEX_STRINGS = [
@@ -126,21 +126,21 @@ def test_check_to_self_delay():
    # Right value, right format
    to_self_delays = [MIN_TO_SELF_DELAY, MIN_TO_SELF_DELAY + 1, MIN_TO_SELF_DELAY + 1000]
    for to_self_delay in to_self_delays:
        assert Inspector.check_to_self_delay(to_self_delay) == APPOINTMENT_OK
        assert inspector.check_to_self_delay(to_self_delay) == APPOINTMENT_OK

    # to_self_delay too small
    to_self_delays = [MIN_TO_SELF_DELAY - 1, MIN_TO_SELF_DELAY - 2, 0, -1, -1000]
    for to_self_delay in to_self_delays:
        assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_FIELD_TOO_SMALL
        assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_FIELD_TOO_SMALL

    # Empty field
    to_self_delay = None
    assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_EMPTY_FIELD
    assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_EMPTY_FIELD

    # Wrong data type
    to_self_delays = WRONG_TYPES
    for to_self_delay in to_self_delays:
        assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_WRONG_FIELD_TYPE
        assert inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_WRONG_FIELD_TYPE


def test_check_blob():

52  test/pisa/unit/test_pisad.py  Normal file
@@ -0,0 +1,52 @@
import importlib
import os
import pytest
from pathlib import Path
from shutil import copyfile

from pisa.pisad import load_config

test_conf_file_path = os.getcwd() + "/test/pisa/unit/test_conf.py"


def test_load_config():
    # Copy the sample-conf.py file to use as a test config file.
    copyfile(os.getcwd() + "/pisa/sample_conf.py", test_conf_file_path)

    import test.pisa.unit.test_conf as conf

    # If the file has all the correct fields and data, it should return a dict.
    conf_dict = load_config(conf)
    assert type(conf_dict) == dict

    # Delete the file.
    os.remove(test_conf_file_path)


def test_bad_load_config():
    # Create a messed up version of the file that should throw an error.
    with open(test_conf_file_path, "w") as f:
        f.write('# bitcoind\nBTC_RPC_USER = 0000\nBTC_RPC_PASSWD = "password"\nBTC_RPC_HOST = 000')

    import test.pisa.unit.test_conf as conf

    importlib.reload(conf)

    with pytest.raises(Exception):
        conf_dict = load_config(conf)

    os.remove(test_conf_file_path)


def test_empty_load_config():
    # Create an empty version of the file that should throw an error.
    open(test_conf_file_path, "a")

    import test.pisa.unit.test_conf as conf

    importlib.reload(conf)

    with pytest.raises(Exception):
        conf_dict = load_config(conf)

    os.remove(test_conf_file_path)
@@ -5,15 +5,14 @@ from uuid import uuid4
from shutil import rmtree
from copy import deepcopy
from threading import Thread
from queue import Queue, Empty

from pisa.db_manager import DBManager
from pisa.responder import Responder, TransactionTracker
from pisa.block_processor import BlockProcessor
from pisa.chain_monitor import ChainMonitor
from pisa.tools import bitcoin_cli

from common.constants import LOCATOR_LEN_HEX
from common.tools import check_sha256_hex_format

from bitcoind_mock.utils import sha256d
from bitcoind_mock.transaction import TX
@@ -21,8 +20,11 @@ from test.pisa.unit.conftest import generate_block, generate_blocks, get_random_


@pytest.fixture(scope="module")
def responder(db_manager):
    return Responder(db_manager)
def responder(db_manager, chain_monitor):
    responder = Responder(db_manager, chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    return responder


@pytest.fixture()
@@ -145,17 +147,19 @@ def test_tracker_from_dict_invalid_data():


def test_init_responder(responder):
    assert type(responder.trackers) is dict and len(responder.trackers) == 0
    assert type(responder.tx_tracker_map) is dict and len(responder.tx_tracker_map) == 0
    assert type(responder.unconfirmed_txs) is list and len(responder.unconfirmed_txs) == 0
    assert type(responder.missed_confirmations) is dict and len(responder.missed_confirmations) == 0
    assert isinstance(responder.trackers, dict) and len(responder.trackers) == 0
    assert isinstance(responder.tx_tracker_map, dict) and len(responder.tx_tracker_map) == 0
    assert isinstance(responder.unconfirmed_txs, list) and len(responder.unconfirmed_txs) == 0
    assert isinstance(responder.missed_confirmations, dict) and len(responder.missed_confirmations) == 0
    assert isinstance(responder.chain_monitor, ChainMonitor)
    assert responder.block_queue.empty()
    assert responder.asleep is True
    assert responder.zmq_subscriber is None


def test_handle_breach(db_manager):
    responder = Responder(db_manager)
def test_handle_breach(db_manager, chain_monitor):
    responder = Responder(db_manager, chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    uuid = uuid4().hex
    tracker = create_dummy_tracker()

@@ -172,11 +176,10 @@ def test_handle_breach(db_manager):

    assert receipt.delivered is True

    # The responder automatically fires add_tracker on adding a tracker if it is asleep. We need to stop the processes now.
    # To do so we delete all the trackers, stop the zmq and create a new fake block to unblock the queue.get method
    # The responder automatically fires add_tracker on adding a tracker if it is asleep. We need to stop the processes
    # now. To do so we delete all the trackers, and generate a new block.
    responder.trackers = dict()
    responder.zmq_subscriber.terminate = True
    responder.block_queue.put(get_random_value_hex(32))
    generate_block()


def test_add_bad_response(responder):
@@ -184,7 +187,7 @@ def test_add_bad_response(responder):
    tracker = create_dummy_tracker()

    # Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by setting
    # to awake. That will prevent the zmq thread to be launched again.
    # to awake. That will prevent the chain_monitor thread to be launched again.
    responder.asleep = False

    # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, better tests are needed though.
@@ -205,7 +208,7 @@ def test_add_bad_response(responder):


def test_add_tracker(responder):
    responder.asleep = False
    # Responder is asleep

    for _ in range(20):
        uuid = uuid4().hex
@@ -235,7 +238,8 @@ def test_add_tracker(responder):


def test_add_tracker_same_penalty_txid(responder):
    # Create the same tracker using two different uuids
    # Responder is asleep

    confirmations = 0
    locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(random_txid=True)
    uuid_1 = uuid4().hex
@@ -260,7 +264,7 @@ def test_add_tracker_same_penalty_txid(responder):


def test_add_tracker_already_confirmed(responder):
    responder.asleep = False
    # Responder is asleep

    for i in range(20):
        uuid = uuid4().hex
@@ -274,29 +278,10 @@ def test_add_tracker_already_confirmed(responder):
        assert penalty_txid not in responder.unconfirmed_txs


def test_do_subscribe(responder):
    responder.block_queue = Queue()

    zmq_thread = Thread(target=responder.do_subscribe)
    zmq_thread.daemon = True
    zmq_thread.start()

    try:
        generate_block()
        block_hash = responder.block_queue.get()
        assert check_sha256_hex_format(block_hash)

    except Empty:
        assert False


def test_do_watch(temp_db_manager):
    responder = Responder(temp_db_manager)
    responder.block_queue = Queue()

    zmq_thread = Thread(target=responder.do_subscribe)
    zmq_thread.daemon = True
    zmq_thread.start()
def test_do_watch(temp_db_manager, chain_monitor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, False)

    trackers = [create_dummy_tracker(penalty_rawtx=TX.create_dummy_transaction()) for _ in range(20)]

@@ -318,9 +303,7 @@ def test_do_watch(temp_db_manager):
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

    # Let's start to watch
    watch_thread = Thread(target=responder.do_watch)
    watch_thread.daemon = True
    watch_thread.start()
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
@@ -354,13 +337,9 @@ def test_do_watch(temp_db_manager):
    assert responder.asleep is True


def test_check_confirmations(temp_db_manager):
    responder = Responder(temp_db_manager)
    responder.block_queue = Queue()

    zmq_thread = Thread(target=responder.do_subscribe)
    zmq_thread.daemon = True
    zmq_thread.start()
def test_check_confirmations(temp_db_manager, chain_monitor):
    responder = Responder(temp_db_manager, chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    # check_confirmations checks, given a list of transaction for a block, what of the known penalty transaction have
    # been confirmed. To test this we need to create a list of transactions and the state of the responder
@@ -390,7 +369,7 @@ def test_check_confirmations(temp_db_manager):
        assert responder.missed_confirmations[tx] == 1


# WIP: Check this properly, a bug pass unnoticed!
# TODO: Check this properly, a bug pass unnoticed!
def test_get_txs_to_rebroadcast(responder):
    # Let's create a few fake txids and assign at least 6 missing confirmations to each
    txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}
@@ -414,13 +393,13 @@ def test_get_txs_to_rebroadcast(responder):
    assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())


def test_get_completed_trackers(db_manager):
def test_get_completed_trackers(db_manager, chain_monitor):
    initial_height = bitcoin_cli().getblockcount()

    # Let's use a fresh responder for this to make it easier to compare the results
    responder = Responder(db_manager)
    responder = Responder(db_manager, chain_monitor)
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    # A complete tracker is a tracker that has reached the appointment end with enough confirmations (> MIN_CONFIRMATIONS)
    # A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS)
    # We'll create three type of transactions: end reached + enough conf, end reached + no enough conf, end not reached
    trackers_end_conf = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=TX.create_dummy_transaction()) for _ in range(10)
@@ -473,9 +452,10 @@ def test_get_completed_trackers(db_manager):
    assert set(completed_trackers_ids) == set(ended_trackers_keys)


def test_rebroadcast(db_manager):
    responder = Responder(db_manager)
def test_rebroadcast(db_manager, chain_monitor):
    responder = Responder(db_manager, chain_monitor)
    responder.asleep = False
    chain_monitor.attach_responder(responder.block_queue, responder.asleep)

    txs_to_rebroadcast = []

@@ -1,22 +1,24 @@
import pytest
from uuid import uuid4
from threading import Thread
from queue import Queue, Empty
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization

from pisa.watcher import Watcher
from pisa.responder import Responder
from pisa.tools import bitcoin_cli
from pisa.chain_monitor import ChainMonitor

from test.pisa.unit.conftest import (
    generate_block,
    generate_blocks,
    generate_dummy_appointment,
    get_random_value_hex,
    generate_keypair,
    get_config,
)
from pisa.conf import EXPIRY_DELTA, MAX_APPOINTMENTS

from common.tools import check_sha256_hex_format
from common.tools import compute_locator
from common.cryptographer import Cryptographer


@@ -35,8 +37,12 @@ sk_der = signing_key.private_bytes(


@pytest.fixture(scope="module")
def watcher(db_manager):
    return Watcher(db_manager, sk_der)
def watcher(db_manager, chain_monitor):
    watcher = Watcher(db_manager, chain_monitor, sk_der, get_config())
    chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep)
    chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep)

    return watcher


@pytest.fixture(scope="module")
@@ -46,7 +52,7 @@ def txids():

@pytest.fixture(scope="module")
def locator_uuid_map(txids):
    return {Watcher.compute_locator(txid): uuid4().hex for txid in txids}
    return {compute_locator(txid): uuid4().hex for txid in txids}


def create_appointments(n):
@@ -67,17 +73,18 @@ def create_appointments(n):
    return appointments, locator_uuid_map, dispute_txs


def test_init(watcher):
    assert type(watcher.appointments) is dict and len(watcher.appointments) == 0
    assert type(watcher.locator_uuid_map) is dict and len(watcher.locator_uuid_map) == 0
    assert watcher.block_queue.empty()
def test_init(run_bitcoind, watcher):
    assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0
    assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0
    assert watcher.asleep is True
    assert watcher.max_appointments == MAX_APPOINTMENTS
    assert watcher.zmq_subscriber is None
    assert type(watcher.responder) is Responder
    assert watcher.block_queue.empty()
    assert isinstance(watcher.chain_monitor, ChainMonitor)
    assert isinstance(watcher.config, dict)
    assert isinstance(watcher.signing_key, ec.EllipticCurvePrivateKey)
    assert isinstance(watcher.responder, Responder)


def test_add_appointment(run_bitcoind, watcher):
def test_add_appointment(watcher):
    # The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state)
    # Avoid this by setting the state to awake.
    watcher.asleep = False
@@ -121,25 +128,10 @@ def test_add_too_many_appointments(watcher):
    assert sig is None


def test_do_subscribe(watcher):
    watcher.block_queue = Queue()

    zmq_thread = Thread(target=watcher.do_subscribe)
    zmq_thread.daemon = True
    zmq_thread.start()

    try:
        generate_block()
        block_hash = watcher.block_queue.get()
        assert check_sha256_hex_format(block_hash)

    except Empty:
        assert False


def test_do_watch(watcher):
    # We will wipe all the previous data and add 5 appointments
    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)
    watcher.chain_monitor.watcher_asleep = False

    # Set the data into the Watcher and in the db
    watcher.locator_uuid_map = locator_uuid_map
@@ -150,16 +142,13 @@ def test_do_watch(watcher):
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
        watcher.db_manager.store_update_locator_map(appointment.locator, uuid)

    watch_thread = Thread(target=watcher.do_watch)
    watch_thread.daemon = True
    watch_thread.start()
    Thread(target=watcher.do_watch, daemon=True).start()

    # Broadcast the first two
    for dispute_tx in dispute_txs[:2]:
        bitcoin_cli().sendrawtransaction(dispute_tx)

    # After leaving some time for the block to be mined and processed, the number of appointments should have reduced
    # by two
    # After generating enough blocks, the number of appointments should have reduced by two
    generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)

    assert len(watcher.appointments) == APPOINTMENTS - 2
@@ -230,7 +219,7 @@ def test_filter_valid_breaches(watcher):

    dummy_appointment, _ = generate_dummy_appointment()
    dummy_appointment.encrypted_blob.data = encrypted_blob
    dummy_appointment.locator = Watcher.compute_locator(dispute_txid)
    dummy_appointment.locator = compute_locator(dispute_txid)
    uuid = uuid4().hex

    appointments = {uuid: dummy_appointment}