mirror of https://github.com/aljazceru/python-teos.git
synced 2025-12-17 14:14:22 +01:00

Merge branch '59-uncovered-tests'

.gitignore (vendored): 2 lines changed
@@ -14,3 +14,5 @@ test.py
.cache
.pytest_cache/
*.pem
.coverage
htmlcov
pisa/api.py: 37 lines changed
@@ -79,25 +79,30 @@ def get_appointment():
response = []

# ToDo: #15-add-system-monitor
if not isinstance(locator, str) or len(locator) != 64:
response.append({"locator": locator, "status": "not_found"})
return jsonify(response)

appointment_in_watcher = watcher.locator_uuid_map.get(locator)
locator_map = watcher.db_manager.load_locator_map(locator)

if locator_map is not None:
for uuid in locator_map:
appointment_data = watcher.db_manager.load_watcher_appointment(uuid)

if appointment_data is not None and appointment_data["triggered"] is False:
# Triggered is an internal flag that does not need to be sent
del appointment_data["triggered"]

if appointment_in_watcher:
for uuid in appointment_in_watcher:
appointment_data = watcher.appointments[uuid].to_dict()
appointment_data["status"] = "being_watched"
response.append(appointment_data)

if watcher.responder:
responder_jobs = watcher.responder.jobs
job_data = watcher.db_manager.load_responder_job(uuid)

for job in responder_jobs.values():
if job.locator == locator:
job_data = job.to_dict()
if job_data is not None:
job_data["status"] = "dispute_responded"
response.append(job_data)

if not response:
else:
response.append({"locator": locator, "status": "not_found"})

response = jsonify(response)
@@ -107,18 +112,12 @@ def get_appointment():

@app.route("/get_all_appointments", methods=["GET"])
def get_all_appointments():
watcher_appointments = {}
responder_jobs = {}

# ToDo: #15-add-system-monitor
response = None

if request.remote_addr in request.host or request.remote_addr == "127.0.0.1":
for uuid, appointment in watcher.appointments.items():
watcher_appointments[uuid] = appointment.to_dict()

if watcher.responder:
for uuid, job in watcher.responder.jobs.items():
responder_jobs[uuid] = job.to_dict()
watcher_appointments = watcher.db_manager.load_watcher_appointments()
responder_jobs = watcher.db_manager.load_responder_jobs()

response = jsonify({"watcher_appointments": watcher_appointments, "responder_jobs": responder_jobs})
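As an aside, the endpoint rewritten above can be exercised from any HTTP client; a minimal sketch (illustrative, not part of this commit — the base URL is made up, the real one comes from pisa's HOST and PORT settings):

    import requests

    PISA_API = "http://localhost:9814"  # hypothetical base URL

    # get_appointment always answers 200 with a JSON list; unknown locators
    # yield [{"locator": ..., "status": "not_found"}].
    locator = "a" * 64
    r = requests.get(url=PISA_API + "/get_appointment", params={"locator": locator})

    for appointment in r.json():
        print(appointment["status"])  # "being_watched", "dispute_responded" or "not_found"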
pisa/appointment.py
@@ -27,7 +27,8 @@ class Appointment:
encrypted_blob_data = appointment_data.get("encrypted_blob")
cipher = appointment_data.get("cipher")
hash_function = appointment_data.get("hash_function")
triggered = appointment_data.get("triggered")

triggered = True if appointment_data.get("triggered") is True else False

if any(
v is None
pisa/block_processor.py
@@ -1,6 +1,3 @@
import binascii
from hashlib import sha256

from pisa.logger import Logger
from pisa.tools import bitcoin_cli
from pisa.utils.auth_proxy import JSONRPCException
@@ -45,6 +42,18 @@ class BlockProcessor:

return block_count

@staticmethod
def decode_raw_transaction(raw_tx):

try:
tx = bitcoin_cli().decoderawtransaction(raw_tx)

except JSONRPCException as e:
tx = None
logger.error("Can't build transaction from decoded data.", error=e.error)

return tx

def get_missed_blocks(self, last_know_block_hash):
current_block_hash = self.get_best_block_hash()
missed_blocks = []
@@ -63,7 +72,7 @@ class BlockProcessor:
chain_tip = self.get_best_block_hash()
chain_tip_height = self.get_block(chain_tip).get("height")

target_block = self.get_block(target_block_hash).get("height")
target_block = self.get_block(target_block_hash)

if target_block is not None:
target_block_height = target_block.get("height")
@@ -71,68 +80,3 @@ class BlockProcessor:
distance = chain_tip_height - target_block_height

return distance

# FIXME: The following two functions do not seem to belong here. They come from the Watcher, and need to be
# separated since they will be reused by the TimeTraveller.
# DISCUSS: 36-who-should-check-appointment-trigger
@staticmethod
def get_potential_matches(txids, locator_uuid_map):
potential_locators = {sha256(binascii.unhexlify(txid)).hexdigest(): txid for txid in txids}

# Check if any of the txids in the received block is an actual match
intersection = set(locator_uuid_map.keys()).intersection(potential_locators.keys())
potential_matches = {locator: potential_locators[locator] for locator in intersection}

if len(potential_matches) > 0:
logger.info("List of potential matches", potential_matches=potential_matches)

else:
logger.info("No potential matches found")

return potential_matches

@staticmethod
# NOTCOVERED
def get_matches(potential_matches, locator_uuid_map, appointments):
matches = []

for locator, dispute_txid in potential_matches.items():
for uuid in locator_uuid_map[locator]:
try:
# ToDo: #20-test-tx-decrypting-edge-cases
justice_rawtx = appointments[uuid].encrypted_blob.decrypt(dispute_txid)
justice_txid = bitcoin_cli().decoderawtransaction(justice_rawtx).get("txid")
logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)

except JSONRPCException as e:
# A failed tx decode returns error code -22; maybe we should be more strict here. Leaving it simple
# for the POC
justice_txid = None
justice_rawtx = None
logger.error("Can't build transaction from decoded data.", error=e.error)

matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx))

return matches

# DISCUSS: This method comes from the Responder and seems like it could go back there.
@staticmethod
# NOTCOVERED
def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations):

for tx in txs:
if tx in tx_job_map and tx in unconfirmed_txs:
unconfirmed_txs.remove(tx)

logger.info("Confirmation received for transaction", tx=tx)

elif tx in unconfirmed_txs:
if tx in missed_confirmations:
missed_confirmations[tx] += 1

else:
missed_confirmations[tx] = 1

logger.info("Transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx])

return unconfirmed_txs, missed_confirmations
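For context, a locator is just the SHA256 of the raw txid bytes, so matching a block against the watched appointments is a set intersection; a minimal self-contained sketch (identifiers are illustrative):

    from hashlib import sha256
    from binascii import unhexlify

    def compute_locator(txid):
        # Locator = hex-encoded SHA256 over the raw txid bytes.
        return sha256(unhexlify(txid)).hexdigest()

    watched_txid = "aa" * 32
    locator_uuid_map = {compute_locator(watched_txid): ["some-uuid"]}

    block_txids = ["bb" * 32, watched_txid]
    potential_locators = {compute_locator(txid): txid for txid in block_txids}

    # Watched locators that also appear in the block are potential matches.
    matches = set(locator_uuid_map).intersection(potential_locators)
    assert matches == {compute_locator(watched_txid)}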
pisa/carrier.py
@@ -80,7 +80,7 @@ class Carrier:
# reorged while we were querying bitcoind to get the confirmation count. In such a case we just
# restart the job
if e.error.get("code") == RPC_INVALID_ADDRESS_OR_KEY:
logger.info("Transaction got reorged before obtaining information", txid=txid)
logger.info("Transaction not found in mempool nor blockchain", txid=txid)

else:
# If something else happens (unlikely but possible) log it so we can treat it in future releases
pisa/cleaner.py
@@ -25,6 +25,24 @@ class Cleaner:
# Delete appointment from the db
db_manager.delete_watcher_appointment(uuid)

@staticmethod
def delete_completed_appointment(locator, uuid, appointments, locator_uuid_map, db_manager):
# Delete the appointment
appointment = appointments.pop(uuid)

# If there was only one appointment that matches the locator we can delete the whole list
if len(locator_uuid_map[locator]) == 1:
locator_uuid_map.pop(locator)
else:
# Otherwise we just delete the appointment that matches locator:appointment_pos
locator_uuid_map[locator].remove(uuid)

# DISCUSS: instead of deleting the appointment, we will mark it as triggered and delete it from both
# the watcher's and responder's db after fulfilled
# Update appointment in the db
appointment.triggered = True
db_manager.store_watcher_appointment(uuid, appointment.to_json())

@staticmethod
def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height, db_manager):
for uuid, confirmations in completed_jobs:
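The pop-versus-remove branching in delete_completed_appointment is easy to model in isolation; a minimal sketch with made-up identifiers:

    def delete_from_map(locator_uuid_map, locator, uuid):
        # Drop the whole entry when this was the last uuid for the locator;
        # otherwise only remove that uuid from the list.
        if len(locator_uuid_map[locator]) == 1:
            locator_uuid_map.pop(locator)
        else:
            locator_uuid_map[locator].remove(uuid)

    locator_uuid_map = {"loc1": ["a"], "loc2": ["b", "c"]}
    delete_from_map(locator_uuid_map, "loc1", "a")
    delete_from_map(locator_uuid_map, "loc2", "b")
    print(locator_uuid_map)  # {'loc2': ['c']}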
pisa/cryptographer.py (new file): 56 lines changed
@@ -0,0 +1,56 @@
from hashlib import sha256
from binascii import unhexlify, hexlify
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

from pisa.logger import Logger

logger = Logger("Cryptographer")


# FIXME: Cryptographer is assuming AES-128-GCM and SHA256 since they are the only pair accepted by the encrypted blob
# and the only pair programmed so far.
class Cryptographer:
@staticmethod
# ToDo: #20-test-tx-decrypting-edge-cases
def decrypt(encrypted_blob, key, rtype="hex"):
if rtype not in ["hex", "bytes"]:
raise ValueError("Wrong return type. Return type must be 'hex' or 'bytes'")

if len(encrypted_blob.data) % 2:
logger.info(
"Incorrect (Odd-length) value to be decrypted.", encrypted_blob=encrypted_blob.data, dispute_txid=key
)
return None

# master_key = H(tx_id | tx_id)
key = unhexlify(key)
master_key = sha256(key + key).digest()

# The 16 MSB of the master key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
sk = master_key[:16]
nonce = master_key[16:]

logger.info(
"Creating new blob.",
master_key=hexlify(master_key).decode(),
sk=hexlify(sk).decode(),
nonce=hexlify(nonce).decode(),
encrypted_blob=encrypted_blob.data,
)

# Decrypt
cipher = AESGCM(sk)
data = unhexlify(encrypted_blob.data.encode())

try:
blob = cipher.decrypt(nonce=nonce, data=data, associated_data=None)

# Change the blob encoding to hex depending on the rtype (default)
if rtype == "hex":
blob = hexlify(blob).decode("utf8")

except InvalidTag:
blob = None

return blob
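To make the key derivation in the new Cryptographer concrete, here is a minimal round trip under the same scheme (the dispute txid value is made up; encryption is shown only for the demo, the commit itself only decrypts):

    from hashlib import sha256
    from binascii import hexlify, unhexlify
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    dispute_txid = "11" * 32  # illustrative txid

    # master_key = SHA256(txid | txid); first 16 bytes -> AES-128-GCM key, last 16 -> nonce.
    txid_bytes = unhexlify(dispute_txid)
    master_key = sha256(txid_bytes + txid_bytes).digest()
    sk, nonce = master_key[:16], master_key[16:]

    plaintext = b"raw justice transaction bytes"
    encrypted_blob = hexlify(AESGCM(sk).encrypt(nonce, plaintext, None)).decode()

    # Anyone who later learns the dispute txid can reverse the process.
    recovered = AESGCM(sk).decrypt(nonce, unhexlify(encrypted_blob), None)
    assert recovered == plaintext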
pisa/db_manager.py
@@ -52,6 +52,11 @@ class DBManager:

self.db.put(key, value)

def load_entry(self, key):
data = self.db.get(key.encode("utf-8"))
data = json.loads(data) if data is not None else data
return data

def delete_entry(self, key, prefix=None):
if isinstance(prefix, str):
key = prefix + key
@@ -60,6 +65,12 @@ class DBManager:

self.db.delete(key)

def load_watcher_appointment(self, key):
return self.load_entry(WATCHER_PREFIX + key)

def load_responder_job(self, key):
return self.load_entry(RESPONDER_PREFIX + key)

def load_watcher_appointments(self):
all_appointments = self.load_appointments_db(prefix=WATCHER_PREFIX)
non_triggered_appointments = {
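The watcher/responder separation in the database is achieved purely through key prefixes; a minimal sketch of the idea, with an in-memory dict standing in for the LevelDB handle and illustrative prefix values:

    import json

    WATCHER_PREFIX = "w"    # illustrative; the real values live in pisa.db_manager
    RESPONDER_PREFIX = "r"

    class ToyDB:
        def __init__(self):
            self.db = {}

        def store_entry(self, key, value):
            self.db[key] = json.dumps(value)

        def load_entry(self, key):
            data = self.db.get(key)
            return json.loads(data) if data is not None else None

    db = ToyDB()
    db.store_entry(WATCHER_PREFIX + "uuid1", {"locator": "deadbeef"})

    # Watcher and responder entries never collide: the prefix namespaces them.
    print(db.load_entry(WATCHER_PREFIX + "uuid1"))   # {'locator': 'deadbeef'}
    print(db.load_entry(RESPONDER_PREFIX + "uuid1")) # None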
pisa/encrypted_blob.py
@@ -1,48 +1,21 @@
from hashlib import sha256
from binascii import unhexlify, hexlify
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

from pisa.logger import Logger

logger = Logger("Watcher")
from pisa.conf import SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS


# FIXME: EncryptedBlob is assuming AES-128-GCM. A cipher field should be part of the object and the decryption should be
# performed depending on the cipher.
class EncryptedBlob:
def __init__(self, data):
def __init__(self, data, cipher="AES-GCM-128", hash_function="SHA256"):
if cipher in SUPPORTED_CIPHERS:
self.cipher = cipher

else:
raise ValueError("Cipher not supported")

if hash_function in SUPPORTED_HASH_FUNCTIONS:
self.hash_function = hash_function

else:
raise ValueError("Hash function not supported")

self.data = data

def __eq__(self, other):
return isinstance(other, EncryptedBlob) and self.data == other.data

def decrypt(self, key):
# master_key = H(tx_id | tx_id)
key = unhexlify(key)
master_key = sha256(key + key).digest()

# The 16 MSB of the master key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
sk = master_key[:16]
nonce = master_key[16:]

logger.info(
"Creating new blob.",
master_key=hexlify(master_key).decode(),
sk=hexlify(sk).decode(),
nonce=hexlify(sk).decode(),
encrypted_blob=self.data,
)

# Decrypt
aesgcm = AESGCM(sk)
data = unhexlify(self.data.encode())

try:
raw_tx = aesgcm.decrypt(nonce=nonce, data=data, associated_data=None)
hex_raw_tx = hexlify(raw_tx).decode("utf8")

except InvalidTag:
hex_raw_tx = None

return hex_raw_tx
pisa/inspector.py
@@ -24,36 +24,28 @@ logger = Logger("Inspector")

class Inspector:
def inspect(self, appt, signature, public_key):
locator = appt.get("locator")
start_time = appt.get("start_time")
end_time = appt.get("end_time")
dispute_delta = appt.get("dispute_delta")
encrypted_blob = appt.get("encrypted_blob")
cipher = appt.get("cipher")
hash_function = appt.get("hash_function")

block_height = BlockProcessor.get_block_count()

if block_height is not None:
rcode, message = self.check_locator(locator)
rcode, message = self.check_locator(appt.get("locator"))

if rcode == 0:
rcode, message = self.check_start_time(start_time, block_height)
rcode, message = self.check_start_time(appt.get("start_time"), block_height)
if rcode == 0:
rcode, message = self.check_end_time(end_time, start_time, block_height)
rcode, message = self.check_end_time(appt.get("end_time"), appt.get("start_time"), block_height)
if rcode == 0:
rcode, message = self.check_delta(dispute_delta)
rcode, message = self.check_delta(appt.get("dispute_delta"))
if rcode == 0:
rcode, message = self.check_blob(encrypted_blob)
rcode, message = self.check_blob(appt.get("encrypted_blob"))
if rcode == 0:
rcode, message = self.check_cipher(cipher)
rcode, message = self.check_cipher(appt.get("cipher"))
if rcode == 0:
rcode, message = self.check_hash_function(hash_function)
rcode, message = self.check_hash_function(appt.get("hash_function"))
if rcode == 0:
rcode, message = self.check_appointment_signature(appt, signature, public_key)

if rcode == 0:
r = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function)
r = Appointment.from_dict(appt)
else:
r = (rcode, message)
@@ -274,5 +266,6 @@ class Inspector:

except InvalidSignature:
rcode = errors.APPOINTMENT_INVALID_SIGNATURE
message = "invalid signature"

return rcode, message
pisa/pisad.py
@@ -65,16 +65,16 @@ if __name__ == "__main__":
missed_blocks_responder = (
missed_blocks_watcher
if last_block_watcher == last_block_responder
else block_processor.get_missed_blocks(last_block_watcher)
else block_processor.get_missed_blocks(last_block_responder)
)

responder = Responder(db_manager)
responder.jobs, responder.tx_job_map = Builder.build_jobs(responder_jobs_data)
responder.block_queue = Builder.build_block_queue(last_block_responder)
responder.block_queue = Builder.build_block_queue(missed_blocks_responder)

watcher.responder = responder
watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(watcher_appointments_data)
watcher.block_queue = Builder.build_block_queue(last_block_responder)
watcher.block_queue = Builder.build_block_queue(missed_blocks_watcher)

# Create an instance of the Watcher and fire the API
start_api(watcher)
pisa/responder.py
@@ -7,7 +7,6 @@ from binascii import unhexlify
from pisa.logger import Logger
from pisa.cleaner import Cleaner
from pisa.carrier import Carrier
from pisa.tools import check_tx_in_chain
from pisa.block_processor import BlockProcessor
from pisa.utils.zmq_subscriber import ZMQHandler
@@ -113,7 +112,8 @@ class Responder:
else:
self.tx_job_map[justice_txid] = [uuid]

if confirmations == 0:
# In the case we receive two jobs with the same justice txid we only add it to the unconfirmed txs list once
if justice_txid not in self.unconfirmed_txs and confirmations == 0:
self.unconfirmed_txs.append(justice_txid)

self.db_manager.store_responder_job(uuid, job.to_json())
@@ -145,7 +145,6 @@ class Responder:

if block is not None:
txs = block.get("tx")
height = block.get("height")

logger.info(
"New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"), txs=txs
@@ -153,10 +152,9 @@ class Responder:

# ToDo: #9-add-data-persistence
if prev_block_hash == block.get("previousblockhash"):
self.unconfirmed_txs, self.missed_confirmations = BlockProcessor.check_confirmations(
txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations
)
self.check_confirmations(txs)

height = block.get("height")
txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs)
completed_jobs = self.get_completed_jobs(height)
@@ -172,7 +170,7 @@ class Responder:
)

# ToDo: #24-properly-handle-reorgs
self.handle_reorgs()
self.handle_reorgs(block_hash)

# Register the last processed block for the responder
self.db_manager.store_last_block_hash_responder(block_hash)
@@ -186,6 +184,25 @@ class Responder:

logger.info("No more pending jobs, going back to sleep")

def check_confirmations(self, txs):
# If a new confirmed tx matches a tx we are watching, then we remove it from the unconfirmed txs map
for tx in txs:
if tx in self.tx_job_map and tx in self.unconfirmed_txs:
self.unconfirmed_txs.remove(tx)

logger.info("Confirmation received for transaction", tx=tx)

# We also add a missed confirmation to all those txs waiting to be confirmed that have not been confirmed in
# the current block
for tx in self.unconfirmed_txs:
if tx in self.missed_confirmations:
self.missed_confirmations[tx] += 1

else:
self.missed_confirmations[tx] = 1

logger.info("Transaction missed a confirmation", tx=tx, missed_confirmations=self.missed_confirmations[tx])

def get_txs_to_rebroadcast(self, txs):
txs_to_rebroadcast = []
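The bookkeeping that check_confirmations performs is small enough to model standalone; a minimal sketch over plain data (txids are made up):

    def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations):
        # Txs seen in the new block are no longer unconfirmed.
        for tx in txs:
            if tx in tx_job_map and tx in unconfirmed_txs:
                unconfirmed_txs.remove(tx)

        # Everything still unconfirmed after this block misses one confirmation.
        for tx in unconfirmed_txs:
            missed_confirmations[tx] = missed_confirmations.get(tx, 0) + 1

        return unconfirmed_txs, missed_confirmations

    unconfirmed, missed = check_confirmations(
        txs=["tx1"],
        unconfirmed_txs=["tx1", "tx2"],
        tx_job_map={"tx1": ["uuid1"], "tx2": ["uuid2"]},
        missed_confirmations={},
    )
    print(unconfirmed, missed)  # ['tx2'] {'tx2': 1}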
@@ -244,41 +261,43 @@ class Responder:

return receipts

# FIXME: Legacy code, must be checked and updated/fixed
# NOTCOVERED
def handle_reorgs(self):
def handle_reorgs(self, block_hash):
carrier = Carrier()

for uuid, job in self.jobs.items():
# First we check if the dispute transaction is still in the blockchain. If not, the justice can not be
# there either, so we'll need to call the reorg manager straight away
dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label="Dispute tx")
# First we check if the dispute transaction is known (exists either in mempool or blockchain)
dispute_tx = carrier.get_transaction(job.dispute_txid)

# If the dispute is there, we can check the justice tx
if dispute_in_chain:
justice_in_chain, justice_confirmations = check_tx_in_chain(
job.justice_txid, logger=logger, tx_label="Justice tx"
)
if dispute_tx is not None:
# If the dispute is there, we check the justice
justice_tx = carrier.get_transaction(job.justice_txid)

if justice_tx is not None:
# If the justice exists we need to check if it's on the blockchain or not so we can update the
# unconfirmed transactions list accordingly.
if justice_tx.get("confirmations") is None:
self.unconfirmed_txs.append(job.justice_txid)

# If both transactions are there, we only need to update the justice tx confirmation count
if justice_in_chain:
logger.info(
"Updating confirmation count for transaction.",
"Justice transaction back in mempool. Updating unconfirmed transactions.",
justice_txid=job.justice_txid,
prev_count=job.confirmations,
curr_count=justice_confirmations,
)

job.confirmations = justice_confirmations

else:
# Otherwise, we will add the job back (implying rebroadcast of the tx) and monitor it again
# If the justice transaction is missing, we need to reset the job.
# DISCUSS: Adding job back, should we flag it as retried?
# FIXME: Whether we decide to increase the retried counter or not, the current counter should be
# maintained. There is no way of doing so with the current approach. Update if required
self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end)
self.add_response(
uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end, block_hash
)

logger.warning("Justice transaction banished. Resetting the job", justice_tx=job.justice_txid)

else:
# ToDo: #24-properly-handle-reorgs
# FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the
# reorg manager
logger.warning("Dispute and justice transaction missing. Calling the reorg manager")
logger.error("Reorg manager not yet implemented")
logger.warning("Dispute and justice transaction missing. Calling the reorg manager.")
logger.error("Reorg manager not yet implemented.")
pisa/tools.py
@@ -2,8 +2,6 @@ import re
from http.client import HTTPException

import pisa.conf as conf
from pisa.logger import Logger
from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY
from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException
@@ -14,34 +12,6 @@ def bitcoin_cli():
)


# TODO: currently only used in the Responder; might move there or to the BlockProcessor
# NOTCOVERED
def check_tx_in_chain(tx_id, logger=Logger(), tx_label="Transaction"):
tx_in_chain = False
confirmations = 0

try:
tx_info = bitcoin_cli().getrawtransaction(tx_id, 1)

if tx_info.get("confirmations"):
confirmations = int(tx_info.get("confirmations"))
tx_in_chain = True
logger.error("{} found in the blockchain".format(tx_label), txid=tx_id)

else:
logger.error("{} found in mempool".format(tx_label), txid=tx_id)

except JSONRPCException as e:
if e.error.get("code") == RPC_INVALID_ADDRESS_OR_KEY:
logger.error("{} not found in mempool nor blockchain".format(tx_label), txid=tx_id)

else:
# ToDO: Unhandled errors, check this properly
logger.error("JSONRPCException.", method="tools.check_tx_in_chain", error=e.error)

return tx_in_chain, confirmations


# NOTCOVERED
def can_connect_to_bitcoind():
can_connect = True
pisa/watcher.py
@@ -1,24 +1,27 @@
from uuid import uuid4
from queue import Queue
from hashlib import sha256
from threading import Thread
from binascii import unhexlify

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key

from pisa.logger import Logger
from pisa.cleaner import Cleaner
from pisa.conf import EXPIRY_DELTA, MAX_APPOINTMENTS, PISA_SECRET_KEY
from pisa.responder import Responder
from pisa.cryptographer import Cryptographer
from pisa.block_processor import BlockProcessor
from pisa.utils.zmq_subscriber import ZMQHandler
from pisa.conf import EXPIRY_DELTA, MAX_APPOINTMENTS, PISA_SECRET_KEY

logger = Logger("Watcher")


class Watcher:
def __init__(self, db_manager, responder=None, max_appointments=MAX_APPOINTMENTS):
def __init__(self, db_manager, pisa_sk_file=PISA_SECRET_KEY, responder=None, max_appointments=MAX_APPOINTMENTS):
self.appointments = dict()
self.locator_uuid_map = dict()
self.asleep = True
@@ -30,13 +33,17 @@ class Watcher:
if not isinstance(responder, Responder):
self.responder = Responder(db_manager)

if PISA_SECRET_KEY is None:
if pisa_sk_file is None:
raise ValueError("No signing key provided. Please fix your pisa.conf")
else:
with open(PISA_SECRET_KEY, "r") as key_file:
secret_key_pem = key_file.read().encode("utf-8")
self.signing_key = load_pem_private_key(secret_key_pem, password=None, backend=default_backend())

@staticmethod
def compute_locator(tx_id):
return sha256(unhexlify(tx_id)).hexdigest()

def sign_appointment(self, appointment):
data = appointment.serialize()
return self.signing_key.sign(data, ec.ECDSA(hashes.SHA256()))
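For reference, sign_appointment is the standard ECDSA flow from the cryptography package; a minimal sketch with a throwaway key (the curve choice here is illustrative, the watcher loads its key from a PEM file instead):

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec

    signing_key = ec.generate_private_key(ec.SECP256K1(), default_backend())

    data = b"serialized appointment bytes"
    signature = signing_key.sign(data, ec.ECDSA(hashes.SHA256()))

    # Verification raises InvalidSignature on failure, which is what the
    # inspector catches in check_appointment_signature.
    signing_key.public_key().verify(signature, data, ec.ECDSA(hashes.SHA256()))
    print("signature ok")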
@@ -115,43 +122,31 @@ class Watcher:
expired_appointments, self.appointments, self.locator_uuid_map, self.db_manager
)

potential_matches = BlockProcessor.get_potential_matches(txids, self.locator_uuid_map)
matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments)
filtered_matches = self.filter_valid_matches(self.get_matches(txids))

for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches:
for uuid, filtered_match in filtered_matches.items():
# Errors decrypting the Blob will result in a None justice_txid
if justice_txid is not None:
if filtered_match["valid_match"] is True:
logger.info(
"Notifying responder and deleting appointment.",
justice_txid=justice_txid,
locator=locator,
justice_txid=filtered_match["justice_txid"],
locator=filtered_match["locator"],
uuid=uuid,
)

self.responder.add_response(
uuid,
dispute_txid,
justice_txid,
justice_rawtx,
filtered_match["dispute_txid"],
filtered_match["justice_txid"],
filtered_match["justice_rawtx"],
self.appointments[uuid].end_time,
block_hash,
)

# Delete the appointment
appointment = self.appointments.pop(uuid)

# If there was only one appointment that matches the locator we can delete the whole list
if len(self.locator_uuid_map[locator]) == 1:
self.locator_uuid_map.pop(locator)
else:
# Otherwise we just delete the appointment that matches locator:appointment_pos
self.locator_uuid_map[locator].remove(uuid)

# DISCUSS: instead of deleting the appointment, we will mark it as triggered and delete it from both
# the watcher's and responder's db after fulfilled
# Update appointment in the db
appointment.triggered = True
self.db_manager.store_watcher_appointment(uuid, appointment.to_json())
# Delete the appointment and update db
Cleaner.delete_completed_appointment(
filtered_match["locator"], uuid, self.appointments, self.locator_uuid_map, self.db_manager
)

# Register the last processed block for the watcher
self.db_manager.store_last_block_hash_watcher(block_hash)
@@ -162,3 +157,47 @@ class Watcher:
self.block_queue = Queue()

logger.info("No more pending appointments, going back to sleep")

def get_matches(self, txids):
potential_locators = {Watcher.compute_locator(txid): txid for txid in txids}

# Check if any of the txids in the received block is an actual match
intersection = set(self.locator_uuid_map.keys()).intersection(potential_locators.keys())
matches = {locator: potential_locators[locator] for locator in intersection}

if len(matches) > 0:
logger.info("List of matches", potential_matches=matches)

else:
logger.info("No matches found")

return matches

def filter_valid_matches(self, matches):
filtered_matches = {}

for locator, dispute_txid in matches.items():
for uuid in self.locator_uuid_map[locator]:

justice_rawtx = Cryptographer.decrypt(self.appointments[uuid].encrypted_blob, dispute_txid)
justice_tx = BlockProcessor.decode_raw_transaction(justice_rawtx)

if justice_tx is not None:
justice_txid = justice_tx.get("txid")
valid_match = True

logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)

else:
justice_txid = None
valid_match = False

filtered_matches[uuid] = {
"locator": locator,
"dispute_txid": dispute_txid,
"justice_txid": justice_txid,
"justice_rawtx": justice_rawtx,
"valid_match": valid_match,
}

return filtered_matches
test/simulator/bitcoind_sim.py
@@ -3,6 +3,7 @@ import time
import json
import logging
import binascii
from itertools import islice
from threading import Thread, Event
from flask import Flask, request, Response, abort
@@ -19,7 +20,7 @@ PORT = "18443"
blockchain = []
blocks = {}
mined_transactions = {}
mempool = []
mempool = {}

mine_new_block = Event()
@@ -138,7 +139,7 @@ def process_request():

if TX.deserialize(rawtx) is not None:
if txid not in list(mined_transactions.keys()):
mempool.append(rawtx)
mempool[txid] = rawtx
response["result"] = {"txid": txid}

else:
@@ -164,7 +165,7 @@ def process_request():
response["result"] = {"hex": rawtx, "confirmations": len(blockchain) - block.get("height")}

elif txid in mempool:
response["result"] = {"confirmations": 0}
response["result"] = {"confirmations": None}

else:
response["error"] = {
@@ -260,11 +261,9 @@ def simulate_mining(mode, time_between_blocks):

if len(mempool) != 0:
# We'll mine up to 100 txs per block
for rawtx in mempool[:99]:
txid = sha256d(rawtx)
for txid, rawtx in dict(islice(mempool.items(), 99)).items():
txs_to_mine[txid] = rawtx

mempool = mempool[99:]
mempool.pop(txid)

# Keep track of the mined transactions (to respond to getrawtransaction)
for txid, tx in txs_to_mine.items():
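The islice-based selection replaces list slicing now that the mempool is keyed by txid; a minimal sketch of the pattern:

    from itertools import islice

    # A dict-based mempool keyed by txid (values would be raw transactions).
    mempool = {"txid%d" % i: "rawtx%d" % i for i in range(5)}

    # Take up to 3 entries in insertion order without copying the whole dict.
    txs_to_mine = dict(islice(mempool.items(), 3))

    # Remove the mined entries from the mempool.
    for txid in txs_to_mine:
        mempool.pop(txid)

    print(sorted(txs_to_mine))  # ['txid0', 'txid1', 'txid2']
    print(sorted(mempool))      # ['txid3', 'txid4']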
test/simulator/utils.py
@@ -31,9 +31,14 @@ def parse_varint(tx):
# First of all, the offset of the hex transaction is moved to the proper position (i.e. where the varint should be
# located) and the length and format of the data to be analyzed is checked.
data = tx.hex[tx.offset :]
assert len(data) > 0
if len(data) > 0:
size = int(data[:2], 16)
assert size <= 255

else:
raise ValueError("No data to be parsed")

if size > 255:
raise ValueError("Wrong value (varint size > 255)")

# Then, the integer is encoded as a varint using the proper prefix, if needed.
if size <= 252:  # No prefix
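For background, Bitcoin varints grow a one-byte prefix once the value exceeds 252; the simulator only handles the single-byte case, but the general encoding rule looks like this (a sketch, not taken from this commit):

    import struct

    def encode_varint(n):
        # Bitcoin variable-length integer encoding.
        if n <= 252:
            return struct.pack("<B", n)            # no prefix
        elif n <= 0xFFFF:
            return b"\xfd" + struct.pack("<H", n)  # 0xfd + 2-byte payload
        elif n <= 0xFFFFFFFF:
            return b"\xfe" + struct.pack("<I", n)  # 0xfe + 4-byte payload
        else:
            return b"\xff" + struct.pack("<Q", n)  # 0xff + 8-byte payload

    print(encode_varint(100).hex())  # 64
    print(encode_varint(300).hex())  # fd2c01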
test/unit/conftest.py
@@ -13,11 +13,8 @@ from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization

from pisa.conf import DB_PATH
from apps.cli.blob import Blob
from pisa.api import start_api
from pisa.responder import Job
from pisa.watcher import Watcher
from pisa.tools import bitcoin_cli
from pisa.db_manager import DBManager
from pisa.appointment import Appointment
@@ -36,19 +33,6 @@ def run_bitcoind():
sleep(0.1)


@pytest.fixture(scope="session")
def run_api():
db_manager = DBManager(DB_PATH)
watcher = Watcher(db_manager)

api_thread = Thread(target=start_api, args=[watcher])
api_thread.daemon = True
api_thread.start()

# It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
sleep(0.1)


@pytest.fixture(scope="session", autouse=True)
def prng_seed():
random.seed(0)
@@ -66,7 +50,7 @@ def generate_keypair():
return client_sk, client_pk


@pytest.fixture(scope="module")
@pytest.fixture(scope="session")
def db_manager():
manager = DBManager("test_db")
yield manager
@@ -96,9 +80,13 @@ def sign_appointment(sk, appointment):
return hexlify(sk.sign(data, ec.ECDSA(hashes.SHA256()))).decode("utf-8")


def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_time_offset=30):
if real_height:
current_height = bitcoin_cli().getblockcount()

else:
current_height = 10

dispute_tx = TX.create_dummy_transaction()
dispute_txid = sha256d(dispute_tx)
justice_tx = TX.create_dummy_transaction(dispute_txid)
@@ -135,7 +123,6 @@ def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
"encrypted_blob": encrypted_blob,
"cipher": cipher,
"hash_function": hash_function,
"triggered": False,
}

signature = sign_appointment(client_sk, appointment_data)
@@ -145,9 +132,9 @@ def generate_dummy_appointment_data(start_time_offset=5, end_time_offset=30):
return data, dispute_tx


def generate_dummy_appointment(start_time_offset=5, end_time_offset=30):
def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_offset=30):
appointment_data, dispute_tx = generate_dummy_appointment_data(
start_time_offset=start_time_offset, end_time_offset=end_time_offset
real_height=real_height, start_time_offset=start_time_offset, end_time_offset=end_time_offset
)

return Appointment.from_dict(appointment_data["appointment"]), dispute_tx
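The new real_height flag lets pure unit tests skip the RPC round trip; usage, assuming the conftest helpers are importable:

    from test.unit.conftest import generate_dummy_appointment

    # With a running bitcoind (or the simulator), use the real chain height.
    appointment, dispute_tx = generate_dummy_appointment(real_height=True)

    # Without a node, the helper pins the height to 10 instead of calling getblockcount.
    appointment, dispute_tx = generate_dummy_appointment(real_height=False)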
test/unit/test_api.py
@@ -1,11 +1,15 @@
import json
import pytest
import requests
from time import sleep
from threading import Thread

from pisa.api import start_api
from pisa.watcher import Watcher
from pisa.tools import bitcoin_cli
from pisa import HOST, PORT, c_logger
from pisa.utils.auth_proxy import AuthServiceProxy
from test.unit.conftest import generate_blocks, get_random_value_hex, generate_dummy_appointment_data
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS
from pisa.conf import MAX_APPOINTMENTS
from test.unit.conftest import generate_block, generate_blocks, get_random_value_hex, generate_dummy_appointment_data

c_logger.disabled = True
@@ -16,6 +20,18 @@ appointments = []
locator_dispute_tx_map = {}


@pytest.fixture(scope="module")
def run_api(db_manager):
watcher = Watcher(db_manager)

api_thread = Thread(target=start_api, args=[watcher])
api_thread.daemon = True
api_thread.start()

# It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
sleep(0.1)


@pytest.fixture
def new_appt_data():
appt_data, dispute_tx = generate_dummy_appointment_data()
@@ -44,28 +60,6 @@ def test_add_appointment(run_api, run_bitcoind, new_appt_data):
assert r.status_code == 400


def test_request_appointment(new_appt_data):
# First we need to add an appointment
r = add_appointment(new_appt_data)
assert r.status_code == 200

# Next we can request it
r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
assert r.status_code == 200

# Each locator may point to multiple appointments, check them all
received_appointments = json.loads(r.content)

# Take the status out and leave the received appointments ready to compare
appointment_status = [appointment.pop("status") for appointment in received_appointments]

# Check that the appointment is within the received appointments
assert new_appt_data["appointment"] in received_appointments

# Check that all the appointments are being watched
assert all([status == "being_watched" for status in appointment_status])


def test_request_random_appointment():
r = requests.get(url=PISA_API + "/get_appointment?locator=" + get_random_value_hex(32))
assert r.status_code == 200
@@ -89,7 +83,7 @@ def test_request_multiple_appointments_same_locator(new_appt_data, n=MULTIPLE_AP
r = add_appointment(new_appt_data)
assert r.status_code == 200

test_request_appointment(new_appt_data)
test_request_appointment_watcher(new_appt_data)


def test_add_too_many_appointment(new_appt_data):
@@ -117,12 +111,10 @@ def test_get_all_appointments_watcher():

def test_get_all_appointments_responder():
# Trigger all disputes
bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))

locators = [appointment["locator"] for appointment in appointments]
for locator, dispute_tx in locator_dispute_tx_map.items():
if locator in locators:
bitcoin_cli.sendrawtransaction(dispute_tx)
bitcoin_cli().sendrawtransaction(dispute_tx)

# Confirm transactions
generate_blocks(6)
@@ -135,8 +127,49 @@ def test_get_all_appointments_responder():
responder_jobs = [v["locator"] for k, v in received_appointments["responder_jobs"].items()]
local_locators = [appointment["locator"] for appointment in appointments]

watcher_appointments = [v["locator"] for k, v in received_appointments["watcher_appointments"].items()]
print(set(watcher_appointments) == set(local_locators))

assert set(responder_jobs) == set(local_locators)
assert len(received_appointments["watcher_appointments"]) == 0


def test_request_appointment_watcher(new_appt_data):
# First we need to add an appointment
r = add_appointment(new_appt_data)
assert r.status_code == 200

# Next we can request it
r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
assert r.status_code == 200

# Each locator may point to multiple appointments, check them all
received_appointments = json.loads(r.content)

# Take the status out and leave the received appointments ready to compare
appointment_status = [appointment.pop("status") for appointment in received_appointments]

# Check that the appointment is within the received appointments
assert new_appt_data["appointment"] in received_appointments

# Check that all the appointments are being watched
assert all([status == "being_watched" for status in appointment_status])


def test_request_appointment_responder(new_appt_data):
# Let's do something similar to what we did with the watcher but now we'll send the dispute tx to the network
dispute_tx = locator_dispute_tx_map[new_appt_data["appointment"]["locator"]]
bitcoin_cli().sendrawtransaction(dispute_tx)

r = add_appointment(new_appt_data)
assert r.status_code == 200

# Generate a block to trigger the watcher
generate_block()

r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appt_data["appointment"]["locator"])
assert r.status_code == 200

received_appointments = json.loads(r.content)
appointment_status = [appointment.pop("status") for appointment in received_appointments]
appointment_locators = [appointment["locator"] for appointment in received_appointments]

assert new_appt_data["appointment"]["locator"] in appointment_locators and len(received_appointments) == 1
assert all([status == "dispute_responded" for status in appointment_status]) and len(appointment_status) == 1
test/unit/test_appointment.py
@@ -20,7 +20,15 @@ def appointment_data():
cipher = "AES-GCM-128"
hash_function = "SHA256"

return locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function
return {
"locator": locator,
"start_time": start_time,
"end_time": end_time,
"dispute_delta": dispute_delta,
"encrypted_blob": encrypted_blob_data,
"cipher": cipher,
"hash_function": hash_function,
}


def test_init_appointment(appointment_data):
@@ -28,51 +36,98 @@ def test_init_appointment(appointment_data):
# creating appointments.
# DISCUSS: whether this makes sense by design or checks should be ported from the inspector to the appointment
# 35-appointment-checks

locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data

appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["encrypted_blob"],
appointment_data["cipher"],
appointment_data["hash_function"],
)

assert (
locator == appointment.locator
and start_time == appointment.start_time
and end_time == appointment.end_time
and EncryptedBlob(encrypted_blob_data) == appointment.encrypted_blob
and cipher == appointment.cipher
and dispute_delta == appointment.dispute_delta
and hash_function == appointment.hash_function
appointment_data["locator"] == appointment.locator
and appointment_data["start_time"] == appointment.start_time
and appointment_data["end_time"] == appointment.end_time
and appointment_data["dispute_delta"] == appointment.dispute_delta
and EncryptedBlob(appointment_data["encrypted_blob"]) == appointment.encrypted_blob
and appointment_data["cipher"] == appointment.cipher
and appointment_data["hash_function"] == appointment.hash_function
)


def test_to_dict(appointment_data):
locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data
appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["encrypted_blob"],
appointment_data["cipher"],
appointment_data["hash_function"],
)

dict_appointment = appointment.to_dict()

assert (
locator == dict_appointment.get("locator")
and start_time == dict_appointment.get("start_time")
and end_time == dict_appointment.get("end_time")
and dispute_delta == dict_appointment.get("dispute_delta")
and cipher == dict_appointment.get("cipher")
and hash_function == dict_appointment.get("hash_function")
and encrypted_blob_data == dict_appointment.get("encrypted_blob")
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
and appointment_data["cipher"] == dict_appointment["cipher"]
and appointment_data["hash_function"] == dict_appointment["hash_function"]
)


def test_to_json(appointment_data):
locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data
appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function)
appointment = Appointment(
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["encrypted_blob"],
appointment_data["cipher"],
appointment_data["hash_function"],
)

dict_appointment = json.loads(appointment.to_json())

assert (
locator == dict_appointment.get("locator")
and start_time == dict_appointment.get("start_time")
and end_time == dict_appointment.get("end_time")
and dispute_delta == dict_appointment.get("dispute_delta")
and cipher == dict_appointment.get("cipher")
and hash_function == dict_appointment.get("hash_function")
and encrypted_blob_data == dict_appointment.get("encrypted_blob")
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
and appointment_data["cipher"] == dict_appointment["cipher"]
and appointment_data["hash_function"] == dict_appointment["hash_function"]
)


def test_from_dict(appointment_data):
# The appointment should be built if we don't miss any field
appointment = Appointment.from_dict(appointment_data)
assert isinstance(appointment, Appointment)

# Otherwise it should fail
for key in appointment_data.keys():
prev_val = appointment_data[key]
appointment_data[key] = None

try:
Appointment.from_dict(appointment_data)
assert False

except ValueError:
appointment_data[key] = prev_val
assert True


def test_serialize(appointment_data):
appointment = Appointment.from_dict(appointment_data)

assert appointment.triggered is False
ser_appointment = appointment.serialize()

assert ser_appointment == json.dumps(appointment_data, sort_keys=True, separators=(",", ":")).encode("utf-8")
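The serialize() contract the last test pins down relies on canonical JSON; a minimal sketch of why sort_keys plus compact separators yields a deterministic byte string to sign:

    import json

    a = {"locator": "ab" * 32, "start_time": 100, "end_time": 130}
    b = {"end_time": 130, "locator": "ab" * 32, "start_time": 100}

    ser_a = json.dumps(a, sort_keys=True, separators=(",", ":")).encode("utf-8")
    ser_b = json.dumps(b, sort_keys=True, separators=(",", ":")).encode("utf-8")

    # Key order in the source dict does not matter: the bytes are identical,
    # so signatures over them are reproducible.
    assert ser_a == ser_b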
test/unit/test_block_processor.py
@@ -1,26 +1,19 @@
import pytest
from uuid import uuid4
from hashlib import sha256
from binascii import unhexlify

from pisa import c_logger
from pisa.block_processor import BlockProcessor
from test.unit.conftest import get_random_value_hex
from test.unit.conftest import get_random_value_hex, generate_block, generate_blocks

c_logger.disabled = True

APPOINTMENT_COUNT = 100
TEST_SET_SIZE = 200


@pytest.fixture(scope="module")
def txids():
return [get_random_value_hex(32) for _ in range(APPOINTMENT_COUNT)]


@pytest.fixture(scope="module")
def locator_uuid_map(txids):
return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids}
hex_tx = (
"0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
"204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
"acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
"13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba"
"ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e"
"cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"
)


@pytest.fixture
@@ -54,27 +47,45 @@ def test_get_block_count():
assert isinstance(block_count, int) and block_count >= 0


def test_potential_matches(txids, locator_uuid_map):
potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)

# All the txids must match
assert locator_uuid_map.keys() == potential_matches.keys()
def test_decode_raw_transaction():
# We cannot exhaustively test this (we rely on bitcoind for this) but we can try to decode a correct transaction
assert BlockProcessor.decode_raw_transaction(hex_tx) is not None


def test_potential_matches_random(locator_uuid_map):
txids = [get_random_value_hex(32) for _ in range(len(locator_uuid_map))]

potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)

# None of the ids should match
assert len(potential_matches) == 0
def test_decode_raw_transaction_invalid():
# Same but with an invalid one
assert BlockProcessor.decode_raw_transaction(hex_tx[::-1]) is None


def test_potential_matches_random_data(locator_uuid_map):
# The likelihood of finding a potential match with random data should be negligible
txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
def test_get_missed_blocks():
block_processor = BlockProcessor()
target_block = block_processor.get_best_block_hash()

potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
# Generate some blocks and store the hashes in a list
missed_blocks = []
for _ in range(5):
generate_block()
missed_blocks.append(BlockProcessor.get_best_block_hash())

# None of the txids should match
assert len(potential_matches) == 0
# Check what we've missed
assert block_processor.get_missed_blocks(target_block) == missed_blocks

# We can see how it does not work if we replace the target by the first element in the list
block_tip = missed_blocks[0]
assert block_processor.get_missed_blocks(block_tip) != missed_blocks

# But it does again if we skip that block
assert block_processor.get_missed_blocks(block_tip) == missed_blocks[1:]


def test_get_distance_to_tip():
target_distance = 5

block_processor = BlockProcessor()
target_block = block_processor.get_best_block_hash()

# Mine some blocks up to the target distance
generate_blocks(target_distance)

# Check if the distance is properly computed
assert block_processor.get_distance_to_tip(target_block) == target_distance
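The semantics these tests pin down (the target block itself is excluded and the missed blocks come back oldest first) can be modeled with a toy chain; a sketch with a prev-hash map standing in for bitcoind (not the real implementation):

    def get_missed_blocks(prev_map, tip, last_known):
        # Walk back from the tip until the last processed block, then reverse
        # so the missed blocks come out oldest first.
        missed = []
        current = tip
        while current is not None and current != last_known:
            missed.append(current)
            current = prev_map.get(current)
        return missed[::-1]

    # Toy chain: a <- b <- c <- d (each block maps to its parent).
    prev_map = {"a": None, "b": "a", "c": "b", "d": "c"}
    print(get_missed_blocks(prev_map, "d", "a"))  # ['b', 'c', 'd']
    print(get_missed_blocks(prev_map, "d", "b"))  # ['c', 'd']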
test/unit/test_builder.py
@@ -9,7 +9,7 @@ def test_build_appointments():

# Create some appointment data
for i in range(10):
appointment, _ = generate_dummy_appointment()
appointment, _ = generate_dummy_appointment(real_height=False)
uuid = uuid4().hex

appointments_data[uuid] = appointment.to_dict()
@@ -17,7 +17,7 @@ def test_build_appointments():
# Add some additional appointments that share the same locator to test all the builder's cases
if i % 2 == 0:
locator = appointment.locator
appointment, _ = generate_dummy_appointment()
appointment, _ = generate_dummy_appointment(real_height=False)
uuid = uuid4().hex
appointment.locator = locator
test/unit/test_carrier.py
@@ -4,7 +4,7 @@ from pisa import c_logger
from pisa.carrier import Carrier
from test.simulator.utils import sha256d
from test.simulator.transaction import TX
from test.unit.conftest import generate_blocks, get_random_value_hex
from test.unit.conftest import generate_blocks, generate_block, get_random_value_hex
from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR

c_logger.disabled = True
@@ -5,63 +5,81 @@ from pisa import c_logger
 from pisa.responder import Job
 from pisa.cleaner import Cleaner
 from pisa.appointment import Appointment
+from pisa.db_manager import WATCHER_PREFIX
 from test.unit.conftest import get_random_value_hex

 CONFIRMATIONS = 6
 ITEMS = 10
 MAX_ITEMS = 100
-ITERATIONS = 1000
+ITERATIONS = 10

 c_logger.disabled = True


-def set_up_appointments(total_appointments):
+# WIP: FIX CLEANER TESTS AFTER ADDING delete_complete_appointment
+def set_up_appointments(db_manager, total_appointments):
     appointments = dict()
     locator_uuid_map = dict()

-    for _ in range(total_appointments):
+    for i in range(total_appointments):
         uuid = uuid4().hex
         locator = get_random_value_hex(32)

-        appointments[uuid] = Appointment(locator, None, None, None, None, None, None)
+        appointment = Appointment(locator, None, None, None, None, None, None)
+        appointments[uuid] = appointment
         locator_uuid_map[locator] = [uuid]

-        # Each locator can have more than one uuid assigned to it. Do a coin toss to add multiple ones
-        while random.randint(0, 1):
+        db_manager.store_watcher_appointment(uuid, appointment.to_json())
+        db_manager.store_update_locator_map(locator, uuid)
+
+        # Each locator can have more than one uuid assigned to it.
+        if i % 2:
             uuid = uuid4().hex

-            appointments[uuid] = Appointment(locator, None, None, None, None, None, None)
+            appointments[uuid] = appointment
             locator_uuid_map[locator].append(uuid)
+
+            db_manager.store_watcher_appointment(uuid, appointment.to_json())
+            db_manager.store_update_locator_map(locator, uuid)

     return appointments, locator_uuid_map


-def set_up_jobs(total_jobs):
+def set_up_jobs(db_manager, total_jobs):
     jobs = dict()
     tx_job_map = dict()

-    for _ in range(total_jobs):
+    for i in range(total_jobs):
         uuid = uuid4().hex
-        txid = get_random_value_hex(32)

-        # Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
-        jobs[uuid] = Job(txid, txid, None, None)
-        tx_job_map[txid] = [uuid]
+        # We use random txids for justice and dispute here, it shouldn't matter
+        justice_txid = get_random_value_hex(32)
+        dispute_txid = get_random_value_hex(32)
+
+        job = Job(dispute_txid, justice_txid, None, None)
+        jobs[uuid] = job
+        tx_job_map[justice_txid] = [uuid]
+
+        db_manager.store_responder_job(uuid, job.to_json())
+        db_manager.store_update_locator_map(job.locator, uuid)

-        # Each justice_txid can have more than one uuid assigned to it. Do a coin toss to add multiple ones
-        while random.randint(0, 1):
+        # Each justice_txid can have more than one uuid assigned to it.
+        if i % 2:
             uuid = uuid4().hex

-            jobs[uuid] = Job(txid, txid, None, None)
-            tx_job_map[txid].append(uuid)
+            jobs[uuid] = job
+            tx_job_map[justice_txid].append(uuid)
+
+            db_manager.store_responder_job(uuid, job.to_json())
+            db_manager.store_update_locator_map(job.locator, uuid)

     return jobs, tx_job_map


 def test_delete_expired_appointment(db_manager):
     for _ in range(ITERATIONS):
-        appointments, locator_uuid_map = set_up_appointments(MAX_ITEMS)
+        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
         expired_appointments = random.sample(list(appointments.keys()), k=ITEMS)

         Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map, db_manager)
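
Editor's note: the set_up helpers above now persist everything they create through db_manager, repeatedly calling store_update_locator_map. For readers unfamiliar with the locator map: a locator keys a list of uuids. A minimal sketch of that bookkeeping, assuming a leveldb-style key-value store (illustrative only, not pisa's verified implementation):

    import json

    def store_update_locator_map(db, locator, uuid):
        # the map is assumed stored as a JSON list of uuids under the locator key
        stored = db.get(locator.encode("utf-8"))
        locator_map = json.loads(stored) if stored is not None else []

        if uuid not in locator_map:
            locator_map.append(uuid)

        db.put(locator.encode("utf-8"), json.dumps(locator_map).encode("utf-8"))
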
@@ -69,11 +87,29 @@ def test_delete_expired_appointment(db_manager):

         assert not set(expired_appointments).issubset(appointments.keys())


-def test_delete_completed_jobs(db_manager):
+def test_delete_completed_appointments(db_manager):
+    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+    uuids = list(appointments.keys())
+
+    for uuid in uuids:
+        Cleaner.delete_completed_appointment(
+            appointments[uuid].locator, uuid, appointments, locator_uuid_map, db_manager
+        )
+
+    # All appointments should have been deleted
+    assert len(appointments) == 0
+
+    # Make sure that all appointments are flagged as triggered in the db
+    db_appointments = db_manager.load_appointments_db(prefix=WATCHER_PREFIX)
+    for uuid in uuids:
+        assert db_appointments[uuid]["triggered"] is True
+
+
+def test_delete_completed_jobs_db_match(db_manager):
     height = 0

     for _ in range(ITERATIONS):
-        jobs, tx_job_map = set_up_jobs(MAX_ITEMS)
+        jobs, tx_job_map = set_up_jobs(db_manager, MAX_ITEMS)
         selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)

         completed_jobs = [(job, 6) for job in selected_jobs]
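
Editor's note: test_delete_completed_appointments pins down a contract worth spelling out: completed appointments leave the in-memory maps but stay in the db, flagged as triggered. A sketch of a delete_completed_appointment that satisfies those assertions (assumed shape, not the actual Cleaner code):

    import json

    def delete_completed_appointment(locator, uuid, appointments, locator_uuid_map, db_manager):
        # drop the appointment from the in-memory maps
        appointments.pop(uuid)
        locator_uuid_map[locator].remove(uuid)
        if not locator_uuid_map[locator]:
            del locator_uuid_map[locator]

        # keep the db record but mark it as triggered, so it can still be reported
        appointment_data = db_manager.load_watcher_appointment(uuid)
        appointment_data["triggered"] = True
        db_manager.store_watcher_appointment(uuid, json.dumps(appointment_data))
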
@@ -81,3 +117,38 @@ def test_delete_completed_jobs(db_manager):

         Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, height, db_manager)

         assert not set(completed_jobs).issubset(jobs.keys())
+
+
+def test_delete_completed_jobs_no_db_match(db_manager):
+    height = 0
+
+    for _ in range(ITERATIONS):
+        jobs, tx_job_map = set_up_jobs(db_manager, MAX_ITEMS)
+        selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)
+
+        # Let's change some uuids by creating new jobs that are not included in the db and share a justice_txid with
+        # another job that is stored in the db.
+        for uuid in selected_jobs[: ITEMS // 2]:
+            justice_txid = jobs[uuid].justice_txid
+            dispute_txid = get_random_value_hex(32)
+            new_uuid = uuid4().hex
+
+            jobs[new_uuid] = Job(dispute_txid, justice_txid, None, None)
+            tx_job_map[justice_txid].append(new_uuid)
+            selected_jobs.append(new_uuid)
+
+        # Let's add some random data
+        for i in range(ITEMS // 2):
+            uuid = uuid4().hex
+            justice_txid = get_random_value_hex(32)
+            dispute_txid = get_random_value_hex(32)
+
+            jobs[uuid] = Job(dispute_txid, justice_txid, None, None)
+            tx_job_map[justice_txid] = [uuid]
+            selected_jobs.append(uuid)
+
+        completed_jobs = [(job, 6) for job in selected_jobs]
+
+        # We should be able to delete the correct ones and not fail on the others
+        Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, height, db_manager)
+        assert not set(completed_jobs).issubset(jobs.keys())
test/unit/test_cryptographer.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+import binascii
+
+from pisa.cryptographer import Cryptographer
+from pisa.encrypted_blob import EncryptedBlob
+from test.unit.conftest import get_random_value_hex
+
+data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777"
+key = "b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659"
+encrypted_data = "092e93d4a34aac4367075506f2c050ddfa1a201ee6669b65058572904dcea642aeb01ea4b57293618e8c46809dfadadc"
+encrypted_blob = EncryptedBlob(encrypted_data)
+
+
+# TODO: The decryption tests assume the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
+def test_decrypt_wrong_data():
+    random_key = get_random_value_hex(32)
+    random_encrypted_data = get_random_value_hex(64)
+    random_encrypted_blob = EncryptedBlob(random_encrypted_data)
+
+    # Trying to decrypt random data (in AES-GCM-128) should result in an InvalidTag exception. Our decrypt function
+    # returns None
+    hex_tx = Cryptographer.decrypt(random_encrypted_blob, random_key)
+    assert hex_tx is None
+
+
+def test_decrypt_odd_length():
+    random_key = get_random_value_hex(32)
+    random_encrypted_data_odd = get_random_value_hex(64)[:-1]
+    random_encrypted_blob_odd = EncryptedBlob(random_encrypted_data_odd)
+
+    assert Cryptographer.decrypt(random_encrypted_blob_odd, random_key) is None
+
+
+def test_decrypt_hex():
+    # Valid data should run with no InvalidTag and verify
+    assert Cryptographer.decrypt(encrypted_blob, key) == data
+
+
+def test_decrypt_bytes():
+    # We can also get the decryption in bytes
+    byte_blob = Cryptographer.decrypt(encrypted_blob, key, rtype="bytes")
+    assert isinstance(byte_blob, bytes) and byte_blob == binascii.unhexlify(data)
+
+
+def test_decrypt_wrong_return():
+    # Any other type but "hex" (default) or "bytes" should fail
+    try:
+        Cryptographer.decrypt(encrypted_blob, key, rtype="random_value")
+        assert False
+
+    except ValueError:
+        assert True
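
Editor's note: these tests treat Cryptographer.decrypt as total: undecryptable input comes back as None rather than raising. Under the AES-GCM-128 assumption flagged in the TODO, a decrypt helper consistent with the tests could look like the sketch below, built on the cryptography package (the 16-byte key truncation and the zero nonce are assumptions, not pisa's verified key layout):

    import binascii

    from cryptography.exceptions import InvalidTag
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    def decrypt(encrypted_blob, key, rtype="hex"):
        if rtype not in ("hex", "bytes"):
            raise ValueError("rtype must be 'hex' or 'bytes'")

        try:
            sk = binascii.unhexlify(key)[:16]
            ciphertext = binascii.unhexlify(encrypted_blob.data)
            plaintext = AESGCM(sk).decrypt(b"\x00" * 12, ciphertext, None)

        # binascii.Error covers odd-length hex, InvalidTag covers wrong key/data
        except (binascii.Error, InvalidTag):
            return None

        return plaintext.hex() if rtype == "hex" else plaintext
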
@@ -11,7 +11,7 @@ from pisa.db_manager import WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY, LO

 @pytest.fixture(scope="module")
 def watcher_appointments():
-    return {uuid4().hex: generate_dummy_appointment()[0] for _ in range(10)}
+    return {uuid4().hex: generate_dummy_appointment(real_height=False)[0] for _ in range(10)}


 @pytest.fixture(scope="module")
@@ -80,8 +80,18 @@ def test_load_appointments_db(db_manager):

     assert set(values) == set(local_appointments.values()) and (len(values) == len(local_appointments))


-def test_get_last_known_block(db_manager):
+def test_get_last_known_block():
+    db_path = "empty_db"
+
+    # First we check if the db exists, and if so we delete it
+    if os.path.isdir(db_path):
+        shutil.rmtree(db_path)
+
+    # Check that the db can be created if it does not exist
+    db_manager = open_create_db(db_path)
+
     # Trying to get any last block for either the watcher or the responder should return None for an empty db
     for key in [WATCHER_LAST_BLOCK_KEY, RESPONDER_LAST_BLOCK_KEY]:
         assert db_manager.get_last_known_block(key) is None

@@ -91,6 +101,9 @@ def test_get_last_known_block(db_manager):

         db_manager.db.put(key.encode("utf-8"), block_hash.encode("utf-8"))
         assert db_manager.get_last_known_block(key) == block_hash

+    # Removing test db
+    shutil.rmtree(db_path)
+

 def test_create_entry(db_manager):
     key = get_random_value_hex(32)
@@ -11,21 +11,27 @@ def test_init_encrypted_blob():

     assert EncryptedBlob(data).data == data


-def test_decrypt():
-    # TODO: The decryption tests are assuming the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
-    key = get_random_value_hex(32)
-    encrypted_data = get_random_value_hex(64)
-    encrypted_blob = EncryptedBlob(encrypted_data)
-
-    # Trying to decrypt random data (in AES_GCM-128) should result in an InvalidTag exception. Our decrypt function
-    # returns None
-    hex_tx = encrypted_blob.decrypt(key)
-    assert hex_tx is None
-
-    # Valid data should run with no InvalidTag and verify
-    data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777"
-    key = "b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659"
-    encrypted_data = "092e93d4a34aac4367075506f2c050ddfa1a201ee6669b65058572904dcea642aeb01ea4b57293618e8c46809dfadadc"
-    encrypted_blob = EncryptedBlob(encrypted_data)
-
-    assert encrypted_blob.decrypt(key) == data
+def test_init_encrypted_blob_wrong_cipher():
+    try:
+        EncryptedBlob(get_random_value_hex(64), cipher="")
+        assert False
+
+    except ValueError:
+        assert True
+
+
+def test_init_encrypted_blob_wrong_hash_function():
+    try:
+        EncryptedBlob(get_random_value_hex(64), hash_function="")
+        assert False
+
+    except ValueError:
+        assert True


 def test_equal():
     data = get_random_value_hex(64)
     e_blob1 = EncryptedBlob(data)
     e_blob2 = EncryptedBlob(data)

     assert e_blob1 == e_blob2 and id(e_blob1) != id(e_blob2)
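
Editor's note: a style point on the try/except/assert pattern used by the two new constructor tests: pytest provides an equivalent idiom that reads more directly, e.g. (behaviourally identical sketch):

    import pytest

    def test_init_encrypted_blob_wrong_cipher():
        with pytest.raises(ValueError):
            EncryptedBlob(get_random_value_hex(64), cipher="")
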
@@ -1,17 +1,20 @@
 import json
 import pytest
 import random
 from uuid import uuid4
+from shutil import rmtree
 from copy import deepcopy
 from threading import Thread
 from queue import Queue, Empty

 from pisa import c_logger
-from pisa.tools import check_txid_format
+from pisa.db_manager import DBManager
 from test.simulator.utils import sha256d
 from pisa.responder import Responder, Job
 from test.simulator.bitcoind_sim import TX
-from pisa.utils.auth_proxy import AuthServiceProxy
 from pisa.block_processor import BlockProcessor
-from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
+from pisa.tools import check_txid_format, bitcoin_cli
+from test.unit.conftest import generate_block, generate_blocks, get_random_value_hex

 c_logger.disabled = True
@@ -21,9 +24,17 @@ def responder(db_manager):

     return Responder(db_manager)


-def create_dummy_job_data(random_txid=False, justice_rawtx=None):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+@pytest.fixture()
+def temp_db_manager():
+    db_name = get_random_value_hex(8)
+    db_manager = DBManager(db_name)
+    yield db_manager
+
+    db_manager.db.close()
+    rmtree(db_name)
+
+
+def create_dummy_job_data(random_txid=False, justice_rawtx=None):
     # The following transaction data corresponds to a valid transaction. For some tests it may be interesting to have
     # some valid data, but for others we may need multiple different justice_txids.
@@ -46,7 +57,7 @@ def create_dummy_job_data(random_txid=False, justice_rawtx=None):

     if random_txid is True:
         justice_txid = get_random_value_hex(32)

-    appointment_end = bitcoin_cli.getblockcount() + 2
+    appointment_end = bitcoin_cli().getblockcount() + 2

     return dispute_txid, justice_txid, justice_rawtx, appointment_end
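
Editor's note: the recurring bitcoin_cli -> bitcoin_cli() change in this commit replaces per-test AuthServiceProxy construction with a helper imported from pisa.tools. Judging from the code it replaces, that helper presumably reduces to something like the following (an assumption, not the verified implementation):

    from pisa.utils.auth_proxy import AuthServiceProxy
    from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT

    def bitcoin_cli():
        # a fresh proxy per call, so tests never share a stale connection
        return AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
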
@@ -68,6 +79,23 @@ def test_job_init(run_bitcoind):

     )


+def test_on_sync(run_bitcoind, responder):
+    # We're on sync if we're at most one block behind the tip
+    chain_tip = BlockProcessor.get_best_block_hash()
+    assert Responder.on_sync(chain_tip) is True
+
+    generate_block()
+    assert Responder.on_sync(chain_tip) is True
+
+
+def test_on_sync_fail(responder):
+    # This should fail if we're more than one block behind the tip
+    chain_tip = BlockProcessor.get_best_block_hash()
+    generate_blocks(2)
+
+    assert Responder.on_sync(chain_tip) is False
+
+
 def test_job_to_dict():
     job = create_dummy_job()
     job_dict = job.to_dict()
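
Editor's note: test_on_sync and test_on_sync_fail together characterise on_sync: being at most one block behind the tip counts as synced, two or more blocks behind does not. A body consistent with that contract, reusing get_distance_to_tip from this same commit (a sketch, not the verified implementation):

    @staticmethod
    def on_sync(block_hash):
        distance = BlockProcessor.get_distance_to_tip(block_hash)
        return distance is not None and distance <= 1
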
@@ -90,6 +118,28 @@ def test_job_to_json():

     )


+def test_job_from_dict():
+    job_dict = create_dummy_job().to_dict()
+    new_job = Job.from_dict(job_dict)
+
+    assert job_dict == new_job.to_dict()
+
+
+def test_job_from_dict_invalid_data():
+    job_dict = create_dummy_job().to_dict()
+
+    for value in ["dispute_txid", "justice_txid", "justice_rawtx", "appointment_end"]:
+        job_dict_copy = deepcopy(job_dict)
+        job_dict_copy[value] = None
+
+        try:
+            Job.from_dict(job_dict_copy)
+            assert False
+
+        except ValueError:
+            assert True
+
+
 def test_init_responder(responder):
     assert type(responder.jobs) is dict and len(responder.jobs) == 0
     assert type(responder.tx_job_map) is dict and len(responder.tx_job_map) == 0
@@ -100,14 +150,11 @@ def test_init_responder(responder):

     assert responder.zmq_subscriber is None


-def test_add_response(responder):
+def test_add_response(db_manager):
+    responder = Responder(db_manager)
     uuid = uuid4().hex
     job = create_dummy_job()

-    # The responder automatically fires create_job on adding a job if it is asleep (initial state). Avoid this by
-    # setting the state to awake.
-    responder.asleep = False
-
     # The block_hash passed to add_response does not matter much now. It will matter in the future, to deal with errors
     receipt = responder.add_response(
         uuid,
@@ -120,6 +167,36 @@ def test_add_response(responder):

     assert receipt.delivered is True

+    # The responder automatically fires create_job on adding a job if it is asleep. We need to stop the processes now.
+    # To do so we delete all the jobs, stop the zmq and create a new fake block to unblock the queue.get method
+    responder.jobs = dict()
+    responder.zmq_subscriber.terminate = True
+    responder.block_queue.put(get_random_value_hex(32))
+
+
+def test_add_bad_response(responder):
+    uuid = uuid4().hex
+    job = create_dummy_job()
+
+    # Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by
+    # setting it to awake. That will prevent the zmq thread from being launched again.
+    responder.asleep = False
+
+    # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, better tests are needed though
+    job.justice_rawtx = job.justice_txid
+
+    # The block_hash passed to add_response does not matter much now. It will matter in the future, to deal with errors
+    receipt = responder.add_response(
+        uuid,
+        job.dispute_txid,
+        job.justice_txid,
+        job.justice_rawtx,
+        job.appointment_end,
+        block_hash=get_random_value_hex(32),
+    )
+
+    assert receipt.delivered is False
+
+
 def test_create_job(responder):
     responder.asleep = False
@@ -151,6 +228,33 @@ def test_create_job(responder):

     )


+def test_create_job_same_justice_txid(responder):
+    # Create the same job using two different uuids
+    confirmations = 0
+    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
+    uuid_1 = uuid4().hex
+    uuid_2 = uuid4().hex
+
+    responder.create_job(uuid_1, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+    responder.create_job(uuid_2, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+
+    # Check that both jobs have been added
+    assert uuid_1 in responder.jobs and uuid_2 in responder.jobs
+    assert justice_txid in responder.tx_job_map
+    assert justice_txid in responder.unconfirmed_txs
+
+    # Check that the rest of the job data also matches
+    for uuid in [uuid_1, uuid_2]:
+        job = responder.jobs[uuid]
+        assert (
+            job.dispute_txid == dispute_txid
+            and job.justice_txid == justice_txid
+            and job.justice_rawtx == justice_rawtx
+            and job.appointment_end == appointment_end
+        )
+
+
 def test_create_job_already_confirmed(responder):
     responder.asleep = False
@@ -182,14 +286,13 @@ def test_do_subscribe(responder):

         assert False


-def test_do_watch(responder):
-    # Reinitializing responder (but keeping the subscriber)
-    responder.jobs = dict()
-    responder.tx_job_map = dict()
-    responder.unconfirmed_txs = []
-    responder.missed_confirmations = dict()
+def test_do_watch(temp_db_manager):
+    responder = Responder(temp_db_manager)
+    responder.block_queue = Queue()

-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+    zmq_thread = Thread(target=responder.do_subscribe)
+    zmq_thread.daemon = True
+    zmq_thread.start()

     jobs = [create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(20)]
@@ -210,7 +313,7 @@

     # And broadcast some of the transactions
     broadcast_txs = []
     for job in jobs[:5]:
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)
         broadcast_txs.append(job.justice_txid)

     # Mine a block
@@ -229,7 +332,7 @@

     # Do the rest
     broadcast_txs = []
     for job in jobs[5:]:
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)
         broadcast_txs.append(job.justice_txid)

     # Mine a block
@@ -239,6 +342,40 @@

     assert responder.asleep is True


+def test_check_confirmations(temp_db_manager):
+    responder = Responder(temp_db_manager)
+    responder.block_queue = Queue()
+
+    zmq_thread = Thread(target=responder.do_subscribe)
+    zmq_thread.daemon = True
+    zmq_thread.start()
+
+    # check_confirmations checks, given the list of transactions in a block, which of the known justice transactions
+    # have been confirmed. To test this we need to create a list of transactions and the state of the responder
+    txs = [get_random_value_hex(32) for _ in range(20)]
+
+    # The responder has a list of unconfirmed transactions; let's make some of them the ones we've received
+    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
+    txs_subset = random.sample(txs, k=10)
+    responder.unconfirmed_txs.extend(txs_subset)
+
+    # We also need to add them to the tx_job_map since they would be there in normal conditions
+    responder.tx_job_map = {txid: Job(txid, None, None, None) for txid in responder.unconfirmed_txs}
+
+    # Let's make sure that there are no txs with missed confirmations yet
+    assert len(responder.missed_confirmations) == 0
+
+    responder.check_confirmations(txs)
+
+    # After checking confirmations the txs in txs_subset should be confirmed (not part of unconfirmed_txs anymore)
+    # and the rest should have a missed confirmation
+    for tx in txs_subset:
+        assert tx not in responder.unconfirmed_txs
+
+    for tx in responder.unconfirmed_txs:
+        assert responder.missed_confirmations[tx] == 1
+
+
 def test_get_txs_to_rebroadcast(responder):
     # Let's create a few fake txids and assign at least 6 missing confirmations to each
     txs_missing_too_many_conf = {get_random_value_hex(32): 6 + i for i in range(10)}
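
Editor's note: the assertions in test_check_confirmations fix the method's behaviour precisely: txids seen in the block leave unconfirmed_txs, and every txid still unconfirmed afterwards gains one missed confirmation. As a sketch matching only what the test requires (not the verified Responder code):

    def check_confirmations(self, txs):
        for txid in txs:
            if txid in self.unconfirmed_txs:
                # the justice tx made it into the block, so it is confirmed now
                self.unconfirmed_txs.remove(txid)

        for txid in self.unconfirmed_txs:
            self.missed_confirmations[txid] = self.missed_confirmations.get(txid, 0) + 1
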
@@ -263,8 +400,7 @@ def test_get_txs_to_rebroadcast(responder):


 def test_get_completed_jobs(db_manager):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
-    initial_height = bitcoin_cli.getblockcount()
+    initial_height = bitcoin_cli().getblockcount()

     # Let's use a fresh responder for this to make it easier to compare the results
     responder = Responder(db_manager)
@@ -291,7 +427,7 @@ def test_get_completed_jobs(db_manager):

     responder.jobs.update(jobs_no_end)

     for uuid, job in responder.jobs.items():
-        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        bitcoin_cli().sendrawtransaction(job.justice_rawtx)

     # The dummy appointments have an appointment_end time of current + 2, but jobs need at least 6 confs by default
     generate_blocks(6)
@@ -1,6 +1,8 @@
 import pytest
 from uuid import uuid4
+from hashlib import sha256
 from threading import Thread
+from binascii import unhexlify
 from queue import Queue, Empty

 from cryptography.hazmat.backends import default_backend
@@ -12,24 +14,16 @@ from cryptography.exceptions import InvalidSignature

 from pisa import c_logger
 from pisa.watcher import Watcher
 from pisa.responder import Responder
-from pisa.tools import check_txid_format
-from pisa.utils.auth_proxy import AuthServiceProxy
-from test.unit.conftest import generate_block, generate_blocks, generate_dummy_appointment
-from pisa.conf import (
-    EXPIRY_DELTA,
-    BTC_RPC_USER,
-    BTC_RPC_PASSWD,
-    BTC_RPC_HOST,
-    BTC_RPC_PORT,
-    PISA_SECRET_KEY,
-    MAX_APPOINTMENTS,
-)
+from pisa.tools import check_txid_format, bitcoin_cli
+from test.unit.conftest import generate_block, generate_blocks, generate_dummy_appointment, get_random_value_hex
+from pisa.conf import EXPIRY_DELTA, PISA_SECRET_KEY, MAX_APPOINTMENTS

 c_logger.disabled = True

 APPOINTMENTS = 5
 START_TIME_OFFSET = 1
 END_TIME_OFFSET = 1
+TEST_SET_SIZE = 200

 with open(PISA_SECRET_KEY, "r") as key_file:
     pubkey_pem = key_file.read().encode("utf-8")
@@ -43,6 +37,16 @@ def watcher(db_manager):

     return Watcher(db_manager)


+@pytest.fixture(scope="module")
+def txids():
+    return [get_random_value_hex(32) for _ in range(100)]
+
+
+@pytest.fixture(scope="module")
+def locator_uuid_map(txids):
+    return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids}
+
+
 def create_appointments(n):
     locator_uuid_map = dict()
     appointments = dict()
@@ -81,6 +85,15 @@ def test_init(watcher):

     assert type(watcher.responder) is Responder


+def test_init_no_key(db_manager):
+    try:
+        Watcher(db_manager, pisa_sk_file=None)
+        assert False
+
+    except ValueError:
+        assert True
+
+
 def test_add_appointment(run_bitcoind, watcher):
     # The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state)
     # Avoid this by setting the state to awake.
@@ -96,6 +109,12 @@ def test_add_appointment(run_bitcoind, watcher):

     assert added_appointment is True
     assert is_signature_valid(appointment, sig, public_key)

+    # Check that we can also add an already added appointment (same locator)
+    added_appointment, sig = watcher.add_appointment(appointment)
+
+    assert added_appointment is True
+    assert is_signature_valid(appointment, sig, public_key)
+

 def test_sign_appointment(watcher):
     appointment, _ = generate_dummy_appointment(start_time_offset=START_TIME_OFFSET, end_time_offset=END_TIME_OFFSET)
@@ -142,8 +161,6 @@ def test_do_subscribe(watcher):


 def test_do_watch(watcher):
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
-
     # We will wipe all the previous data and add 5 appointments
     watcher.appointments, watcher.locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)
@@ -153,7 +170,7 @@

     # Broadcast the first two
     for dispute_tx in dispute_txs[:2]:
-        bitcoin_cli.sendrawtransaction(dispute_tx)
+        bitcoin_cli().sendrawtransaction(dispute_tx)

     # After leaving some time for the block to be mined and processed, the number of appointments should have been
     # reduced by two
@@ -167,3 +184,75 @@

     assert len(watcher.appointments) == 0
     assert watcher.asleep is True
+
+
+def test_get_matches(watcher, txids, locator_uuid_map):
+    watcher.locator_uuid_map = locator_uuid_map
+    potential_matches = watcher.get_matches(txids)
+
+    # All the txids must match
+    assert locator_uuid_map.keys() == potential_matches.keys()
+
+
+def test_get_matches_random_data(watcher, locator_uuid_map):
+    # The likelihood of finding a potential match with random data should be negligible
+    watcher.locator_uuid_map = locator_uuid_map
+    txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
+
+    potential_matches = watcher.get_matches(txids)
+
+    # None of the txids should match
+    assert len(potential_matches) == 0
+
+
+def test_filter_valid_matches_random_data(watcher):
+    appointments = {}
+    locator_uuid_map = {}
+    matches = {}
+
+    for i in range(TEST_SET_SIZE):
+        dummy_appointment, _ = generate_dummy_appointment()
+        uuid = uuid4().hex
+        appointments[uuid] = dummy_appointment
+
+        locator_uuid_map[dummy_appointment.locator] = [uuid]
+
+        if i % 2:
+            dispute_txid = get_random_value_hex(32)
+            matches[dummy_appointment.locator] = dispute_txid
+
+    watcher.locator_uuid_map = locator_uuid_map
+    watcher.appointments = appointments
+
+    filtered_valid_matches = watcher.filter_valid_matches(matches)
+
+    assert not any([fil_match["valid_match"] for uuid, fil_match in filtered_valid_matches.items()])
+
+
+def test_filter_valid_matches(watcher):
+    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
+    encrypted_blob = (
+        "29f55518945408f567bb7feb4d7bb15ba88b7d8ca0223a44d5c67dfe32d038caee7613e35736025d95ad4ecd6538a50"
+        "74cbe8d7739705697a5dc4d19b8a6e4459ed2d1b0d0a9b18c49bc2187dcbfb4046b14d58a1add83235fc632efc398d5"
+        "0abcb7738f1a04b3783d025c1828b4e8a8dc8f13f2843e6bc3bf08eade02fc7e2c4dce7d2f83b055652e944ac114e0b"
+        "72a9abcd98fd1d785a5d976c05ed780e033e125fa083c6591b6029aa68dbc099f148a2bc2e0cb63733e68af717d48d5"
+        "a312b5f5b2fcca9561b2ff4191f9cdff936a43f6efef4ee45fbaf1f18d0a4b006f3fc8399dd8ecb21f709d4583bba14"
+        "4af6d49fa99d7be2ca21059a997475aa8642b66b921dc7fc0321b6a2f6927f6f9bab55c75e17a19dc3b2ae895b6d4a4"
+        "f64f8eb21b1e"
+    )
+
+    dummy_appointment, _ = generate_dummy_appointment()
+    dummy_appointment.encrypted_blob.data = encrypted_blob
+    dummy_appointment.locator = sha256(unhexlify(dispute_txid)).hexdigest()
+    uuid = uuid4().hex
+
+    appointments = {uuid: dummy_appointment}
+    locator_uuid_map = {dummy_appointment.locator: [uuid]}
+    matches = {dummy_appointment.locator: dispute_txid}
+
+    watcher.appointments = appointments
+    watcher.locator_uuid_map = locator_uuid_map
+
+    filtered_valid_matches = watcher.filter_valid_matches(matches)
+
+    assert all([fil_match["valid_match"] for uuid, fil_match in filtered_valid_matches.items()])
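
Editor's note: the txids and locator_uuid_map fixtures encode the watcher's matching rule: a locator is the sha256 of the raw dispute txid. get_matches, as these tests characterise it, then reduces to the following sketch (assumed body, not the verified Watcher code):

    from hashlib import sha256
    from binascii import unhexlify

    def get_matches(locator_uuid_map, txids):
        # map locator -> txid for every txid whose locator is being watched
        matches = {}
        for txid in txids:
            locator = sha256(unhexlify(txid)).hexdigest()
            if locator in locator_uuid_map:
                matches[locator] = txid

        return matches
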