Renames dispute_delta to to_self_delay and justice tx to penalty tx

Uses the naming convention followed by LN implementations and BOLTs
Sergi Delgado Segura
2019-12-11 12:45:05 +01:00
parent 6a7cd4d3aa
commit df5dcbdfe9
16 changed files with 203 additions and 194 deletions
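Across the serialized formats the rename is a mechanical key substitution. A quick before/after sketch with placeholder values (not taken from the diff below):

# Appointment and job dicts before this commit (placeholder values):
appointment = {"locator": "00" * 16, "start_time": 100, "end_time": 120,
               "dispute_delta": 20, "encrypted_blob": "ab" * 50}
job = {"locator": "00" * 16, "dispute_txid": "11" * 32, "justice_txid": "22" * 32,
       "justice_rawtx": "33" * 50, "appointment_end": 100}

# After this commit the same data reads:
appointment["to_self_delay"] = appointment.pop("dispute_delta")
job["penalty_txid"] = job.pop("justice_txid")
job["penalty_rawtx"] = job.pop("justice_rawtx")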

View File

@@ -40,7 +40,7 @@ def generate_dummy_appointment():
"tx_id": os.urandom(32).hex(),
"start_time": current_height + 5,
"end_time": current_height + 10,
"dispute_delta": 20,
"to_self_delay": 20,
}
print("Generating dummy appointment data:" "\n\n" + json.dumps(dummy_appointment_data, indent=4, sort_keys=True))
@@ -131,7 +131,7 @@ def add_appointment(args):
appointment_data.get("tx_id"),
appointment_data.get("start_time"),
appointment_data.get("end_time"),
appointment_data.get("dispute_delta"),
appointment_data.get("to_self_delay"),
)
try:
@@ -205,12 +205,13 @@ def add_appointment(args):
try:
pisa_pk_der = load_key_file_data(PISA_PUBLIC_KEY)
pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der)
is_sig_valid = Cryptographer.verify(Cryptographer.signature_format(appointment), signature, pisa_pk)
except ValueError:
if pisa_pk is None:
logger.error("Failed to deserialize the public key. It might be in an unsupported format.")
return False
is_sig_valid = Cryptographer.verify(Cryptographer.signature_format(appointment), signature, pisa_pk)
except FileNotFoundError:
logger.error("Pisa's public key file not found. Please check your settings.")
return False
@@ -270,7 +271,7 @@ def get_appointment(args):
return True
def build_appointment(tx, tx_id, start_time, end_time, dispute_delta):
def build_appointment(tx, tx_id, start_time, end_time, to_self_delay):
locator = compute_locator(tx_id)
# FIXME: The blob data should contain more things than just the transaction. Leaving it like this for now.
@@ -281,7 +282,7 @@ def build_appointment(tx, tx_id, start_time, end_time, dispute_delta):
"locator": locator,
"start_time": start_time,
"end_time": end_time,
"dispute_delta": dispute_delta,
"to_self_delay": to_self_delay,
"encrypted_blob": encrypted_blob,
}

View File

@@ -106,7 +106,15 @@ class Cryptographer:
return pk
except UnsupportedAlgorithm:
raise ValueError("Could not deserialize the public key (unsupported algorithm).")
logger.error("Could not deserialize the public key (unsupported algorithm).")
except ValueError:
logger.error("The provided data cannot be deserialized (wrong size or format)")
except TypeError:
logger.error("The provided data cannot be deserialized (wrong type)")
return None
# Deserialize private key from der data.
@staticmethod
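With this change, load_public_key_der logs failures and returns None instead of raising, so callers are expected to guard on the return value. A minimal sketch of the calling pattern, mirroring the CLI hunk above:

pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der)
if pisa_pk is None:
    # The Cryptographer has already logged the reason for the failure.
    return False
is_sig_valid = Cryptographer.verify(Cryptographer.signature_format(appointment), signature, pisa_pk)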

View File

@@ -12,7 +12,7 @@ class Appointment:
for the tower to decrypt and broadcast the penalty transaction.
start_time (int): The block height at which the tower is hired to start watching for breaches.
end_time (int): The block height at which the tower will stop watching for breaches.
dispute_delta (int): The ``to_self_delay`` encoded in the ``csv`` of the ``htlc`` that this appointment is
to_self_delay (int): The ``to_self_delay`` encoded in the ``csv`` of the ``htlc`` that this appointment is
covering.
encrypted_blob (EncryptedBlob): An :mod:`EncryptedBlob <pisa.encrypted_blob>` object containing an encrypted
penalty transaction. The tower will decrypt it and broadcast the penalty transaction upon seeing a breach on
@@ -20,11 +20,11 @@ class Appointment:
"""
# DISCUSS: 35-appointment-checks
def __init__(self, locator, start_time, end_time, dispute_delta, encrypted_blob):
def __init__(self, locator, start_time, end_time, to_self_delay, encrypted_blob):
self.locator = locator
self.start_time = start_time # ToDo: #4-standardize-appointment-fields
self.end_time = end_time # ToDo: #4-standardize-appointment-fields
self.dispute_delta = dispute_delta
self.to_self_delay = to_self_delay
self.encrypted_blob = EncryptedBlob(encrypted_blob)
@classmethod
@@ -36,7 +36,7 @@ class Appointment:
Args:
appointment_data (dict): a dictionary containing the following keys:
``{locator, start_time, end_time, dispute_delta, encrypted_blob}``
``{locator, start_time, end_time, to_self_delay, encrypted_blob}``
Returns:
``Appointment``: An appointment initialized using the provided data.
@@ -48,14 +48,14 @@ class Appointment:
locator = appointment_data.get("locator")
start_time = appointment_data.get("start_time") # ToDo: #4-standardize-appointment-fields
end_time = appointment_data.get("end_time") # ToDo: #4-standardize-appointment-fields
dispute_delta = appointment_data.get("dispute_delta")
to_self_delay = appointment_data.get("to_self_delay")
encrypted_blob_data = appointment_data.get("encrypted_blob")
if any(v is None for v in [locator, start_time, end_time, dispute_delta, encrypted_blob_data]):
if any(v is None for v in [locator, start_time, end_time, to_self_delay, encrypted_blob_data]):
raise ValueError("Wrong appointment data, some fields are missing")
else:
appointment = cls(locator, start_time, end_time, dispute_delta, encrypted_blob_data)
appointment = cls(locator, start_time, end_time, to_self_delay, encrypted_blob_data)
return appointment
@@ -73,7 +73,7 @@ class Appointment:
"locator": self.locator,
"start_time": self.start_time,
"end_time": self.end_time,
"dispute_delta": self.dispute_delta,
"to_self_delay": self.to_self_delay,
"encrypted_blob": self.encrypted_blob.data,
}
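With the renamed field, a round trip through from_dict / to_dict looks as follows (illustrative values):

appointment = Appointment.from_dict({
    "locator": "00" * 16,
    "start_time": 100,
    "end_time": 120,
    "to_self_delay": 20,
    "encrypted_blob": "ab" * 50,
})
assert appointment.to_self_delay == 20
assert appointment.to_dict()["to_self_delay"] == 20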

View File

@@ -69,11 +69,11 @@ class Builder:
job = Job.from_dict(data)
jobs[uuid] = job
if job.justice_txid in tx_job_map:
tx_job_map[job.justice_txid].append(uuid)
if job.penalty_txid in tx_job_map:
tx_job_map[job.penalty_txid].append(uuid)
else:
tx_job_map[job.justice_txid] = [uuid]
tx_job_map[job.penalty_txid] = [uuid]
return jobs, tx_job_map
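tx_job_map is a many-to-one index from penalty txid to job uuids, since several jobs may cover the same penalty transaction. A standalone sketch of the shape the builder produces (hypothetical ids):

tx_job_map = {}
penalty_txid = "ff" * 32  # hypothetical: two jobs covering the same penalty tx
for uuid in ["uuid-1", "uuid-2"]:
    if penalty_txid in tx_job_map:
        tx_job_map[penalty_txid].append(uuid)
    else:
        tx_job_map[penalty_txid] = [uuid]
assert tx_job_map == {penalty_txid: ["uuid-1", "uuid-2"]}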

View File

@@ -88,17 +88,17 @@ class Cleaner:
confirmations=confirmations,
)
justice_txid = jobs[uuid].justice_txid
penalty_txid = jobs[uuid].penalty_txid
locator = jobs[uuid].locator
jobs.pop(uuid)
if len(tx_job_map[justice_txid]) == 1:
tx_job_map.pop(justice_txid)
if len(tx_job_map[penalty_txid]) == 1:
tx_job_map.pop(penalty_txid)
logger.info("No more jobs for justice transaction.", justice_txid=justice_txid)
logger.info("No more jobs for penalty transaction.", penalty_txid=penalty_txid)
else:
tx_job_map[justice_txid].remove(uuid)
tx_job_map[penalty_txid].remove(uuid)
# Delete appointment from the db (both watcher's and responder's)
db_manager.delete_watcher_appointment(uuid)

View File

@@ -15,26 +15,26 @@ logger = Logger("Responder")
class Job:
def __init__(self, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end):
def __init__(self, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end):
self.locator = locator
self.dispute_txid = dispute_txid
self.justice_txid = justice_txid
self.justice_rawtx = justice_rawtx
self.penalty_txid = penalty_txid
self.penalty_rawtx = penalty_rawtx
self.appointment_end = appointment_end
@classmethod
def from_dict(cls, job_data):
locator = job_data.get("locator")
dispute_txid = job_data.get("dispute_txid")
justice_txid = job_data.get("justice_txid")
justice_rawtx = job_data.get("justice_rawtx")
penalty_txid = job_data.get("penalty_txid")
penalty_rawtx = job_data.get("penalty_rawtx")
appointment_end = job_data.get("appointment_end")
if any(v is None for v in [locator, dispute_txid, justice_txid, justice_rawtx, appointment_end]):
if any(v is None for v in [locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end]):
raise ValueError("Wrong job data, some fields are missing")
else:
job = cls(locator, dispute_txid, justice_txid, justice_rawtx, appointment_end)
job = cls(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
return job
@@ -42,8 +42,8 @@ class Job:
job = {
"locator": self.locator,
"dispute_txid": self.dispute_txid,
"justice_txid": self.justice_txid,
"justice_rawtx": self.justice_rawtx,
"penalty_txid": self.penalty_txid,
"penalty_rawtx": self.penalty_rawtx,
"appointment_end": self.appointment_end,
}
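A Job round trip with the renamed attributes (illustrative values only):

job = Job(
    locator="00" * 16,
    dispute_txid="11" * 32,
    penalty_txid="22" * 32,
    penalty_rawtx="33" * 50,
    appointment_end=100,
)
assert Job.from_dict(job.to_dict()).penalty_txid == job.penalty_txid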
@@ -78,20 +78,20 @@ class Responder:
return synchronized
def add_response(
self, uuid, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, block_hash, retry=False
self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, block_hash, retry=False
):
if self.asleep:
logger.info("Waking up")
carrier = Carrier()
receipt = carrier.send_transaction(justice_rawtx, justice_txid)
receipt = carrier.send_transaction(penalty_rawtx, penalty_txid)
# do_watch can call add_response recursively if a broadcast transaction does not get confirmations
# retry holds that information. If retry is true, the job already exists
if receipt.delivered:
if not retry:
self.create_job(
uuid, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, receipt.confirmations
uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, receipt.confirmations
)
else:
@@ -102,24 +102,24 @@ class Responder:
return receipt
def create_job(self, uuid, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0):
job = Job(locator, dispute_txid, justice_txid, justice_rawtx, appointment_end)
def create_job(self, uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations=0):
job = Job(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
self.jobs[uuid] = job
if justice_txid in self.tx_job_map:
self.tx_job_map[justice_txid].append(uuid)
if penalty_txid in self.tx_job_map:
self.tx_job_map[penalty_txid].append(uuid)
else:
self.tx_job_map[justice_txid] = [uuid]
self.tx_job_map[penalty_txid] = [uuid]
# In the case we receive two jobs with the same justice txid we only add it to the unconfirmed txs list once
if justice_txid not in self.unconfirmed_txs and confirmations == 0:
self.unconfirmed_txs.append(justice_txid)
# In case we receive two jobs with the same penalty txid, we only add it to the unconfirmed txs list once
if penalty_txid not in self.unconfirmed_txs and confirmations == 0:
self.unconfirmed_txs.append(penalty_txid)
self.db_manager.store_responder_job(uuid, job.to_json())
logger.info(
"New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end
"New job added.", dispute_txid=dispute_txid, penalty_txid=penalty_txid, appointment_end=appointment_end
)
if self.asleep:
@@ -215,8 +215,8 @@ class Responder:
completed_jobs = []
for uuid, job in self.jobs.items():
if job.appointment_end <= height and job.justice_txid not in self.unconfirmed_txs:
tx = Carrier.get_transaction(job.justice_txid)
if job.appointment_end <= height and job.penalty_txid not in self.unconfirmed_txs:
tx = Carrier.get_transaction(job.penalty_txid)
# FIXME: Should be improved with the librarian
if tx is not None:
@@ -243,8 +243,8 @@ class Responder:
job.locator,
uuid,
job.dispute_txid,
job.justice_txid,
job.justice_rawtx,
job.penalty_txid,
job.penalty_rawtx,
job.appointment_end,
block_hash,
retry=True,
@@ -252,7 +252,7 @@ class Responder:
logger.warning(
"Transaction has missed many confirmations. Rebroadcasting.",
justice_txid=job.justice_txid,
penalty_txid=job.penalty_txid,
confirmations_missed=CONFIRMATIONS_BEFORE_RETRY,
)
@@ -269,22 +269,22 @@ class Responder:
dispute_tx = carrier.get_transaction(job.dispute_txid)
if dispute_tx is not None:
# If the dispute is there, we check the justice
justice_tx = carrier.get_transaction(job.justice_txid)
# If the dispute is there, we check the penalty
penalty_tx = carrier.get_transaction(job.penalty_txid)
if justice_tx is not None:
# If the justice exists we need to check is it's on the blockchain or not so we can update the
if penalty_tx is not None:
# If the penalty exists we need to check if it's on the blockchain or not so we can update the
# unconfirmed transactions list accordingly.
if justice_tx.get("confirmations") is None:
self.unconfirmed_txs.append(job.justice_txid)
if penalty_tx.get("confirmations") is None:
self.unconfirmed_txs.append(job.penalty_txid)
logger.info(
"Justice transaction back in mempool. Updating unconfirmed transactions.",
justice_txid=job.justice_txid,
"Penalty transaction back in mempool. Updating unconfirmed transactions.",
penalty_txid=job.penalty_txid,
)
else:
# If the justice transaction is missing, we need to reset the job.
# If the penalty transaction is missing, we need to reset the job.
# DISCUSS: Adding job back, should we flag it as retried?
# FIXME: Whether we decide to increase the retried counter or not, the current counter should be
# maintained. There is no way of doing so with the current approach. Update if required
@@ -292,17 +292,17 @@ class Responder:
job.locator,
uuid,
job.dispute_txid,
job.justice_txid,
job.justice_rawtx,
job.penalty_txid,
job.penalty_rawtx,
job.appointment_end,
block_hash,
)
logger.warning("Justice transaction banished. Resetting the job", justice_tx=job.justice_txid)
logger.warning("Penalty transaction banished. Resetting the job", penalty_tx=job.penalty_txid)
else:
# ToDo: #24-properly-handle-reorgs
# FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the
# reorg manager
logger.warning("Dispute and justice transaction missing. Calling the reorg manager.")
logger.warning("Dispute and penalty transaction missing. Calling the reorg manager.")
logger.error("Reorg manager not yet implemented.")

View File

@@ -13,7 +13,7 @@ FEED_PORT = None
# PISA
MAX_APPOINTMENTS = 100
EXPIRY_DELTA = 6
MIN_DISPUTE_DELTA = 20
MIN_TO_SELF_DELAY = 20
SERVER_LOG_FILE = "pisa.log"
PISA_SECRET_KEY = "pisa_sk.der"

View File

@@ -117,11 +117,11 @@ class Watcher:
filtered_matches = self.filter_valid_matches(self.get_matches(txids))
for uuid, filtered_match in filtered_matches.items():
# Errors decrypting the Blob will result in a None justice_txid
# Errors decrypting the Blob will result in a None penalty_txid
if filtered_match["valid_match"] is True:
logger.info(
"Notifying responder and deleting appointment.",
justice_txid=filtered_match["justice_txid"],
penalty_txid=filtered_match["penalty_txid"],
locator=filtered_match["locator"],
uuid=uuid,
)
@@ -130,8 +130,8 @@ class Watcher:
uuid,
filtered_match["locator"],
filtered_match["dispute_txid"],
filtered_match["justice_txid"],
filtered_match["justice_rawtx"],
filtered_match["penalty_txid"],
filtered_match["penalty_rawtx"],
self.appointments[uuid].end_time,
block_hash,
)
@@ -173,28 +173,28 @@ class Watcher:
for uuid in self.locator_uuid_map[locator]:
try:
justice_rawtx = Cryptographer.decrypt(self.appointments[uuid].encrypted_blob, dispute_txid)
penalty_rawtx = Cryptographer.decrypt(self.appointments[uuid].encrypted_blob, dispute_txid)
except ValueError:
justice_rawtx = None
penalty_rawtx = None
justice_tx = BlockProcessor.decode_raw_transaction(justice_rawtx)
penalty_tx = BlockProcessor.decode_raw_transaction(penalty_rawtx)
if justice_tx is not None:
justice_txid = justice_tx.get("txid")
if penalty_tx is not None:
penalty_txid = penalty_tx.get("txid")
valid_match = True
logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)
logger.info("Match found for locator.", locator=locator, uuid=uuid, penalty_txid=penalty_txid)
else:
justice_txid = None
penalty_txid = None
valid_match = False
filtered_matches[uuid] = {
"locator": locator,
"dispute_txid": dispute_txid,
"justice_txid": justice_txid,
"justice_rawtx": justice_rawtx,
"penalty_txid": penalty_txid,
"penalty_rawtx": penalty_rawtx,
"valid_match": valid_match,
}

View File

@@ -30,7 +30,7 @@ dummy_appointment_request = {
"tx_id": get_random_value_hex(32),
"start_time": 1500,
"end_time": 50000,
"dispute_delta": 200,
"to_self_delay": 200,
}
dummy_appointment = build_appointment(**dummy_appointment_request)

View File

@@ -81,14 +81,14 @@ def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_t
dispute_tx = TX.create_dummy_transaction()
dispute_txid = sha256d(dispute_tx)
justice_tx = TX.create_dummy_transaction(dispute_txid)
penalty_tx = TX.create_dummy_transaction(dispute_txid)
dummy_appointment_data = {
"tx": justice_tx,
"tx": penalty_tx,
"tx_id": dispute_txid,
"start_time": current_height + start_time_offset,
"end_time": current_height + end_time_offset,
"dispute_delta": 20,
"to_self_delay": 20,
}
# dummy keys for this test
@@ -106,7 +106,7 @@ def generate_dummy_appointment_data(real_height=True, start_time_offset=5, end_t
"locator": locator,
"start_time": dummy_appointment_data.get("start_time"),
"end_time": dummy_appointment_data.get("end_time"),
"dispute_delta": dummy_appointment_data.get("dispute_delta"),
"to_self_delay": dummy_appointment_data.get("to_self_delay"),
"encrypted_blob": encrypted_blob,
}
@@ -128,15 +128,15 @@ def generate_dummy_appointment(real_height=True, start_time_offset=5, end_time_o
def generate_dummy_job():
dispute_txid = get_random_value_hex(32)
justice_txid = get_random_value_hex(32)
justice_rawtx = get_random_value_hex(100)
penalty_txid = get_random_value_hex(32)
penalty_rawtx = get_random_value_hex(100)
locator = dispute_txid[:LOCATOR_LEN_HEX]
job_data = dict(
locator=locator,
dispute_txid=dispute_txid,
justice_txid=justice_txid,
justice_rawtx=justice_rawtx,
penalty_txid=penalty_txid,
penalty_rawtx=penalty_rawtx,
appointment_end=100,
)

View File

@@ -58,7 +58,7 @@ def test_add_appointment(run_api, run_bitcoind, new_appt_data):
assert r.status_code == 200
# Incorrect appointment
new_appt_data["appointment"]["dispute_delta"] = 0
new_appt_data["appointment"]["to_self_delay"] = 0
r = add_appointment(new_appt_data)
assert r.status_code == 400

View File

@@ -19,14 +19,14 @@ def appointment_data():
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
start_time = 100
end_time = 120
dispute_delta = 20
to_self_delay = 20
encrypted_blob_data = get_random_value_hex(100)
return {
"locator": locator,
"start_time": start_time,
"end_time": end_time,
"dispute_delta": dispute_delta,
"to_self_delay": to_self_delay,
"encrypted_blob": encrypted_blob_data,
}
@@ -40,7 +40,7 @@ def test_init_appointment(appointment_data):
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
)
@@ -48,7 +48,7 @@ def test_init_appointment(appointment_data):
appointment_data["locator"] == appointment.locator
and appointment_data["start_time"] == appointment.start_time
and appointment_data["end_time"] == appointment.end_time
and appointment_data["dispute_delta"] == appointment.dispute_delta
and appointment_data["to_self_delay"] == appointment.to_self_delay
and EncryptedBlob(appointment_data["encrypted_blob"]) == appointment.encrypted_blob
)
@@ -58,7 +58,7 @@ def test_to_dict(appointment_data):
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
)
@@ -68,7 +68,7 @@ def test_to_dict(appointment_data):
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
and appointment_data["to_self_delay"] == dict_appointment["to_self_delay"]
and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
)
@@ -78,7 +78,7 @@ def test_to_json(appointment_data):
appointment_data["locator"],
appointment_data["start_time"],
appointment_data["end_time"],
appointment_data["dispute_delta"],
appointment_data["to_self_delay"],
appointment_data["encrypted_blob"],
)
@@ -88,7 +88,7 @@ def test_to_json(appointment_data):
appointment_data["locator"] == dict_appointment["locator"]
and appointment_data["start_time"] == dict_appointment["start_time"]
and appointment_data["end_time"] == dict_appointment["end_time"]
and appointment_data["dispute_delta"] == dict_appointment["dispute_delta"]
and appointment_data["to_self_delay"] == dict_appointment["to_self_delay"]
and EncryptedBlob(appointment_data["encrypted_blob"]) == EncryptedBlob(dict_appointment["encrypted_blob"])
)

View File

@@ -44,9 +44,9 @@ def test_build_jobs():
# Add some additional jobs that share the same penalty txid to test all the builder's cases
if i % 2 == 0:
justice_txid = job.justice_txid
penalty_txid = job.penalty_txid
job = generate_dummy_job()
job.justice_txid = justice_txid
job.penalty_txid = penalty_txid
jobs_data[uuid4().hex] = job.to_dict()
@@ -59,7 +59,7 @@ def test_build_jobs():
# The locator is not part of the job_data found in the database (for now)
assert jobs_data[uuid] == job_dict
assert uuid in tx_job_map[job.justice_txid]
assert uuid in tx_job_map[job.penalty_txid]
def test_build_block_queue():

View File

@@ -55,25 +55,25 @@ def set_up_jobs(db_manager, total_jobs):
for i in range(total_jobs):
uuid = uuid4().hex
# We use the same txid for justice and dispute here, it shouldn't matter
justice_txid = get_random_value_hex(32)
# We use the same txid for penalty and dispute here; it shouldn't matter
penalty_txid = get_random_value_hex(32)
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
# Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
job = Job(locator, dispute_txid, justice_txid, None, None)
# Assign both penalty_txid and dispute_txid the same id (it shouldn't matter)
job = Job(locator, dispute_txid, penalty_txid, None, None)
jobs[uuid] = job
tx_job_map[justice_txid] = [uuid]
tx_job_map[penalty_txid] = [uuid]
db_manager.store_responder_job(uuid, job.to_json())
db_manager.store_update_locator_map(job.locator, uuid)
# Each justice_txid can have more than one uuid assigned to it.
# Each penalty_txid can have more than one uuid assigned to it.
if i % 2:
uuid = uuid4().hex
jobs[uuid] = job
tx_job_map[justice_txid].append(uuid)
tx_job_map[penalty_txid].append(uuid)
db_manager.store_responder_job(uuid, job.to_json())
db_manager.store_update_locator_map(job.locator, uuid)
@@ -128,27 +128,27 @@ def test_delete_completed_jobs_no_db_match(db_manager):
jobs, tx_job_map = set_up_jobs(db_manager, MAX_ITEMS)
selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)
# Let's change some uuid's by creating new jobs that are not included in the db and share a justice_txid with
# Let's change some uuids by creating new jobs that are not included in the db and share a penalty_txid with
# another job that is stored in the db.
for uuid in selected_jobs[: ITEMS // 2]:
justice_txid = jobs[uuid].justice_txid
penalty_txid = jobs[uuid].penalty_txid
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
new_uuid = uuid4().hex
jobs[new_uuid] = Job(locator, dispute_txid, justice_txid, None, None)
tx_job_map[justice_txid].append(new_uuid)
jobs[new_uuid] = Job(locator, dispute_txid, penalty_txid, None, None)
tx_job_map[penalty_txid].append(new_uuid)
selected_jobs.append(new_uuid)
# Let's add some random data
for i in range(ITEMS // 2):
uuid = uuid4().hex
justice_txid = get_random_value_hex(32)
penalty_txid = get_random_value_hex(32)
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
jobs[uuid] = Job(locator, dispute_txid, justice_txid, None, None)
tx_job_map[justice_txid] = [uuid]
jobs[uuid] = Job(locator, dispute_txid, penalty_txid, None, None)
tx_job_map[penalty_txid] = [uuid]
selected_jobs.append(uuid)
completed_jobs = [(job, 6) for job in selected_jobs]

View File

@@ -9,7 +9,7 @@ from pisa.errors import *
from pisa.inspector import Inspector
from pisa.appointment import Appointment
from pisa.block_processor import BlockProcessor
from pisa.conf import MIN_DISPUTE_DELTA
from pisa.conf import MIN_TO_SELF_DELAY
from test.unit.conftest import get_random_value_hex, generate_dummy_appointment_data, generate_keypair
@@ -124,25 +124,25 @@ def test_check_end_time():
assert Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE
def test_check_delta():
def test_check_to_self_delay():
# Right value, right format
deltas = [MIN_DISPUTE_DELTA, MIN_DISPUTE_DELTA + 1, MIN_DISPUTE_DELTA + 1000]
for delta in deltas:
assert Inspector.check_delta(delta) == APPOINTMENT_OK
to_self_delays = [MIN_TO_SELF_DELAY, MIN_TO_SELF_DELAY + 1, MIN_TO_SELF_DELAY + 1000]
for to_self_delay in to_self_delays:
assert Inspector.check_to_self_delay(to_self_delay) == APPOINTMENT_OK
# Delta too small
deltas = [MIN_DISPUTE_DELTA - 1, MIN_DISPUTE_DELTA - 2, 0, -1, -1000]
for delta in deltas:
assert Inspector.check_delta(delta)[0] == APPOINTMENT_FIELD_TOO_SMALL
# to_self_delay too small
to_self_delays = [MIN_TO_SELF_DELAY - 1, MIN_TO_SELF_DELAY - 2, 0, -1, -1000]
for to_self_delay in to_self_delays:
assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_FIELD_TOO_SMALL
# Empty field
delta = None
assert Inspector.check_delta(delta)[0] == APPOINTMENT_EMPTY_FIELD
to_self_delay = None
assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_EMPTY_FIELD
# Wrong data type
deltas = WRONG_TYPES
for delta in deltas:
assert Inspector.check_delta(delta)[0] == APPOINTMENT_WRONG_FIELD_TYPE
to_self_delays = WRONG_TYPES
for to_self_delay in to_self_delays:
assert Inspector.check_to_self_delay(to_self_delay)[0] == APPOINTMENT_WRONG_FIELD_TYPE
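For reference, a hedged reconstruction of the check these tests exercise; the body of Inspector.check_to_self_delay is not part of this diff, so treat the following as an assumption about its shape:

from pisa.conf import MIN_TO_SELF_DELAY
from pisa.errors import (
    APPOINTMENT_OK,
    APPOINTMENT_EMPTY_FIELD,
    APPOINTMENT_WRONG_FIELD_TYPE,
    APPOINTMENT_FIELD_TOO_SMALL,
)

def check_to_self_delay(to_self_delay):
    # Assumed convention, matching the asserts above: the bare OK code on
    # success, a (code, reason) tuple on failure.
    if to_self_delay is None:
        return APPOINTMENT_EMPTY_FIELD, "empty to_self_delay received"
    if not isinstance(to_self_delay, int):
        return APPOINTMENT_WRONG_FIELD_TYPE, "wrong to_self_delay data type"
    if to_self_delay < MIN_TO_SELF_DELAY:
        return APPOINTMENT_FIELD_TOO_SMALL, "to_self_delay too small"
    return APPOINTMENT_OK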
def test_check_blob():
@@ -212,14 +212,14 @@ def test_inspect(run_bitcoind):
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
start_time = BlockProcessor.get_block_count() + 5
end_time = start_time + 20
dispute_delta = MIN_DISPUTE_DELTA
to_self_delay = MIN_TO_SELF_DELAY
encrypted_blob = get_random_value_hex(64)
appointment_data = {
"locator": locator,
"start_time": start_time,
"end_time": end_time,
"dispute_delta": dispute_delta,
"to_self_delay": to_self_delay,
"encrypted_blob": encrypted_blob,
}
@@ -232,6 +232,6 @@ def test_inspect(run_bitcoind):
and appointment.locator == locator
and appointment.start_time == start_time
and appointment.end_time == end_time
and appointment.dispute_delta == dispute_delta
and appointment.to_self_delay == to_self_delay
and appointment.encrypted_blob.data == encrypted_blob
)

View File

@@ -38,15 +38,15 @@ def temp_db_manager():
rmtree(db_name)
def create_dummy_job_data(random_txid=False, justice_rawtx=None):
def create_dummy_job_data(random_txid=False, penalty_rawtx=None):
# The following transaction data corresponds to a valid transaction. For some tests it may be useful to have
# some valid data, but for others we may need multiple different justice_txids.
# some valid data, but for others we may need multiple different penalty_txids.
dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
justice_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"
penalty_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"
if justice_rawtx is None:
justice_rawtx = (
if penalty_rawtx is None:
penalty_rawtx = (
"0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402"
"204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4"
"acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b"
@@ -56,32 +56,32 @@ def create_dummy_job_data(random_txid=False, justice_rawtx=None):
)
else:
justice_txid = sha256d(justice_rawtx)
penalty_txid = sha256d(penalty_rawtx)
if random_txid is True:
justice_txid = get_random_value_hex(32)
penalty_txid = get_random_value_hex(32)
appointment_end = bitcoin_cli().getblockcount() + 2
locator = dispute_txid[:LOCATOR_LEN_HEX]
return locator, dispute_txid, justice_txid, justice_rawtx, appointment_end
return locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end
def create_dummy_job(random_txid=False, justice_rawtx=None):
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
random_txid, justice_rawtx
def create_dummy_job(random_txid=False, penalty_rawtx=None):
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data(
random_txid, penalty_rawtx
)
return Job(locator, dispute_txid, justice_txid, justice_rawtx, appointment_end)
return Job(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
def test_job_init(run_bitcoind):
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data()
job = Job(locator, dispute_txid, justice_txid, justice_rawtx, appointment_end)
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data()
job = Job(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
assert (
job.dispute_txid == dispute_txid
and job.justice_txid == justice_txid
and job.justice_rawtx == justice_rawtx
and job.penalty_txid == penalty_txid
and job.penalty_rawtx == penalty_rawtx
and job.appointment_end == appointment_end
)
@@ -109,7 +109,7 @@ def test_job_to_dict():
assert (
job.locator == job_dict["locator"]
and job.justice_rawtx == job_dict["justice_rawtx"]
and job.penalty_rawtx == job_dict["penalty_rawtx"]
and job.appointment_end == job_dict["appointment_end"]
)
@@ -120,7 +120,7 @@ def test_job_to_json():
assert (
job.locator == job_dict["locator"]
and job.justice_rawtx == job_dict["justice_rawtx"]
and job.penalty_rawtx == job_dict["penalty_rawtx"]
and job.appointment_end == job_dict["appointment_end"]
)
@@ -135,7 +135,7 @@ def test_job_from_dict():
def test_job_from_dict_invalid_data():
job_dict = create_dummy_job().to_dict()
for value in ["dispute_txid", "justice_txid", "justice_rawtx", "appointment_end"]:
for value in ["dispute_txid", "penalty_txid", "penalty_rawtx", "appointment_end"]:
job_dict_copy = deepcopy(job_dict)
job_dict_copy[value] = None
@@ -167,8 +167,8 @@ def test_add_response(db_manager):
job.locator,
uuid,
job.dispute_txid,
job.justice_txid,
job.justice_rawtx,
job.penalty_txid,
job.penalty_rawtx,
job.appointment_end,
block_hash=get_random_value_hex(32),
)
@@ -191,15 +191,15 @@ def test_add_bad_response(responder):
responder.asleep = False
# A txid instead of a rawtx should be enough for unit tests using the bitcoind mock; better tests are needed, though.
job.justice_rawtx = job.justice_txid
job.penalty_rawtx = job.penalty_txid
# The block_hash passed to add_response does not matter much now. It will matter in the future, when dealing with errors
receipt = responder.add_response(
job.locator,
uuid,
job.dispute_txid,
job.justice_txid,
job.justice_rawtx,
job.penalty_txid,
job.penalty_rawtx,
job.appointment_end,
block_hash=get_random_value_hex(32),
)
@@ -213,52 +213,52 @@ def test_create_job(responder):
for _ in range(20):
uuid = uuid4().hex
confirmations = 0
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
# Check the job is not within the responder jobs before adding it
assert uuid not in responder.jobs
assert justice_txid not in responder.tx_job_map
assert justice_txid not in responder.unconfirmed_txs
assert penalty_txid not in responder.tx_job_map
assert penalty_txid not in responder.unconfirmed_txs
# And that it is afterwards
responder.create_job(uuid, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
responder.create_job(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
assert uuid in responder.jobs
assert justice_txid in responder.tx_job_map
assert justice_txid in responder.unconfirmed_txs
assert penalty_txid in responder.tx_job_map
assert penalty_txid in responder.unconfirmed_txs
# Check that the rest of job data also matches
job = responder.jobs[uuid]
assert (
job.dispute_txid == dispute_txid
and job.justice_txid == justice_txid
and job.justice_rawtx == justice_rawtx
and job.penalty_txid == penalty_txid
and job.penalty_rawtx == penalty_rawtx
and job.appointment_end == appointment_end
)
def test_create_job_same_justice_txid(responder):
def test_create_job_same_penalty_txid(responder):
# Create the same job using two different uuids
confirmations = 0
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
uuid_1 = uuid4().hex
uuid_2 = uuid4().hex
responder.create_job(uuid_1, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
responder.create_job(uuid_2, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
responder.create_job(uuid_1, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
responder.create_job(uuid_2, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
# Check that both jobs have been added
assert uuid_1 in responder.jobs and uuid_2 in responder.jobs
assert justice_txid in responder.tx_job_map
assert justice_txid in responder.unconfirmed_txs
assert penalty_txid in responder.tx_job_map
assert penalty_txid in responder.unconfirmed_txs
# Check that the rest of job data also matches
for uuid in [uuid_1, uuid_2]:
job = responder.jobs[uuid]
assert (
job.dispute_txid == dispute_txid
and job.justice_txid == justice_txid
and job.justice_rawtx == justice_rawtx
and job.penalty_txid == penalty_txid
and job.penalty_rawtx == penalty_rawtx
and job.appointment_end == appointment_end
)
@@ -270,13 +270,13 @@ def test_create_job_already_confirmed(responder):
for i in range(20):
uuid = uuid4().hex
confirmations = i + 1
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
justice_rawtx=TX.create_dummy_transaction()
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data(
penalty_rawtx=TX.create_dummy_transaction()
)
responder.create_job(uuid, locator, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
responder.create_job(uuid, locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end, confirmations)
assert justice_txid not in responder.unconfirmed_txs
assert penalty_txid not in responder.unconfirmed_txs
def test_do_subscribe(responder):
@@ -303,16 +303,16 @@ def test_do_watch(temp_db_manager):
zmq_thread.daemon = True
zmq_thread.start()
jobs = [create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(20)]
jobs = [create_dummy_job(penalty_rawtx=TX.create_dummy_transaction()) for _ in range(20)]
# Let's set up the jobs first
for job in jobs:
uuid = uuid4().hex
responder.jobs[uuid] = job
responder.tx_job_map[job.justice_txid] = [uuid]
responder.missed_confirmations[job.justice_txid] = 0
responder.unconfirmed_txs.append(job.justice_txid)
responder.tx_job_map[job.penalty_txid] = [uuid]
responder.missed_confirmations[job.penalty_txid] = 0
responder.unconfirmed_txs.append(job.penalty_txid)
# Let's start to watch
watch_thread = Thread(target=responder.do_watch)
@@ -322,8 +322,8 @@ def test_do_watch(temp_db_manager):
# And broadcast some of the transactions
broadcast_txs = []
for job in jobs[:5]:
bitcoin_cli().sendrawtransaction(job.justice_rawtx)
broadcast_txs.append(job.justice_txid)
bitcoin_cli().sendrawtransaction(job.penalty_rawtx)
broadcast_txs.append(job.penalty_txid)
# Mine a block
generate_block()
@@ -341,8 +341,8 @@ def test_do_watch(temp_db_manager):
# Do the rest
broadcast_txs = []
for job in jobs[5:]:
bitcoin_cli().sendrawtransaction(job.justice_rawtx)
broadcast_txs.append(job.justice_txid)
bitcoin_cli().sendrawtransaction(job.penalty_rawtx)
broadcast_txs.append(job.penalty_txid)
# Mine some blocks
generate_blocks(6)
@@ -359,7 +359,7 @@ def test_check_confirmations(temp_db_manager):
zmq_thread.daemon = True
zmq_thread.start()
# check_confirmations checks, given a list of transaction for a block, what of the known justice transaction have
# check_confirmations checks, given a list of transactions for a block, which of the known penalty transactions have
# been confirmed. To test this we need to create a list of transactions and the state of the responder
txs = [get_random_value_hex(32) for _ in range(20)]
@@ -418,17 +418,17 @@ def test_get_completed_jobs(db_manager):
# A complete job is a job that has reached the appointment end with enough confirmations (> MIN_CONFIRMATIONS)
# We'll create three types of transactions: end reached + enough conf, end reached + not enough conf, end not reached
jobs_end_conf = {uuid4().hex: create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(10)}
jobs_end_conf = {uuid4().hex: create_dummy_job(penalty_rawtx=TX.create_dummy_transaction()) for _ in range(10)}
jobs_end_no_conf = {}
for _ in range(10):
job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
responder.unconfirmed_txs.append(job.justice_txid)
job = create_dummy_job(penalty_rawtx=TX.create_dummy_transaction())
responder.unconfirmed_txs.append(job.penalty_txid)
jobs_end_no_conf[uuid4().hex] = job
jobs_no_end = {}
for _ in range(10):
job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
job = create_dummy_job(penalty_rawtx=TX.create_dummy_transaction())
job.appointment_end += 10
jobs_no_end[uuid4().hex] = job
@@ -438,7 +438,7 @@ def test_get_completed_jobs(db_manager):
responder.jobs.update(jobs_no_end)
for uuid, job in responder.jobs.items():
bitcoin_cli().sendrawtransaction(job.justice_rawtx)
bitcoin_cli().sendrawtransaction(job.penalty_rawtx)
# The dummy appointments have an end_appointment time of current + 2, but jobs need at least 6 confs by default
generate_blocks(6)
@@ -468,17 +468,17 @@ def test_rebroadcast(db_manager):
# Rebroadcast calls add_response with retry=True. The job data is already in jobs.
for i in range(20):
uuid = uuid4().hex
locator, dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
justice_rawtx=TX.create_dummy_transaction()
locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_job_data(
penalty_rawtx=TX.create_dummy_transaction()
)
responder.jobs[uuid] = Job(locator, dispute_txid, justice_txid, justice_rawtx, appointment_end)
responder.tx_job_map[justice_txid] = [uuid]
responder.unconfirmed_txs.append(justice_txid)
responder.jobs[uuid] = Job(locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end)
responder.tx_job_map[penalty_txid] = [uuid]
responder.unconfirmed_txs.append(penalty_txid)
# Let's add some of the txs to the rebroadcast list
if (i % 2) == 0:
txs_to_rebroadcast.append(justice_txid)
txs_to_rebroadcast.append(penalty_txid)
# The block_hash passed to rebroadcast does not matter much now. It will matter in the future, when dealing with errors
receipts = responder.rebroadcast(txs_to_rebroadcast, get_random_value_hex(32))