Mirror of https://github.com/aljazceru/python-teos.git (synced 2025-12-17 14:14:22 +01:00)
teos - addresses minor comments from #149
@@ -63,12 +63,12 @@ class LocatorCache:
         for _ in range(self.cache_size):
             # In some setups, like regtest, it could be the case that there are no enough previous blocks.
             # In those cases we pull as many as we can (up to cache_size).
-            if target_block_hash:
+            if not target_block_hash:
+                break
+
             target_block = block_processor.get_block(target_block_hash)
             if not target_block:
                 break
-            else:
-                break
 
             locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")}
             self.cache.update(locator_txid_map)
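The change above flattens the loop into early-exit guards, so the cache bootstrap reads top to bottom: stop if there is no ancestor left, stop if the block cannot be fetched, otherwise cache its locators. A minimal, self-contained sketch of that walk, using a plain dict instead of teos' BlockProcessor and an illustrative stand-in for compute_locator (neither is the tower's real implementation):

from hashlib import sha256


def compute_locator(txid):
    # Illustrative stand-in only: teos derives a short locator from the dispute txid.
    return sha256(txid.encode()).hexdigest()[:32]


def init_cache(last_known_block, blocks_by_hash, cache_size):
    """Walks back up to cache_size blocks, collecting locator -> txid entries."""
    cache = {}
    target_block_hash = last_known_block
    for _ in range(cache_size):
        # On short chains (e.g. regtest) there may not be enough ancestors: stop early.
        if not target_block_hash:
            break

        target_block = blocks_by_hash.get(target_block_hash)
        if not target_block:
            break

        cache.update({compute_locator(txid): txid for txid in target_block["tx"]})
        target_block_hash = target_block.get("previousblockhash")

    return cache


# Toy two-block chain: a cache_size of 6 simply pulls the two blocks that exist.
toy_chain = {
    "b2": {"tx": ["txB"], "previousblockhash": "b1"},
    "b1": {"tx": ["txA"], "previousblockhash": None},
}
assert len(init_cache("b2", toy_chain, cache_size=6)) == 2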
@@ -79,7 +79,7 @@ class LocatorCache:
 
     def fix_cache(self, last_known_block, block_processor):
         """
-        Fixes an existing cache after a reorg has been detected by feeding the last ``cache_size`` blocks to it.
+        Fixes an existing cache after a reorg has been detected by feeding the most recent ``cache_size`` blocks to it.
 
         Args:
             last_known_block (:obj:`str`): the last known block hash after the reorg.
@@ -94,6 +94,8 @@ class LocatorCache:
         for _ in range(tmp_cache.cache_size):
             target_block = block_processor.get_block(target_block_hash)
             if target_block:
+                # Compute the locator:txid par for every transaction in the block and update both the cache and
+                # the block mapping.
                 locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")}
                 tmp_cache.cache.update(locator_txid_map)
                 tmp_cache.blocks[target_block_hash] = list(locator_txid_map.keys())
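The two comment lines added above document what the rebuild loop does: for every block still reachable from the new tip, recompute the locator:txid pairs and remember which block they came from. A rough sketch of that rebuild under the same toy assumptions as the previous snippet (the returned pair of dicts mirrors, but is not, the tower's cache and blocks attributes):

def fix_cache(last_known_block, blocks_by_hash, cache_size, compute_locator):
    """Rebuilds locator data for the cache_size most recent blocks of the new chain."""
    tmp_cache = {}   # locator -> txid
    tmp_blocks = {}  # block hash -> [locators], i.e. the block mapping

    target_block_hash = last_known_block
    for _ in range(cache_size):
        target_block = blocks_by_hash.get(target_block_hash)
        if target_block:
            # Compute the locator:txid pair for every transaction in the block and update
            # both the temporary cache and the block mapping.
            locator_txid_map = {compute_locator(txid): txid for txid in target_block["tx"]}
            tmp_cache.update(locator_txid_map)
            tmp_blocks[target_block_hash] = list(locator_txid_map.keys())
            target_block_hash = target_block.get("previousblockhash")
        else:
            break

    # The real LocatorCache would now replace its own state with the rebuilt one.
    return tmp_cache, tmp_blocks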
@@ -239,7 +241,7 @@ class Watcher:
             # Add the appointment to the Gatekeeper
             available_slots = self.gatekeeper.add_update_appointment(user_id, uuid, appointment)
 
-            # Appointments that were triggered in blocks hold in the cache
+            # Appointments that were triggered in blocks held in the cache
             if appointment.locator in self.locator_cache.cache:
                 try:
                     dispute_txid = self.locator_cache.cache[appointment.locator]
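The corrected comment refers to the fast path right after an appointment is accepted: if its locator already sits in the cache, the dispute was mined in a recent block, so the tower can react immediately instead of waiting for the next block. A hedged sketch of that branch, with decrypt_blob and respond injected as placeholders for the tower's Cryptographer and Responder machinery:

def dispatch_if_cached(appointment, locator_cache, decrypt_blob, respond):
    """Hands the appointment over immediately if its trigger is already in the cache."""
    dispute_txid = locator_cache.get(appointment["locator"])
    if dispute_txid is None:
        return "watching"  # normal path: keep watching for future blocks

    try:
        # The encrypted blob is keyed to the dispute txid; a blob that does not decrypt
        # means the appointment can be accepted but never acted upon.
        penalty_rawtx = decrypt_blob(appointment["encrypted_blob"], dispute_txid)
    except ValueError:
        return "invalid_blob"

    respond(dispute_txid, penalty_rawtx)
    return "dispute_responded"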
@@ -315,7 +317,7 @@ class Watcher:
             block = self.block_processor.get_block(block_hash)
             logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"))
 
-            # If a reorg is detected, the cache is fixed to cover the las `cache_size` blocks of the new chain
+            # If a reorg is detected, the cache is fixed to cover the last `cache_size` blocks of the new chain
             if self.last_known_block != block.get("previousblockhash"):
                 self.locator_cache.fix_cache(block_hash, self.block_processor)
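The reorg check fixed above is a plain parent-hash comparison: if the incoming block does not build on the block the Watcher saw last, the chain has been reorganised and the cache is rebuilt from the new tip. A minimal sketch of that decision, assuming a bitcoind-style get_block that returns a dict with previousblockhash:

def process_new_block(block_hash, last_known_block, get_block, fix_cache, update_cache):
    """Returns the new last known block, repairing the cache if a reorg is detected."""
    block = get_block(block_hash)

    if last_known_block and last_known_block != block.get("previousblockhash"):
        # Our previous tip is not the parent of the new block: rebuild the cache so it
        # covers the last cache_size blocks of the new chain.
        fix_cache(block_hash)
    else:
        # Normal chain extension: just fold the new block's locators into the cache.
        update_cache(block)

    return block_hash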
@@ -40,8 +40,8 @@ teosd_process = run_teosd()
 
 teos_id, user_sk, user_id = teos_cli.load_keys(cli_config.get("TEOS_PUBLIC_KEY"), cli_config.get("CLI_PRIVATE_KEY"))
 
-appointment_in_watcher = 0
-appointment_in_responder = 0
+appointments_in_watcher = 0
+appointments_in_responder = 0
 
 
 def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
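For context, a helper with this name typically just broadcasts the commitment (dispute) transaction and confirms it; a plausible sketch, assuming bitcoin_cli exposes the standard bitcoind RPCs sendrawtransaction and generatetoaddress (the body below is a guess, not the module's actual implementation):

def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
    # Broadcast the breach and mine a block on top so the tower sees it confirmed.
    bitcoin_cli.sendrawtransaction(commitment_tx)
    bitcoin_cli.generatetoaddress(1, addr)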
@@ -81,7 +81,7 @@ def test_commands_non_registered(bitcoin_cli):
 
 
 def test_commands_registered(bitcoin_cli):
-    global appointment_in_watcher
+    global appointments_in_watcher
 
     # Test registering and trying again
     teos_cli.register(user_id, teos_base_endpoint)
@@ -98,11 +98,11 @@ def test_commands_registered(bitcoin_cli):
     r = get_appointment_info(appointment_data.get("locator"))
     assert r.get("locator") == appointment.locator
     assert r.get("appointment") == appointment.to_dict()
-    appointment_in_watcher += 1
+    appointments_in_watcher += 1
 
 
 def test_appointment_life_cycle(bitcoin_cli):
-    global appointment_in_watcher, appointment_in_responder
+    global appointments_in_watcher, appointments_in_responder
 
     # First of all we need to register
     response = teos_cli.register(user_id, teos_base_endpoint)
@@ -114,7 +114,7 @@ def test_appointment_life_cycle(bitcoin_cli):
     appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
     locator = compute_locator(commitment_tx_id)
     appointment, signature = add_appointment(appointment_data)
-    appointment_in_watcher += 1
+    appointments_in_watcher += 1
 
     # Get the information from the tower to check that it matches
     appointment_info = get_appointment_info(locator)
@@ -126,7 +126,7 @@ def test_appointment_life_cycle(bitcoin_cli):
     all_appointments = get_all_appointments()
     watching = all_appointments.get("watcher_appointments")
     responding = all_appointments.get("responder_trackers")
-    assert len(watching) == appointment_in_watcher and len(responding) == 0
+    assert len(watching) == appointments_in_watcher and len(responding) == 0
 
     # Trigger a breach and check again
     new_addr = bitcoin_cli.getnewaddress()
@@ -134,13 +134,13 @@ def test_appointment_life_cycle(bitcoin_cli):
     appointment_info = get_appointment_info(locator)
     assert appointment_info.get("status") == "dispute_responded"
     assert appointment_info.get("locator") == locator
-    appointment_in_watcher -= 1
-    appointment_in_responder += 1
+    appointments_in_watcher -= 1
+    appointments_in_responder += 1
 
     all_appointments = get_all_appointments()
     watching = all_appointments.get("watcher_appointments")
     responding = all_appointments.get("responder_trackers")
-    assert len(watching) == appointment_in_watcher and len(responding) == appointment_in_responder
+    assert len(watching) == appointments_in_watcher and len(responding) == appointments_in_responder
 
     # It can be also checked by ensuring that the penalty transaction made it to the network
     penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid")
@@ -155,7 +155,7 @@ def test_appointment_life_cycle(bitcoin_cli):
 
     # Now let's mine some blocks so the appointment reaches its end. We need 100 + EXPIRY_DELTA -1
     bitcoin_cli.generatetoaddress(100 + teos_config.get("EXPIRY_DELTA") - 1, new_addr)
-    appointment_in_responder -= 1
+    appointments_in_responder -= 1
 
     # The appointment is no longer in the tower
     with pytest.raises(TowerResponseError):
@@ -166,12 +166,12 @@ def test_appointment_life_cycle(bitcoin_cli):
     response = teos_cli.register(user_id, teos_base_endpoint)
     assert (
         response.get("available_slots")
-        == available_slots + teos_config.get("DEFAULT_SLOTS") + 1 - appointment_in_watcher - appointment_in_responder
+        == available_slots + teos_config.get("DEFAULT_SLOTS") + 1 - appointments_in_watcher - appointments_in_responder
     )
 
 
 def test_multiple_appointments_life_cycle(bitcoin_cli):
-    global appointment_in_watcher, appointment_in_responder
+    global appointments_in_watcher, appointments_in_responder
     # Tests that get_all_appointments returns all the appointments the tower is storing at various stages in the
     # appointment lifecycle.
     appointments = []
@@ -196,7 +196,7 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
     # Send all of them to watchtower.
     for appt in appointments:
         add_appointment(appt.get("appointment_data"))
-        appointment_in_watcher += 1
+        appointments_in_watcher += 1
 
     # Two of these appointments are breached, and the watchtower responds to them.
     breached_appointments = []
@@ -205,15 +205,15 @@ def test_multiple_appointments_life_cycle(bitcoin_cli):
         broadcast_transaction_and_mine_block(bitcoin_cli, appointments[i]["commitment_tx"], new_addr)
         bitcoin_cli.generatetoaddress(1, new_addr)
         breached_appointments.append(appointments[i]["locator"])
-        appointment_in_watcher -= 1
-        appointment_in_responder += 1
+        appointments_in_watcher -= 1
+        appointments_in_responder += 1
         sleep(1)
 
     # Test that they all show up in get_all_appointments at the correct stages.
     all_appointments = get_all_appointments()
     watching = all_appointments.get("watcher_appointments")
     responding = all_appointments.get("responder_trackers")
-    assert len(watching) == appointment_in_watcher and len(responding) == appointment_in_responder
+    assert len(watching) == appointments_in_watcher and len(responding) == appointments_in_responder
     responder_locators = [appointment["locator"] for uuid, appointment in responding.items()]
     assert set(responder_locators) == set(breached_appointments)
 
@@ -409,7 +409,7 @@ def test_two_appointment_same_locator_different_penalty_different_users(bitcoin_
 
 
 def test_add_appointment_trigger_on_cache(bitcoin_cli):
-    # This tests sending an appointment which trigger is in the cache
+    # This tests sending an appointment whose trigger is in the cache
     commitment_tx, penalty_tx = create_txs(bitcoin_cli)
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
     appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)
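The reworded comment describes a test where the dispute is mined before the appointment is sent, so the Watcher has to find the trigger through its locator cache rather than in a fresh block. A condensed sketch of that flow, reusing the helpers that appear elsewhere in this test module (create_txs, broadcast_transaction_and_mine_block, build_appointment_data, add_appointment, get_appointment_info); the exact assertions are an assumption, not a copy of the test:

def test_add_appointment_trigger_on_cache_sketch(bitcoin_cli):
    commitment_tx, penalty_tx = create_txs(bitcoin_cli)
    commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
    appointment_data = build_appointment_data(commitment_tx_id, penalty_tx)

    # Mine the dispute FIRST, so its locator lands in the Watcher's cache.
    new_addr = bitcoin_cli.getnewaddress()
    broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)

    # Only now send the appointment; the tower should trigger it straight from the cache
    # and hand it to the Responder.
    add_appointment(appointment_data)
    appointment_info = get_appointment_info(appointment_data.get("locator"))
    assert appointment_info.get("status") == "dispute_responded"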