teos - additional docs

This commit is contained in:
Sergi Delgado Segura
2020-06-03 17:39:27 +02:00
parent 6ea3e8e3ff
commit 08c794590c
2 changed files with 15 additions and 5 deletions

View File

@@ -62,7 +62,7 @@ class LocatorCache:
target_block_hash = last_known_block target_block_hash = last_known_block
for _ in range(self.cache_size): for _ in range(self.cache_size):
# In some setups, like regtest, it could be the case that there are not enough previous blocks. # In some setups, like regtest, it could be the case that there are not enough previous blocks.
# In those cases we pull as many as we can (up to ``cache_size``). # In those cases we pull as many as we can (up to cache_size).
if target_block_hash: if target_block_hash:
target_block = block_processor.get_block(target_block_hash) target_block = block_processor.get_block(target_block_hash)
if not target_block: if not target_block:
@@ -78,10 +78,18 @@ class LocatorCache:
self.blocks = OrderedDict(reversed((list(self.blocks.items())))) self.blocks = OrderedDict(reversed((list(self.blocks.items()))))
def fix_cache(self, last_known_block, block_processor): def fix_cache(self, last_known_block, block_processor):
"""
Fixes an existing cache after a reorg has been detected by feeding the last ``cache_size`` blocks to it.
Args:
last_known_block (:obj:`str`): the last known block hash after the reorg.
block_processor (:obj:`teos.block_processor.BlockProcessor`): a ``BlockProcessor`` instance.
"""
tmp_cache = LocatorCache(self.cache_size) tmp_cache = LocatorCache(self.cache_size)
# We assume there are no reorgs back to genesis. If so, this would raise some log warnings. And the cache will # We assume there are no reorgs back to genesis. If so, this would raise some log warnings. And the cache will
# be filled with less than ``cache_size`` blocks.` # be filled with fewer than cache_size blocks.
target_block_hash = last_known_block target_block_hash = last_known_block
for _ in range(tmp_cache.cache_size): for _ in range(tmp_cache.cache_size):
target_block = block_processor.get_block(target_block_hash) target_block = block_processor.get_block(target_block_hash)
@@ -463,8 +471,10 @@ class Watcher:
breaches (:obj:`dict`): a dictionary containing channel breaches (``locator:txid``). breaches (:obj:`dict`): a dictionary containing channel breaches (``locator:txid``).
Returns: Returns:
:obj:`dict`: A dictionary containing all the breaches flagged either as valid or invalid. :obj:`tuple`: A dictionary and a list. The former contains the valid breaches, while the latter contains the
The structure is as follows: invalid ones.
The valid breaches dictionary has the following structure:
``{locator, dispute_txid, penalty_txid, penalty_rawtx}`` ``{locator, dispute_txid, penalty_txid, penalty_rawtx}``
""" """

View File

@@ -382,7 +382,7 @@ def test_add_appointment_in_cache(api, client):
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id) r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED
# The appointment would be rejected even if the data is not in the cache provided we've it has been triggered # The appointment would be rejected even if the data is not in the cache provided it has been triggered
del api.watcher.locator_cache.cache[appointment.locator] del api.watcher.locator_cache.cache[appointment.locator]
r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id) r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED assert r.status_code == HTTP_BAD_REQUEST and r.json.get("error_code") == errors.APPOINTMENT_ALREADY_TRIGGERED