watcher - Updates the LocatorCache so it is self-handled
With this approach the cache deals with deletion itself and provides getters and setters, so consumers do not directly access its internals.
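The new public surface of LocatorCache can be summarised with a short sketch. This is a trimmed reconstruction from the diff below, not the project's actual module: compute_locator is stood in by a hypothetical prefix helper (mirroring the txid[:16] convention used in the tests), and logging plus the reorg-handling fix method are omitted for brevity.

    from collections import OrderedDict


    def compute_locator(txid):
        # Hypothetical stand-in for teos' locator derivation (a txid prefix,
        # mirroring the txid[:16] convention used in the tests below)
        return txid[:16]


    class LocatorCache:
        # Trimmed reconstruction of the cache's new public surface
        def __init__(self, cache_size):
            self.cache = {}              # locator -> txid
            self.blocks = OrderedDict()  # block_hash -> [locators], oldest first
            self.cache_size = cache_size

        def get_txid(self, locator):
            # Getter: consumers no longer index locator_cache.cache directly
            return self.cache.get(locator)

        def update(self, block_hash, locator_txid_map):
            # Setter: adds a block's locators and evicts the oldest block itself,
            # so consumers no longer call is_full()/remove_oldest_block() by hand
            self.cache.update(locator_txid_map)
            self.blocks[block_hash] = list(locator_txid_map.keys())
            if self.is_full():
                self.remove_oldest_block()

        def is_full(self):
            return len(self.blocks) > self.cache_size

        def remove_oldest_block(self):
            _, locators = self.blocks.popitem(last=False)
            for locator in locators:
                del self.cache[locator]


    # Usage, as the Watcher now does it:
    cache = LocatorCache(cache_size=2)
    txid = "aa" * 32
    cache.update("block_1", {compute_locator(txid): txid})
    assert cache.get_txid(compute_locator(txid)) == txid
    assert cache.get_txid("unknown_locator") is None

Keeping eviction inside update means callers such as Watcher.do_watch no longer need to check is_full and evict by hand, which is exactly what the hunks below remove.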
@@ -77,9 +77,50 @@ class LocatorCache:
         self.blocks = OrderedDict(reversed((list(self.blocks.items()))))
 
-    def fix_cache(self, last_known_block, block_processor):
-        """
-        Fixes an existing cache after a reorg has been detected by feeding the most recent ``cache_size`` blocks to it.
+    def get_txid(self, locator):
+        """
+        Gets a txid from the locator cache.
+
+        Args:
+            locator (:obj:`str`): the locator to lookup in the cache.
+
+        Returns:
+            :obj:`str` or :obj:`None`: The txid linked to the given locator if found. None otherwise.
+        """
+        return self.cache.get(locator)
+
+    def update(self, block_hash, locator_txid_map):
+        """
+        Updates the cache with data from a new block. Removes the oldest block if the cache is full after the addition.
+
+        Args:
+            block_hash (:obj:`str`): the hash of the new block.
+            locator_txid_map (:obj:`dict`): the dictionary of locators (locator:txid) derived from a list of transaction
+                ids.
+        """
+
+        self.cache.update(locator_txid_map)
+        self.blocks[block_hash] = list(locator_txid_map.keys())
+        logger.debug("Block added to cache", block_hash=block_hash)
+
+        if self.is_full():
+            self.remove_oldest_block()
+
+    def is_full(self):
+        """ Returns whether the cache is full or not """
+        return len(self.blocks) > self.cache_size
+
+    def remove_oldest_block(self):
+        """ Removes the oldest block from the cache """
+        block_hash, locators = self.blocks.popitem(last=False)
+        for locator in locators:
+            del self.cache[locator]
+
+        logger.debug("Block removed from cache", block_hash=block_hash)
+
+    def fix(self, last_known_block, block_processor):
+        """
+        Fixes the cache after a reorg has been detected by feeding the most recent ``cache_size`` blocks to it.
 
         Args:
             last_known_block (:obj:`str`): the last known block hash after the reorg.
@@ -104,18 +145,6 @@ class LocatorCache:
         self.blocks = OrderedDict(reversed((list(tmp_cache.blocks.items()))))
         self.cache = tmp_cache.cache
 
-    def is_full(self):
-        """ Returns whether the cache is full or not """
-        return len(self.blocks) > self.cache_size
-
-    def remove_older_block(self):
-        """ Removes the older block from the cache """
-        block_hash, locators = self.blocks.popitem(last=False)
-        for locator in locators:
-            del self.cache[locator]
-
-        logger.debug("Block removed from cache", block_hash=block_hash)
-
 
 class Watcher:
     """
@@ -242,9 +271,9 @@ class Watcher:
         available_slots = self.gatekeeper.add_update_appointment(user_id, uuid, appointment)
 
         # Appointments that were triggered in blocks held in the cache
-        if appointment.locator in self.locator_cache.cache:
+        dispute_txid = self.locator_cache.get_txid(appointment.locator)
+        if dispute_txid:
             try:
-                dispute_txid = self.locator_cache.cache[appointment.locator]
                 penalty_txid, penalty_rawtx = self.check_breach(uuid, appointment, dispute_txid)
                 receipt = self.responder.handle_breach(
                     uuid, appointment.locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, self.last_known_block
@@ -319,14 +348,12 @@ class Watcher:
 
             # If a reorg is detected, the cache is fixed to cover the last `cache_size` blocks of the new chain
             if self.last_known_block != block.get("previousblockhash"):
-                self.locator_cache.fix_cache(block_hash, self.block_processor)
+                self.locator_cache.fix(block_hash, self.block_processor)
 
             txids = block.get("tx")
             # Compute the locator for every transaction in the block and add them to the cache
             locator_txid_map = {compute_locator(txid): txid for txid in txids}
-            self.locator_cache.cache.update(locator_txid_map)
-            self.locator_cache.blocks[block_hash] = list(locator_txid_map.keys())
-            logger.debug("Block added to cache", block_hash=block_hash)
+            self.locator_cache.update(block_hash, locator_txid_map)
 
             if len(self.appointments) > 0 and locator_txid_map:
                 expired_appointments = self.gatekeeper.get_expired_appointments(block["height"])
@@ -392,10 +419,6 @@ class Watcher:
             if len(self.appointments) != 0:
                 logger.info("No more pending appointments")
 
-            # Remove a block from the cache if the cache has reached its maximum size
-            if self.locator_cache.is_full():
-                self.locator_cache.remove_older_block()
-
             # Register the last processed block for the Watcher
             self.db_manager.store_last_block_hash_watcher(block_hash)
             self.last_known_block = block.get("hash")
@@ -137,6 +137,122 @@ def test_locator_cache_init(block_processor):
         assert block_processor.get_block(k)
 
 
+def test_get_txid():
+    # Not much to test here, this is shadowing dict.get
+    locator = get_random_value_hex(16)
+    txid = get_random_value_hex(32)
+
+    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
+    locator_cache.cache[locator] = txid
+
+    assert locator_cache.get_txid(locator) == txid
+
+    # A random locator should fail
+    assert locator_cache.get_txid(get_random_value_hex(16)) is None
+
+
+def test_update_cache():
+    # Update should add data about a new block in the cache. If the cache is full, the oldest block is dropped.
+    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
+
+    block_hash = get_random_value_hex(32)
+    txs = [get_random_value_hex(32) for _ in range(10)]
+    locator_txid_map = {compute_locator(txid): txid for txid in txs}
+
+    # Cache is empty
+    assert block_hash not in locator_cache.blocks
+    for locator in locator_txid_map.keys():
+        assert locator not in locator_cache.cache
+
+    # The data has been added to the cache
+    locator_cache.update(block_hash, locator_txid_map)
+    assert block_hash in locator_cache.blocks
+    for locator in locator_txid_map.keys():
+        assert locator in locator_cache.cache
+
+
+def test_update_cache_full():
+    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
+    block_hashes = []
+    big_map = {}
+
+    for i in range(locator_cache.cache_size):
+        block_hash = get_random_value_hex(32)
+        txs = [get_random_value_hex(32) for _ in range(10)]
+        locator_txid_map = {compute_locator(txid): txid for txid in txs}
+        locator_cache.update(block_hash, locator_txid_map)
+
+        if i == 0:
+            first_block_hash = block_hash
+            first_locator_txid_map = locator_txid_map
+        else:
+            block_hashes.append(block_hash)
+            big_map.update(locator_txid_map)
+
+    # The cache is now full.
+    assert first_block_hash in locator_cache.blocks
+    for locator in first_locator_txid_map.keys():
+        assert locator in locator_cache.cache
+
+    # Add one more
+    block_hash = get_random_value_hex(32)
+    txs = [get_random_value_hex(32) for _ in range(10)]
+    locator_txid_map = {compute_locator(txid): txid for txid in txs}
+    locator_cache.update(block_hash, locator_txid_map)
+
+    # The first block is not there anymore, but the rest are there
+    assert first_block_hash not in locator_cache.blocks
+    for locator in first_locator_txid_map.keys():
+        assert locator not in locator_cache.cache
+
+    for block_hash in block_hashes:
+        assert block_hash in locator_cache.blocks
+
+    for locator in big_map.keys():
+        assert locator in locator_cache.cache
+
+
+def test_locator_cache_is_full(block_processor):
+    # Empty cache
+    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
+
+    for _ in range(locator_cache.cache_size):
+        locator_cache.blocks[uuid4().hex] = 0
+    assert not locator_cache.is_full()
+
+    locator_cache.blocks[uuid4().hex] = 0
+    assert locator_cache.is_full()
+
+
+def test_locator_remove_oldest_block(block_processor):
+    # Empty cache
+    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
+
+    # Add some blocks to the cache
+    for _ in range(locator_cache.cache_size):
+        txid = get_random_value_hex(32)
+        locator = txid[:16]
+        locator_cache.blocks[get_random_value_hex(32)] = {locator: txid}
+        locator_cache.cache[locator] = txid
+
+    blocks_in_cache = locator_cache.blocks
+    oldest_block_hash = list(blocks_in_cache.keys())[0]
+    oldest_block_data = blocks_in_cache.get(oldest_block_hash)
+    rest_of_blocks = list(blocks_in_cache.keys())[1:]
+    locator_cache.remove_oldest_block()
+
+    # Oldest block data is not in the cache
+    assert oldest_block_hash not in locator_cache.blocks
+    for locator in oldest_block_data:
+        assert locator not in locator_cache.cache
+
+    # The rest of data is in the cache
+    assert set(rest_of_blocks).issubset(locator_cache.blocks)
+    for block_hash in rest_of_blocks:
+        for locator in locator_cache.blocks[block_hash]:
+            assert locator in locator_cache.cache
+
+
 def test_fix_cache(block_processor):
     # This tests how a reorg will create a new version of the cache
     # Let's start by setting up a full cache. We'll mine ``cache_size`` blocks to be sure it's full
@@ -152,7 +268,7 @@ def test_fix_cache(block_processor):
     current_tip_parent = block_processor.get_block(current_tip).get("previousblockhash")
     current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
     fake_tip = block_processor.get_block(current_tip_parent).get("previousblockhash")
-    locator_cache.fix_cache(fake_tip, block_processor)
+    locator_cache.fix(fake_tip, block_processor)
 
     # The last two blocks are not in the cache, nor are any of their locators
     assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
@@ -168,7 +284,7 @@ def test_fix_cache(block_processor):
     new_cache = deepcopy(locator_cache)
 
     generate_blocks_w_delay((config.get("LOCATOR_CACHE_SIZE") * 2))
-    new_cache.fix_cache(block_processor.get_best_block_hash(), block_processor)
+    new_cache.fix(block_processor.get_best_block_hash(), block_processor)
 
     # None of the data from the old cache is in the new cache
     for block_hash, locators in locator_cache.blocks.items():
@@ -185,47 +301,6 @@ def test_fix_cache(block_processor):
             assert locator in new_cache.cache
 
 
-def test_locator_cache_is_full(block_processor):
-    # Empty cache
-    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
-
-    for _ in range(locator_cache.cache_size):
-        locator_cache.blocks[uuid4().hex] = 0
-    assert not locator_cache.is_full()
-
-    locator_cache.blocks[uuid4().hex] = 0
-    assert locator_cache.is_full()
-
-
-def test_locator_remove_older_block(block_processor):
-    # Empty cache
-    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
-
-    # Add some blocks to the cache
-    for _ in range(locator_cache.cache_size):
-        txid = get_random_value_hex(32)
-        locator = txid[:16]
-        locator_cache.blocks[get_random_value_hex(32)] = {locator: txid}
-        locator_cache.cache[locator] = txid
-
-    blocks_in_cache = locator_cache.blocks
-    oldest_block_hash = list(blocks_in_cache.keys())[0]
-    oldest_block_data = blocks_in_cache.get(oldest_block_hash)
-    rest_of_blocks = list(blocks_in_cache.keys())[1:]
-    locator_cache.remove_older_block()
-
-    # Oldest block data is not in the cache
-    assert oldest_block_hash not in locator_cache.blocks
-    for locator in oldest_block_data:
-        assert locator not in locator_cache.cache
-
-    # The rest of data is in the cache
-    assert set(rest_of_blocks).issubset(locator_cache.blocks)
-    for block_hash in rest_of_blocks:
-        for locator in locator_cache.blocks[block_hash]:
-            assert locator in locator_cache.cache
-
-
 def test_watcher_init(watcher):
     assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0
     assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0
@@ -494,7 +569,7 @@ def test_do_watch_cache_update(watcher):
     assert oldest_block_hash not in watcher.locator_cache.blocks
     assert set(rest_of_blocks).issubset(watcher.locator_cache.blocks.keys())
 
-    # The locators of the older block are gone but the rest remain
+    # The locators of the oldest block are gone but the rest remain
     for locator in oldest_block_data:
        assert locator not in watcher.locator_cache.cache
     for block_hash in rest_of_blocks: