From 90e1245a84e8a97457f1b392a6e84b111720f73f Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 16 Jan 2020 17:08:44 +0100 Subject: [PATCH 01/93] Fixes some bugs based on E2E testing --- apps/cli/pisa_cli.py | 16 ++++++++-------- common/tools.py | 13 +++++++++++++ pisa/tools.py | 3 ++- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py index 440860d..56074de 100644 --- a/apps/cli/pisa_cli.py +++ b/apps/cli/pisa_cli.py @@ -23,7 +23,7 @@ from apps.cli import ( from common.logger import Logger from common.appointment import Appointment from common.cryptographer import Cryptographer -from common.tools import check_sha256_hex_format, compute_locator +from common.tools import check_sha256_hex_format, check_locator_format, compute_locator HTTP_OK = 200 @@ -256,7 +256,7 @@ def check_signature(signature, appointment): def get_appointment(args): if not args: logger.error("No arguments were given") - return False + return None arg_opt = args.pop(0) @@ -264,27 +264,27 @@ def get_appointment(args): sys.exit(help_get_appointment()) else: locator = arg_opt - valid_locator = check_sha256_hex_format(locator) + valid_locator = check_locator_format(locator) if not valid_locator: logger.error("The provided locator is not valid", locator=locator) - return False + return None get_appointment_endpoint = "http://{}:{}/get_appointment".format(pisa_api_server, pisa_api_port) parameters = "?locator={}".format(locator) try: r = requests.get(url=get_appointment_endpoint + parameters, timeout=5) - logger.info("Appointment response returned from server: " + str(r)) - return True + logger.info("Appointment response returned from server: {}".format(r.json())) + return r.json() except ConnectTimeout: logger.error("Can't connect to pisa API. Connection timeout") - return False + return None except ConnectionError: logger.error("Can't connect to pisa API. Server cannot be reached") - return False + return None def get_appointment_signature(appointment): diff --git a/common/tools.py b/common/tools.py index 2ac6d1e..d208272 100644 --- a/common/tools.py +++ b/common/tools.py @@ -15,6 +15,19 @@ def check_sha256_hex_format(value): return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{64}$", value) is not None +def check_locator_format(value): + """ + Checks if a given value is a 16-byte hex encoded string. + + Args: + value(:mod:`str`): the value to be checked. + + Returns: + :mod:`bool`: Whether or not the value matches the format. + """ + return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{32}$", value) is not None + + def compute_locator(tx_id): """ Computes an appointment locator given a transaction id. 
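
A quick sketch of how the two format checkers above relate (illustration only: it assumes, consistently with the tests updated later in this series, that a locator is 16 bytes, half the size of a 32-byte transaction id, and that compute_locator derives it from the txid; the exact derivation is not shown in this hunk):

    import re

    def check_sha256_hex_format(value):
        # 32-byte values (64 hex chars), e.g. transaction ids
        return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{64}$", value) is not None

    def check_locator_format(value):
        # 16-byte values (32 hex chars), i.e. appointment locators
        return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{32}$", value) is not None

    tx_id = "ab" * 32        # a well-formed 32-byte hex txid
    locator = tx_id[:32]     # hypothetical locator: the txid's first 16 bytes
    assert check_sha256_hex_format(tx_id) and not check_locator_format(tx_id)
    assert check_locator_format(locator) and not check_sha256_hex_format(locator)
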
diff --git a/pisa/tools.py b/pisa/tools.py index 534b535..540020c 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -1,4 +1,5 @@ from http.client import HTTPException +from socket import timeout import pisa.conf as conf from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException @@ -36,7 +37,7 @@ def can_connect_to_bitcoind(): try: bitcoin_cli().help() - except (ConnectionRefusedError, JSONRPCException, HTTPException): + except (timeout, ConnectionRefusedError, JSONRPCException, HTTPException): can_connect = False return can_connect From db330ce353a0d466f15e3e5849020c43803f02fc Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 16 Jan 2020 17:09:13 +0100 Subject: [PATCH 02/93] Updates cli unit tests to match the bug fixes --- test/apps/cli/unit/test_pisa_cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_pisa_cli.py index 74c6a95..b6b0219 100644 --- a/test/apps/cli/unit/test_pisa_cli.py +++ b/test/apps/cli/unit/test_pisa_cli.py @@ -53,7 +53,7 @@ dummy_appointment_request = { # This is the format appointment turns into once it hits "add_appointment" dummy_appointment_full = { - "locator": get_random_value_hex(32), + "locator": get_random_value_hex(16), "start_time": 1500, "end_time": 50000, "to_self_delay": 200, @@ -244,7 +244,7 @@ def test_get_appointment(): assert len(responses.calls) == 1 assert responses.calls[0].request.url == request_url - assert result + assert result.get("locator") == response.get("locator") @responses.activate From 15b31105e10a3924995a4e35192fc66683b11a85 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 16 Jan 2020 17:09:26 +0100 Subject: [PATCH 03/93] Adds basic E2E testing --- test/pisa/e2e/__init__.py | 0 test/pisa/e2e/test_basic_e2e.py | 111 ++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+) create mode 100644 test/pisa/e2e/__init__.py create mode 100644 test/pisa/e2e/test_basic_e2e.py diff --git a/test/pisa/e2e/__init__.py b/test/pisa/e2e/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py new file mode 100644 index 0000000..b635ea6 --- /dev/null +++ b/test/pisa/e2e/test_basic_e2e.py @@ -0,0 +1,111 @@ +import json +from time import sleep +from decimal import Decimal, getcontext + +import pisa.conf as conf +from pisa import HOST, PORT +from pisa.utils.auth_proxy import AuthServiceProxy + +from common.tools import compute_locator + +from apps.cli import pisa_cli + + +getcontext().prec = 10 + +bitcoin_cli = AuthServiceProxy( + "http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, 18444) +) + +END_TIME_DELTA = 10 + + +def create_txs(): + utxos = bitcoin_cli.listunspent() + + if len(utxos) == 0: + raise ValueError("There's no UTXOs.") + + commitment_tx_ins = {"txid": utxos[0].get("txid"), "vout": utxos[0].get("vout")} + commitment_tx_outs = {utxos[0].get("address"): utxos[0].get("amount") - Decimal(1 / pow(10, 5))} + + raw_commitment_tx = bitcoin_cli.createrawtransaction([commitment_tx_ins], commitment_tx_outs) + signed_commitment_tx = bitcoin_cli.signrawtransactionwithwallet(raw_commitment_tx) + + if not signed_commitment_tx.get("complete"): + raise ValueError("Couldn't sign transaction. 
{}".format(signed_commitment_tx)) + + decoded_commitment_tx = bitcoin_cli.decoderawtransaction(signed_commitment_tx.get("hex")) + + penalty_tx_ins = {"txid": decoded_commitment_tx.get("txid"), "vout": 0} + address = decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("addresses")[0] + penalty_tx_outs = {address: decoded_commitment_tx.get("vout")[0].get("value") - Decimal(1 / pow(10, 5))} + + orphan_info = { + "txid": decoded_commitment_tx.get("txid"), + "scriptPubKey": decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("hex"), + "vout": 0, + "amount": decoded_commitment_tx.get("vout")[0].get("value"), + } + + raw_penalty_tx = bitcoin_cli.createrawtransaction([penalty_tx_ins], penalty_tx_outs) + signed_penalty_tx = bitcoin_cli.signrawtransactionwithwallet(raw_penalty_tx, [orphan_info]) + + if not signed_penalty_tx.get("complete"): + raise ValueError("Couldn't sign orphan transaction. {}".format(signed_commitment_tx)) + + return signed_commitment_tx.get("hex"), signed_penalty_tx.get("hex") + + +def build_appointment_data(commitment_tx, penalty_tx): + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + current_height = bitcoin_cli.getblockcount() + + appointment_data = { + "tx": penalty_tx, + "tx_id": commitment_tx_id, + "start_time": current_height + 1, + "end_time": current_height + 1 + END_TIME_DELTA, + "to_self_delay": 20, + } + + return appointment_data + + +def test_appointment_life_cycle(): + commitment_tx, penalty_tx = create_txs() + appointment_data = build_appointment_data(commitment_tx, penalty_tx) + + # We'll use pisa_cli to add the appointment. The expected input format is a list of arguments with a json-encoded + # appointment + pisa_cli.pisa_api_server = HOST + pisa_cli.pisa_api_port = PORT + + response = pisa_cli.add_appointment([json.dumps(appointment_data)]) + assert response is True + + # Broadcast the commitment transaction and mine a block + new_addr = bitcoin_cli.getnewaddress() + bitcoin_cli.sendrawtransaction(commitment_tx) + bitcoin_cli.generatetoaddress(1, new_addr) + + # Check that the justice has been triggered (the appointment has moved from Watcher to Responder) + locator = compute_locator(appointment_data.get("tx_id")) + + # Let's add a bit of delay so the state can be updated + sleep(1) + appointment_info = pisa_cli.get_appointment([locator]) + + assert appointment_info is not None + assert len(appointment_info) == 1 + assert appointment_info[0].get("status") == "dispute_responded" + + # Now let's mine some blocks so the appointment reaches its end. + # Since we are running all the nodes remotely data may take more time than normal, and some confirmations may be + # missed, so we generate more than enough confirmations and add some delays. + for _ in range(int(1.5 * END_TIME_DELTA)): + sleep(1) + bitcoin_cli.generatetoaddress(1, new_addr) + + appointment_info = pisa_cli.get_appointment([locator]) + assert appointment_info[0].get("status") == "not_found" From b32a8672835d3dc0993980484e93f9371ccf7a86 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 15:32:30 +0100 Subject: [PATCH 04/93] Moves db_manager instantiation before checking the connection with bitcoin As it was until now, if the connection with bitcoind failed, the command would have also failed, since the db_manager was not yet defined. 
--- pisa/pisad.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 6bc8b9c..2643a19 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -84,6 +84,7 @@ if __name__ == "__main__": pass pisa_config = load_config(conf) + db_manager = DBManager(pisa_config.get("DB_PATH")) if not can_connect_to_bitcoind(): logger.error("Can't connect to bitcoind. Shutting down") @@ -93,8 +94,6 @@ if __name__ == "__main__": else: try: - db_manager = DBManager(pisa_config.get("DB_PATH")) - # Create the chain monitor and start monitoring the chain chain_monitor = ChainMonitor() chain_monitor.monitor_chain() From 07c9b7d19e6fc6eb995ca76cea024315cbd623ef Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 17:46:14 +0100 Subject: [PATCH 05/93] Updates error message for `RPC_VERIFY_REJECTED` rpc error. --- pisa/carrier.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pisa/carrier.py b/pisa/carrier.py index d4029d4..00602e9 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -63,8 +63,8 @@ class Carrier: # Since we're pushing a raw transaction to the network we can face several rejections if errno == RPC_VERIFY_REJECTED: # DISCUSS: 37-transaction-rejection - # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. - receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + receipt = Receipt(delivered=False, reason=RPC_VERIFY_REJECTED) + logger.error("Transaction couldn't be broadcast", error=e.error) elif errno == RPC_VERIFY_ERROR: # DISCUSS: 37-transaction-rejection From dad70eb780c9f2e3b89b8312980c25720b4f8d3a Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 17:47:05 +0100 Subject: [PATCH 06/93] Moves update_delete_locator_map functionality to its own method. --- pisa/cleaner.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 6b8d73e..cd87e4c 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -118,15 +118,19 @@ class Cleaner: db_manager.delete_triggered_appointment_flag(uuid) # Update / delete the locator map - locator_map = db_manager.load_locator_map(locator) - if locator_map is not None: - if uuid in locator_map: - if len(locator_map) == 1: - db_manager.delete_locator_map(locator) - else: - locator_map.remove(uuid) - db_manager.store_update_locator_map(locator, locator_map) + Cleaner.update_delete_locator_map(locator, uuid, db_manager) + + @staticmethod + def update_delete_locator_map(locator, uuid, db_manager): + locator_map = db_manager.load_locator_map(locator) + if locator_map is not None: + if uuid in locator_map: + if len(locator_map) == 1: + db_manager.delete_locator_map(locator) else: - logger.error("UUID not found in the db", uuid=uuid) + locator_map.remove(uuid) + db_manager.store_update_locator_map(locator, locator_map) else: - logger.error("Locator not found in the db", uuid=uuid) + logger.error("UUID not found in the db", uuid=uuid) + else: + logger.error("Locator not found in the db", uuid=uuid) From 0f887feb4dc329735de285667c5c5316b5373ca8 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 17:47:45 +0100 Subject: [PATCH 07/93] Deletes appointment from Watcher's db if a tracker cannot be added There was a bug in the Responder where, if a tracker could not be added (e.g. malformed tx) the data was dropped but never removed from the Watcher's db. 
This is due to flagging appointments as triggered instead of deleting them from the Watcher's db straight away (in order to deal with reorgs in the future). If that approach is not followed, and appointments are removed from the Watcher's db once they are passed to the Responder, this should be removed since it would be redundant. --- pisa/responder.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pisa/responder.py b/pisa/responder.py index 39f3777..d4866e1 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -213,7 +213,11 @@ class Responder: logger.warning( "Tracker cannot be created", reason=receipt.reason, uuid=uuid, on_sync=self.on_sync(block_hash) ) - pass + + # FIXME: This is only necessary because of the triggered appointment approach. Remove if it changes. + self.db_manager.delete_watcher_appointment(uuid) + self.db_manager.delete_triggered_appointment_flag(uuid) + Cleaner.update_delete_locator_map(locator, uuid, self.db_manager) return receipt From 404952415dba006c29c3ce86ceed808e64831d4d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 17:51:01 +0100 Subject: [PATCH 08/93] Creates conftest for e2e tests --- test/pisa/e2e/conftest.py | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 test/pisa/e2e/conftest.py diff --git a/test/pisa/e2e/conftest.py b/test/pisa/e2e/conftest.py new file mode 100644 index 0000000..9aa4f83 --- /dev/null +++ b/test/pisa/e2e/conftest.py @@ -0,0 +1,69 @@ +import pytest +from decimal import Decimal, getcontext + +import pisa.conf as conf +from pisa.utils.auth_proxy import AuthServiceProxy + +getcontext().prec = 10 +END_TIME_DELTA = 10 + + +@pytest.fixture() +def bitcoin_cli(): + # return AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, 18444)) + return AuthServiceProxy( + "http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, conf.BTC_RPC_PORT) + ) + + +@pytest.fixture() +def create_txs(bitcoin_cli): + utxos = bitcoin_cli.listunspent() + + if len(utxos) == 0: + raise ValueError("There're no UTXOs.") + + commitment_tx_ins = {"txid": utxos[0].get("txid"), "vout": utxos[0].get("vout")} + commitment_tx_outs = {utxos[0].get("address"): utxos[0].get("amount") - Decimal(1 / pow(10, 5))} + + raw_commitment_tx = bitcoin_cli.createrawtransaction([commitment_tx_ins], commitment_tx_outs) + signed_commitment_tx = bitcoin_cli.signrawtransactionwithwallet(raw_commitment_tx) + + if not signed_commitment_tx.get("complete"): + raise ValueError("Couldn't sign transaction. {}".format(signed_commitment_tx)) + + decoded_commitment_tx = bitcoin_cli.decoderawtransaction(signed_commitment_tx.get("hex")) + + penalty_tx_ins = {"txid": decoded_commitment_tx.get("txid"), "vout": 0} + address = decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("addresses")[0] + penalty_tx_outs = {address: decoded_commitment_tx.get("vout")[0].get("value") - Decimal(1 / pow(10, 5))} + + orphan_info = { + "txid": decoded_commitment_tx.get("txid"), + "scriptPubKey": decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("hex"), + "vout": 0, + "amount": decoded_commitment_tx.get("vout")[0].get("value"), + } + + raw_penalty_tx = bitcoin_cli.createrawtransaction([penalty_tx_ins], penalty_tx_outs) + signed_penalty_tx = bitcoin_cli.signrawtransactionwithwallet(raw_penalty_tx, [orphan_info]) + + if not signed_penalty_tx.get("complete"): + raise ValueError("Couldn't sign orphan transaction. 
{}".format(signed_commitment_tx)) + + return signed_commitment_tx.get("hex"), signed_penalty_tx.get("hex") + + +def build_appointment_data(bitcoin_cli, commitment_tx, penalty_tx): + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + current_height = bitcoin_cli.getblockcount() + + appointment_data = { + "tx": penalty_tx, + "tx_id": commitment_tx_id, + "start_time": current_height + 1, + "end_time": current_height + 1 + END_TIME_DELTA, + "to_self_delay": 20, + } + + return appointment_data From f91475c61bf907006e7351163c98379f52d836fc Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 17 Jan 2020 17:51:09 +0100 Subject: [PATCH 09/93] Updates life cycle tests and adds malformed tx tests --- test/pisa/e2e/test_basic_e2e.py | 150 ++++++++++++++------------------ 1 file changed, 65 insertions(+), 85 deletions(-) diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index b635ea6..1fddef0 100644 --- a/test/pisa/e2e/test_basic_e2e.py +++ b/test/pisa/e2e/test_basic_e2e.py @@ -1,105 +1,58 @@ import json from time import sleep -from decimal import Decimal, getcontext +from riemann.tx import Tx -import pisa.conf as conf from pisa import HOST, PORT -from pisa.utils.auth_proxy import AuthServiceProxy - -from common.tools import compute_locator - from apps.cli import pisa_cli +from pisa.utils.auth_proxy import JSONRPCException +from common.tools import compute_locator +from test.pisa.e2e.conftest import END_TIME_DELTA, build_appointment_data + +# We'll use pisa_cli to add appointments. The expected input format is a list of arguments with a json-encoded +# appointment +pisa_cli.pisa_api_server = HOST +pisa_cli.pisa_api_port = PORT -getcontext().prec = 10 - -bitcoin_cli = AuthServiceProxy( - "http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, 18444) -) - -END_TIME_DELTA = 10 - - -def create_txs(): - utxos = bitcoin_cli.listunspent() - - if len(utxos) == 0: - raise ValueError("There's no UTXOs.") - - commitment_tx_ins = {"txid": utxos[0].get("txid"), "vout": utxos[0].get("vout")} - commitment_tx_outs = {utxos[0].get("address"): utxos[0].get("amount") - Decimal(1 / pow(10, 5))} - - raw_commitment_tx = bitcoin_cli.createrawtransaction([commitment_tx_ins], commitment_tx_outs) - signed_commitment_tx = bitcoin_cli.signrawtransactionwithwallet(raw_commitment_tx) - - if not signed_commitment_tx.get("complete"): - raise ValueError("Couldn't sign transaction. {}".format(signed_commitment_tx)) - - decoded_commitment_tx = bitcoin_cli.decoderawtransaction(signed_commitment_tx.get("hex")) - - penalty_tx_ins = {"txid": decoded_commitment_tx.get("txid"), "vout": 0} - address = decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("addresses")[0] - penalty_tx_outs = {address: decoded_commitment_tx.get("vout")[0].get("value") - Decimal(1 / pow(10, 5))} - - orphan_info = { - "txid": decoded_commitment_tx.get("txid"), - "scriptPubKey": decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("hex"), - "vout": 0, - "amount": decoded_commitment_tx.get("vout")[0].get("value"), - } - - raw_penalty_tx = bitcoin_cli.createrawtransaction([penalty_tx_ins], penalty_tx_outs) - signed_penalty_tx = bitcoin_cli.signrawtransactionwithwallet(raw_penalty_tx, [orphan_info]) - - if not signed_penalty_tx.get("complete"): - raise ValueError("Couldn't sign orphan transaction. 
{}".format(signed_commitment_tx)) - - return signed_commitment_tx.get("hex"), signed_penalty_tx.get("hex") - - -def build_appointment_data(commitment_tx, penalty_tx): - commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") - current_height = bitcoin_cli.getblockcount() - - appointment_data = { - "tx": penalty_tx, - "tx_id": commitment_tx_id, - "start_time": current_height + 1, - "end_time": current_height + 1 + END_TIME_DELTA, - "to_self_delay": 20, - } - - return appointment_data - - -def test_appointment_life_cycle(): - commitment_tx, penalty_tx = create_txs() - appointment_data = build_appointment_data(commitment_tx, penalty_tx) - - # We'll use pisa_cli to add the appointment. The expected input format is a list of arguments with a json-encoded - # appointment - pisa_cli.pisa_api_server = HOST - pisa_cli.pisa_api_port = PORT - - response = pisa_cli.add_appointment([json.dumps(appointment_data)]) - assert response is True - +def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr): # Broadcast the commitment transaction and mine a block - new_addr = bitcoin_cli.getnewaddress() bitcoin_cli.sendrawtransaction(commitment_tx) - bitcoin_cli.generatetoaddress(1, new_addr) + bitcoin_cli.generatetoaddress(1, addr) + +def get_appointment_info(locator): # Check that the justice has been triggered (the appointment has moved from Watcher to Responder) + sleep(1) # Let's add a bit of delay so the state can be updated + return pisa_cli.get_appointment([locator]) + + +def test_appointment_life_cycle(bitcoin_cli, create_txs): + commitment_tx, penalty_tx = create_txs + appointment_data = build_appointment_data(bitcoin_cli, commitment_tx, penalty_tx) locator = compute_locator(appointment_data.get("tx_id")) - # Let's add a bit of delay so the state can be updated - sleep(1) - appointment_info = pisa_cli.get_appointment([locator]) + assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + + new_addr = bitcoin_cli.getnewaddress() + broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) + + appointment_info = get_appointment_info(locator) assert appointment_info is not None assert len(appointment_info) == 1 assert appointment_info[0].get("status") == "dispute_responded" + # It can be also checked by ensuring that the penalty transaction made it to the network + penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid") + + try: + bitcoin_cli.getrawtransaction(penalty_tx_id) + assert True + + except JSONRPCException: + # If the transaction if not found. + assert False + # Now let's mine some blocks so the appointment reaches its end. # Since we are running all the nodes remotely data may take more time than normal, and some confirmations may be # missed, so we generate more than enough confirmations and add some delays. 
@@ -107,5 +60,32 @@ def test_appointment_life_cycle(): sleep(1) bitcoin_cli.generatetoaddress(1, new_addr) - appointment_info = pisa_cli.get_appointment([locator]) + appointment_info = get_appointment_info(locator) + assert appointment_info[0].get("status") == "not_found" + + +def test_appointment_malformed_penalty(bitcoin_cli, create_txs): + # Lets start by creating two valid transaction + commitment_tx, penalty_tx = create_txs + + # Now we can modify the penalty so it is invalid when broadcast + mod_penalty_tx = Tx.from_hex(penalty_tx) + tx_in = mod_penalty_tx.tx_ins[0].copy(redeem_script=b"") + mod_penalty_tx = mod_penalty_tx.copy(tx_ins=[tx_in]) + + appointment_data = build_appointment_data(bitcoin_cli, commitment_tx, mod_penalty_tx.hex()) + locator = compute_locator(appointment_data.get("tx_id")) + + assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + + # Broadcast the commitment transaction and mine a block + new_addr = bitcoin_cli.getnewaddress() + broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) + + # The appointment should have been removed since the penalty_tx was malformed. + sleep(1) + appointment_info = get_appointment_info(locator) + + assert appointment_info is not None + assert len(appointment_info) == 1 assert appointment_info[0].get("status") == "not_found" From 2c8c8c7323303ad32556635a1ab9e064c53c5668 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 10:48:11 +0100 Subject: [PATCH 10/93] Updates circle-ci with basic e2e infraestructure --- .circleci/config.yml | 22 ++++++++++++++++------ test/pisa/e2e/bitcoin.conf | 27 +++++++++++++++++++++++++++ test/pisa/e2e/conf.py | 19 +++++++++++++++++++ 3 files changed, 62 insertions(+), 6 deletions(-) create mode 100644 test/pisa/e2e/bitcoin.conf create mode 100644 test/pisa/e2e/conf.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 41711ad..da9ac56 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -32,7 +32,8 @@ jobs: command: | python3 -m venv venv . venv/bin/activate - pip install -r pisa/requirements.txt + sudo pip install --upgrade pip + pip install -r pisa/requirements.txt pip install -r pisa/requirements-dev.txt pip install -r apps/cli/requirements-dev.txt @@ -41,11 +42,20 @@ jobs: - ./venv key: v1-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} - # run tests! - # this example uses Django's built-in test-runner - # other common Python testing frameworks include pytest and nose - # https://pytest.org - # https://nose.readthedocs.io + # Build docker env for E2E testing + - run: + name: build bitcoin_sandbox + command: | + git clone --single-branch --branch ln https://github.com/sr-gi/bitcoin_sandbox.git + . 
venv/bin/activate + pip install -r bitcoin_sandbox/requirements.txt + cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ + cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ + mv bitcoin_sandbox/bitcoin_sandbox venv/lib/python3.6/site-packages + python venv/lib/python3.6/site-packages/bitcoin_sandbox/run_scenarios.py + + + # Run unit tests - run: name: create config command: cp pisa/sample_conf.py pisa/conf.py diff --git a/test/pisa/e2e/bitcoin.conf b/test/pisa/e2e/bitcoin.conf new file mode 100644 index 0000000..92c9105 --- /dev/null +++ b/test/pisa/e2e/bitcoin.conf @@ -0,0 +1,27 @@ +# [network] +dnsseed=0 + +# [debug] +regtest=1 +debug=1 +logips=1 + +# [rpc] +server=1 +rpcuser=user +rpcpassword=passwd +rpcallowip=0.0.0.0/0 +rpcservertimeout=300 + +# [zmq] +zmqpubhashblock=tcp://0.0.0.0:28332 +zmqpubrawblock=tcp://0.0.0.0:28332 +zmqpubrawtx=tcp://0.0.0.0:28333 + +# [blockchain] +txindex=1 + +# There are some parameters that only work in the specific on regtest if specified in the regtest section +[regtest] +rpcbind=0.0.0.0 +rpcport=18443 \ No newline at end of file diff --git a/test/pisa/e2e/conf.py b/test/pisa/e2e/conf.py new file mode 100644 index 0000000..cbdd3d0 --- /dev/null +++ b/test/pisa/e2e/conf.py @@ -0,0 +1,19 @@ +# Copy this file with your own configuration and save it as conf.py + +# Docker +DOCK_NETWORK_NAME = "pisa_net" +DOCK_NETWORK_SUBNET = "172.16.0.0/16" +DOCK_NETWORK_GW = "172.16.0.1" +DOCK_CONTAINER_NAME_PREFIX = "btc_n" +DOCK_IMAGE_NAME = "sandbox_btc" +DOCKER_INI_PORT_MAPPING = 22000 +DOCKER_RPC_PORT_MAPPING = 18444 +DOCKER_ZMQ_BLOCK_PORT_MAPPING = 28334 + +# Log +LOG_FILE = "bitcoin_sandbox.log" + +# Graphs +BITCOIN_GRAPH_FILE = "./graphs/basic3.graphml" +LN_GRAPH_FILE = "./graphs/basic3_ln.graphml" +DEFAULT_LN_GRAPH_WEIGHT = 10000 From b38ad70352200ab762a661fc9efc9674c7254b22 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 12:05:45 +0100 Subject: [PATCH 11/93] Changes circle-ci base image to Ubuntu --- .circleci/config.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index da9ac56..21d8b60 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,10 +5,8 @@ version: 2 jobs: build: - docker: - # specify the version you desire here - # use `-browsers` prefix for selenium tests, e.g. `3.6.1-browsers` - - image: circleci/python:3.6.1 + machine: + image: ubuntu-1604:201903-01 # Specify service dependencies here if necessary # CircleCI maintains a library of pre-built images @@ -30,6 +28,7 @@ jobs: - run: name: install dependencies command: | + pyenv local 3.7.0 python3 -m venv venv . 
venv/bin/activate sudo pip install --upgrade pip @@ -51,8 +50,8 @@ jobs: pip install -r bitcoin_sandbox/requirements.txt cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ - mv bitcoin_sandbox/bitcoin_sandbox venv/lib/python3.6/site-packages - python venv/lib/python3.6/site-packages/bitcoin_sandbox/run_scenarios.py + mv bitcoin_sandbox/bitcoin_sandbox venv/lib/python3.7/site-packages + python venv/lib/python3.7/site-packages/bitcoin_sandbox/run_scenarios.py # Run unit tests From 5286cbf0ac4a84ee0a353bf69666813ed3c0795d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 12:43:45 +0100 Subject: [PATCH 12/93] Clears circle-ci caches --- .circleci/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 21d8b60..58a6ef4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,9 +21,9 @@ jobs: # Download and cache dependencies - restore_cache: keys: - - v1-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} + - v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} # fallback to using the latest cache if no exact match is found - - v1-dependencies- + - v2-dependencies- - run: name: install dependencies @@ -39,7 +39,7 @@ jobs: - save_cache: paths: - ./venv - key: v1-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} + key: v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} # Build docker env for E2E testing - run: From 77d678a4ca501c4bb665e9fbd2f537a5c13fdd94 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 13:01:21 +0100 Subject: [PATCH 13/93] Updates paths for bitcoin_sandbox --- .circleci/config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 58a6ef4..d28557a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -50,8 +50,7 @@ jobs: pip install -r bitcoin_sandbox/requirements.txt cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ - mv bitcoin_sandbox/bitcoin_sandbox venv/lib/python3.7/site-packages - python venv/lib/python3.7/site-packages/bitcoin_sandbox/run_scenarios.py + cd bitcoin_sandbox && python3 bitcoin_sandbox/run_scenarios.py # Run unit tests From ed31be8a03e533a221e054519b833e03a6790412 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 13:18:12 +0100 Subject: [PATCH 14/93] Updates circle-ci to cache bitcoin_sandbox requirements --- .circleci/config.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d28557a..8832e7a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -18,10 +18,15 @@ jobs: steps: - checkout + # Get bitcoin_sandbox + - run: + name: get bitcoin_sandbox + command: git clone --single-branch --branch ln https://github.com/sr-gi/bitcoin_sandbox.git + # Download and cache dependencies - restore_cache: keys: - - v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} + - 
v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }}-{{ checksum "bitcoin_sandbox/requirements.txt" }} # fallback to using the latest cache if no exact match is found - v2-dependencies- @@ -35,22 +40,21 @@ jobs: pip install -r pisa/requirements.txt pip install -r pisa/requirements-dev.txt pip install -r apps/cli/requirements-dev.txt + pip install -r bitcoin_sandbox/requirements.txt - save_cache: paths: - ./venv - key: v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }} + key: v2-dependencies-{{ checksum "pisa/requirements.txt" }}-{{ checksum "pisa/requirements-dev.txt" }}-{{ checksum "apps/cli/requirements-dev.txt" }}-{{ checksum "bitcoin_sandbox/requirements.txt" }} # Build docker env for E2E testing - run: name: build bitcoin_sandbox command: | - git clone --single-branch --branch ln https://github.com/sr-gi/bitcoin_sandbox.git - . venv/bin/activate - pip install -r bitcoin_sandbox/requirements.txt cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ - cd bitcoin_sandbox && python3 bitcoin_sandbox/run_scenarios.py + . venv/bin/activate + cd bitcoin_sandbox && python -m bitcoin_sandbox.run_scenarios # Run unit tests From a3423a01e773ee9b903581e0bba77675f5d523d7 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 15:07:38 +0100 Subject: [PATCH 15/93] Updates Dockerfile to not build ln We cannot use docker caching with the current circleci plan, so we'll build a lighter image for now --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8832e7a..991f303 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,6 +53,7 @@ jobs: command: | cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ + cp bitcoin_sandbox/docker/Dockerfile_ubuntu_no_ln bitcoin_sandbox/Dockerfile . venv/bin/activate cd bitcoin_sandbox && python -m bitcoin_sandbox.run_scenarios From 892e25e9715413650645f17dcd23a86f3c6b3a49 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 20 Jan 2020 15:14:04 +0100 Subject: [PATCH 16/93] ocd fixes --- .circleci/config.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 991f303..253a4d8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -20,7 +20,7 @@ jobs: # Get bitcoin_sandbox - run: - name: get bitcoin_sandbox + name: Get bitcoin_sandbox command: git clone --single-branch --branch ln https://github.com/sr-gi/bitcoin_sandbox.git # Download and cache dependencies @@ -31,7 +31,7 @@ jobs: - v2-dependencies- - run: - name: install dependencies + name: Install dependencies command: | pyenv local 3.7.0 python3 -m venv venv @@ -49,7 +49,7 @@ jobs: # Build docker env for E2E testing - run: - name: build bitcoin_sandbox + name: Build bitcoin_sandbox command: | cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/ cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/ @@ -60,23 +60,23 @@ jobs: # Run unit tests - run: - name: create config + name: Create pisa config command: cp pisa/sample_conf.py pisa/conf.py - run: - name: run pisa unit tests + name: Run pisa unit tests command: | . venv/bin/activate pytest test/pisa/unit/ - run: - name: run common unit tests + name: Run common unit tests command: | . 
venv/bin/activate
            pytest test/common/unit

       - run:
-          name: run cli unit tests
+          name: Run cli unit tests
           command: |
             . venv/bin/activate
             pytest test/apps/cli/unit

From 4eb55eaacc5fe1e5afd44694b8916e85d4637ec2 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 20 Jan 2020 16:19:45 +0100
Subject: [PATCH 17/93] Raises exception on missing cli keys

load_key_file_data was returning False on FileNotFoundError, but get_pk
was expecting an exception
---
 apps/cli/pisa_cli.py                       | 6 +++---
 test/pisa/e2e/{conf.py => sandbox-conf.py} | 0
 2 files changed, 3 insertions(+), 3 deletions(-)
 rename test/pisa/e2e/{conf.py => sandbox-conf.py} (100%)

diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index 56074de..6a5bebd 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -61,13 +61,13 @@ def load_key_file_data(file_name):
             key = key_file.read()
         return key

-    except FileNotFoundError:
+    except FileNotFoundError as e:
         logger.error("Client's key file not found. Please check your settings.")
-        return False
+        raise e

     except IOError as e:
         logger.error("I/O error({}): {}".format(e.errno, e.strerror))
-        return False
+        raise e


 # Makes sure that the folder APPOINTMENTS_FOLDER_NAME exists, then saves the appointment and signature in it.
diff --git a/test/pisa/e2e/conf.py b/test/pisa/e2e/sandbox-conf.py
similarity index 100%
rename from test/pisa/e2e/conf.py
rename to test/pisa/e2e/sandbox-conf.py

From 396c56e5db6378cdfda17acb24d56ef9197d546f Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 20 Jan 2020 16:21:15 +0100
Subject: [PATCH 18/93] Adds name parameter to generate_key so the name can be
 chosen from the terminal

---
 apps/generate_key.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/apps/generate_key.py b/apps/generate_key.py
index 5d9da59..74ba84c 100644
--- a/apps/generate_key.py
+++ b/apps/generate_key.py
@@ -1,5 +1,6 @@
 import os.path
-from sys import exit
+from getopt import getopt
+from sys import argv, exit

 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import serialization
@@ -9,9 +10,6 @@ from cryptography.hazmat.primitives.asymmetric import ec
 # Simple tool to generate an ECDSA private key using the secp256k1 curve and save private and public keys
 # as 'pisa_sk.der' and 'pisa_pk.der', respectively.

-SK_FILE_NAME = "../pisa_sk.der"
-PK_FILE_NAME = "../pisa_pk.der"
-

 def save_sk(sk, filename):
     der = sk.private_bytes(
@@ -31,6 +29,16 @@ def save_pk(pk, filename):


 if __name__ == "__main__":
+    name = "pisa"
+
+    opts, _ = getopt(argv[1:], "n:", ["name"])
+    for opt, arg in opts:
+        if opt in ["-n", "--name"]:
+            name = arg
+
+    SK_FILE_NAME = "../{}_sk.der".format(name)
+    PK_FILE_NAME = "../{}_pk.der".format(name)
+
     if os.path.exists(SK_FILE_NAME):
         print('A key with name "{}" already exists. Aborting.'.format(SK_FILE_NAME))
         exit(1)

From 18cd2a7499008754930398d7fadb3fe1d1073311 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 20 Jan 2020 16:24:33 +0100
Subject: [PATCH 19/93] Adds pisa conf for e2e testing and initial setup for
 the regtest node

---
 test/pisa/e2e/conftest.py  |  7 +++++++
 test/pisa/e2e/pisa-conf.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+)
 create mode 100644 test/pisa/e2e/pisa-conf.py

diff --git a/test/pisa/e2e/conftest.py b/test/pisa/e2e/conftest.py
index 9aa4f83..72e6194 100644
--- a/test/pisa/e2e/conftest.py
+++ b/test/pisa/e2e/conftest.py
@@ -18,6 +18,7 @@ def bitcoin_cli():

 @pytest.fixture()
 def create_txs(bitcoin_cli):
+    set_up_node(bitcoin_cli)
     utxos = bitcoin_cli.listunspent()

     if len(utxos) == 0:
@@ -54,6 +55,12 @@ def create_txs(bitcoin_cli):
     return signed_commitment_tx.get("hex"), signed_penalty_tx.get("hex")


+def set_up_node(bitcoin_cli):
+    # This method will create a new address and mine bitcoin so the node can be used for testing
+    new_addr = bitcoin_cli.getnewaddress()
+    bitcoin_cli.generatetoaddress(101, new_addr)
+
+
 def build_appointment_data(bitcoin_cli, commitment_tx, penalty_tx):
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
     current_height = bitcoin_cli.getblockcount()
diff --git a/test/pisa/e2e/pisa-conf.py b/test/pisa/e2e/pisa-conf.py
new file mode 100644
index 0000000..83fe719
--- /dev/null
+++ b/test/pisa/e2e/pisa-conf.py
@@ -0,0 +1,31 @@
+# bitcoind
+BTC_RPC_USER = "user"
+BTC_RPC_PASSWD = "passwd"
+BTC_RPC_HOST = "localhost"
+BTC_RPC_PORT = 18445
+BTC_NETWORK = "regtest"
+
+# CHAIN MONITOR
+POLLING_DELTA = 60
+BLOCK_WINDOW_SIZE = 10
+
+# ZMQ
+FEED_PROTOCOL = "tcp"
+FEED_ADDR = "127.0.0.1"
+FEED_PORT = 28335
+
+# PISA
+MAX_APPOINTMENTS = 100
+EXPIRY_DELTA = 6
+MIN_TO_SELF_DELAY = 20
+SERVER_LOG_FILE = "pisa.log"
+PISA_SECRET_KEY = "pisa_sk.der"
+
+# PISA-CLI
+CLIENT_LOG_FILE = "pisa.log"
+
+# TEST
+TEST_LOG_FILE = "test.log"
+
+# LEVELDB
+DB_PATH = "appointments"

From ff4e7f2b6733c4d4e45a7ae90b0ac50dd9b7113c Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 20 Jan 2020 16:28:20 +0100
Subject: [PATCH 20/93] Adds pisa setup and e2e test run to circle-ci

---
 .circleci/config.yml | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 253a4d8..da0ba21 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -52,7 +52,7 @@ jobs:
           name: Build bitcoin_sandbox
           command: |
             cp test/pisa/e2e/bitcoin.conf bitcoin_sandbox/
-            cp test/pisa/e2e/conf.py bitcoin_sandbox/bitcoin_sandbox/
+            cp test/pisa/e2e/sandbox-conf.py bitcoin_sandbox/bitcoin_sandbox/conf.py
             cp bitcoin_sandbox/docker/Dockerfile_ubuntu_no_ln bitcoin_sandbox/Dockerfile
             . venv/bin/activate
             cd bitcoin_sandbox && python -m bitcoin_sandbox.run_scenarios
@@ -81,6 +81,25 @@ jobs:
             . venv/bin/activate
             pytest test/apps/cli/unit

+      # Setup pisa for E2E testing
+      - run:
+          name: Setup pisa
+          command: |
+            . venv/bin/activate
+            cp test/pisa/e2e/pisa-conf.py pisa/conf.py
+            cd apps/
+            python3 -m generate_key
+            python3 -m generate_key -n cli
+
+
+      # Run E2E tests
+      - run:
+          name: Run e2e tests
+          command: |
+            . venv/bin/activate
+            python3 -m pisa.pisad &
+            pytest test/pisa/e2e/
+
 #      - store_artifacts:
 #          path: test-reports
 #          destination: test-reports

From d7e91d34e738570e763eff0cce6268ec23a4be1f Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 20 Jan 2020 16:40:58 +0100
Subject: [PATCH 21/93] Updates cli test to expect an exception instead of False

---
 test/apps/cli/unit/test_pisa_cli.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_pisa_cli.py
index b6b0219..bed98f5 100644
--- a/test/apps/cli/unit/test_pisa_cli.py
+++ b/test/apps/cli/unit/test_pisa_cli.py
@@ -1,3 +1,4 @@
+import pytest
 import responses
 import json
 import os
@@ -150,8 +151,8 @@ def test_load_key_file_data():
     os.remove("key_test_file")

     # If file doesn't exist, function should fail.
-    appt_data = pisa_cli.load_key_file_data("nonexistent_file")
-    assert not appt_data
+    with pytest.raises(FileNotFoundError):
+        assert pisa_cli.load_key_file_data("nonexistent_file")


 def test_save_signed_appointment(monkeypatch):

From 568418a18e2b3a921a01085c92bfae08a69ae5b5 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 21 Jan 2020 12:07:05 +0100
Subject: [PATCH 22/93] Simplifies pisa_cli.post_data_to_add_appointment_endpoint

The appointment endpoint was a parameter even though the method is only
used to send data to a single endpoint. The json data is only used
inside the method, so it can be computed there too.
---
 apps/cli/pisa_cli.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index 6a5bebd..eccd1ab 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -121,11 +121,8 @@ def add_appointment(args):

     data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}

-    appointment_json = json.dumps(data, sort_keys=True, separators=(",", ":"))
-
     # Send appointment to the server.
-    add_appointment_endpoint = "http://{}:{}".format(pisa_api_server, pisa_api_port)
-    response_json = post_data_to_add_appointment_endpoint(add_appointment_endpoint, appointment_json)
+    response_json = post_data_to_add_appointment_endpoint(data)

     if response_json is None:
         return False
@@ -193,11 +190,12 @@ def parse_add_appointment_args(args):


 # Sends appointment data to add_appointment endpoint to be processed by the server.
-def post_data_to_add_appointment_endpoint(add_appointment_endpoint, appointment_json):
+def post_data_to_add_appointment_endpoint(data):
     logger.info("Sending appointment to PISA")

     try:
-        r = requests.post(url=add_appointment_endpoint, json=appointment_json, timeout=5)
+        add_appointment_endpoint = "http://{}:{}".format(pisa_api_server, pisa_api_port)
+        r = requests.post(url=add_appointment_endpoint, json=json.dumps(data), timeout=5)

         response_json = r.json()

From 3605590fa4ee87bfda7af3e013cbddf07df927e4 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 21 Jan 2020 12:07:44 +0100
Subject: [PATCH 23/93] Updates cli unit tests to match the new
 post_data_to_add_appointment_endpoint method

---
 test/apps/cli/unit/test_pisa_cli.py | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_pisa_cli.py
index bed98f5..4927c7a 100644
--- a/test/apps/cli/unit/test_pisa_cli.py
+++ b/test/apps/cli/unit/test_pisa_cli.py
@@ -42,7 +42,7 @@ pisa_cli.pisa_public_key = pisa_pk
 # Replace endpoint with dummy one
 pisa_cli.pisa_api_server = "dummy.com"
 pisa_cli.pisa_api_port = 12345
-pisa_endpoint = pisa_cli.pisa_api_server + ":" + str(pisa_cli.pisa_api_port)
+pisa_endpoint = "http://{}:{}/".format(pisa_cli.pisa_api_server, pisa_cli.pisa_api_port)

 dummy_appointment_request = {
     "tx": get_random_value_hex(192),
@@ -106,13 +106,12 @@ def test_add_appointment(monkeypatch):

     response = {"locator": dummy_appointment.to_dict()["locator"], "signature": get_dummy_signature()}

-    request_url = "http://{}/".format(pisa_endpoint)
-    responses.add(responses.POST, request_url, json=response, status=200)
+    responses.add(responses.POST, pisa_endpoint, json=response, status=200)

     result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)])

     assert len(responses.calls) == 1
-    assert responses.calls[0].request.url == request_url
+    assert responses.calls[0].request.url == pisa_endpoint
     assert result

@@ -132,8 +131,7 @@ def test_add_appointment_with_invalid_signature(monkeypatch):
         "signature": get_bad_signature(),  # Sign with a bad key
     }

-    request_url = "http://{}/".format(pisa_endpoint)
-    responses.add(responses.POST, request_url, json=response, status=200)
+    responses.add(responses.POST, pisa_endpoint, json=response, status=200)

     result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)])

@@ -207,13 +205,12 @@ def test_post_data_to_add_appointment_endpoint():
         "signature": Cryptographer.sign(dummy_appointment.serialize(), pisa_sk),
     }

-    request_url = "http://{}/".format(pisa_endpoint)
-    responses.add(responses.POST, request_url, json=response, status=200)
+    responses.add(responses.POST, pisa_endpoint, json=response, status=200)

-    response = pisa_cli.post_data_to_add_appointment_endpoint(request_url, json.dumps(dummy_appointment_request))
+    response = pisa_cli.post_data_to_add_appointment_endpoint(json.dumps(dummy_appointment_request))

     assert len(responses.calls) == 1
-    assert responses.calls[0].request.url == request_url
+    assert responses.calls[0].request.url == pisa_endpoint
     assert response

@@ -237,7 +234,7 @@ def test_get_appointment():
     dummy_appointment_full["status"] = "being_watched"
     response = dummy_appointment_full

-    request_url = "http://{}/".format(pisa_endpoint) + "get_appointment?locator={}".format(response.get("locator"))
+    request_url = "{}get_appointment?locator={}".format(pisa_endpoint, response.get("locator"))
     responses.add(responses.GET, request_url, json=response, status=200)

     result = pisa_cli.get_appointment([response.get("locator")])
@@ -253,7 +250,7 @@ def test_get_appointment_err():
     locator = get_random_value_hex(32)

     # Test that get_appointment handles a connection error appropriately.
-    request_url = "http://{}/".format(pisa_endpoint) + "get_appointment?locator=".format(locator)
+    request_url = "{}get_appointment?locator=".format(pisa_endpoint, locator)
     responses.add(responses.GET, request_url, body=ConnectionError())

     assert not pisa_cli.get_appointment([locator])

From 200304cbce6f163fbb0888f966ddf501968478b5 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 21 Jan 2020 12:14:51 +0100
Subject: [PATCH 24/93] Removes unnecessary trailing dots from some log messages

---
 apps/cli/pisa_cli.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index eccd1ab..5e7c4cf 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -62,7 +62,7 @@ def load_key_file_data(file_name):
         return key

     except FileNotFoundError as e:
-        logger.error("Client's key file not found. Please check your settings.")
+        logger.error("Client's key file not found. Please check your settings")
         raise e

@@ -108,7 +108,7 @@ def add_appointment(args):
         appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(tx), tx_id)

     else:
-        logger.error("Appointment data is missing some fields.")
+        logger.error("Appointment data is missing some fields")
         return False

     appointment = Appointment.from_dict(appointment_data)
@@ -130,7 +130,7 @@ def add_appointment(args):
     signature = response_json.get("signature")
     # Check that the server signed the appointment as it should.
     if signature is None:
-        logger.error("The response does not contain the signature of the appointment.")
+        logger.error("The response does not contain the signature of the appointment")
         return False

     valid = check_signature(signature, appointment)

From 2559d143aa474c50569192da1335dc302fb8b74a Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 21 Jan 2020 19:06:29 +0100
Subject: [PATCH 25/93] Fixes logging

---
 common/cryptographer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/common/cryptographer.py b/common/cryptographer.py
index 319d7f1..67ff6e0 100644
--- a/common/cryptographer.py
+++ b/common/cryptographer.py
@@ -123,7 +123,7 @@ class Cryptographer:
         nonce = bytearray(12)

         logger.info(
-            "Decrypting Blob",
+            "Decrypting blob",
             sk=hexlify(sk).decode(),
             nonce=hexlify(nonce).decode(),
             encrypted_blob=encrypted_blob.data,
@@ -142,6 +142,7 @@ class Cryptographer:

         except InvalidTag:
             blob = None
+            logger.error("Can't decrypt blob with the provided key")

         return blob

From 5ead43163c67526e61ed532b326342dbc7b76196 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 21 Jan 2020 19:06:51 +0100
Subject: [PATCH 26/93] Refactors Cleaner to improve its modularity and fixes
 some small bugs

- Improves code modularity.
- Separates flag_triggered from delete_appointment, which was pretty misleading (and causing some minor bugs), and makes them work with lists instead of single objects.
- Removes unused import --- pisa/cleaner.py | 153 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 107 insertions(+), 46 deletions(-) diff --git a/pisa/cleaner.py b/pisa/cleaner.py index cd87e4c..37480a6 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,5 +1,4 @@ from common.logger import Logger -from common.appointment import Appointment logger = Logger("Cleaner") @@ -12,7 +11,73 @@ class Cleaner: """ @staticmethod - def delete_expired_appointment(expired_appointments, appointments, locator_uuid_map, db_manager): + def delete_appointment_from_memory(uuid, appointments, locator_uuid_map): + """ + Deletes an appointment from memory (appointments and locator_uuid_map dictionaries). If the given appointment + does not share locator with any other, the map will completely removed, otherwise, the uuid will be removed from + the map. + + Args: + uuid (:obj:`str`): the identifier of the appointment to be deleted. + appointments (:obj:`dict`): the appointments dictionary from where the appointment should be removed. + locator_uuid_map (:obj:`dict`): the locator:uuid map from where the appointment should also be removed. + """ + locator = appointments[uuid].get("locator") + + # Delete the appointment + appointments.pop(uuid) + + # If there was only one appointment that matches the locator we can delete the whole list + if len(locator_uuid_map[locator]) == 1: + locator_uuid_map.pop(locator) + else: + # Otherwise we just delete the appointment that matches locator:appointment_pos + locator_uuid_map[locator].remove(uuid) + + @staticmethod + def delete_appointment_from_db(uuid, db_manager): + """ + Deletes an appointment from the appointments database. + + Args: + uuid (:obj:`str`): the identifier of the appointment to be deleted. + db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the + database. + """ + + db_manager.delete_watcher_appointment(uuid) + db_manager.delete_triggered_appointment_flag(uuid) + + @staticmethod + def update_delete_db_locator_map(uuid, locator, db_manager): + """ + Updates the locator:uuid map of a given locator from the database by removing a given uuid. If the uuid is the + only element of the map, the map is deleted, otherwise the uuid is simply removed and the database is updated. + + If either the uuid of the locator are not found, the data is not modified. + + Args: + uuid (:obj:`str`): the identifier to be removed from the map. + locator (:obj:`str`): the identifier of the map to be either updated or deleted. + db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the + database. + """ + + locator_map = db_manager.load_locator_map(locator) + if locator_map is not None: + if uuid in locator_map: + if len(locator_map) == 1: + db_manager.delete_locator_map(locator) + else: + locator_map.remove(uuid) + db_manager.update_locator_map(locator, locator_map) + else: + logger.error("UUID not found in the db", uuid=uuid) + else: + logger.error("Locator not found in the db", uuid=uuid) + + @staticmethod + def delete_expired_appointments(expired_appointments, appointments, locator_uuid_map, db_manager): """ Deletes appointments which ``end_time`` has been reached (with no trigger) both from memory (:obj:`Watcher `) and disk. 
@@ -29,28 +94,24 @@ class Cleaner: for uuid in expired_appointments: locator = appointments[uuid].get("locator") - - appointments.pop(uuid) - - if len(locator_uuid_map[locator]) == 1: - locator_uuid_map.pop(locator) - - else: - locator_uuid_map[locator].remove(uuid) - logger.info("End time reached with no breach. Deleting appointment", locator=locator, uuid=uuid) - # Delete appointment from the db + Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) + Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) + + # Expired appointments are not flagged, so they can be deleted without caring about the db flag. db_manager.delete_watcher_appointment(uuid) @staticmethod - def delete_completed_appointment(uuid, appointments, locator_uuid_map, db_manager): + def delete_completed_appointments(completed_appointments, appointments, locator_uuid_map, db_manager): """ - Deletes a triggered appointment from memory (:obj:`Watcher `) and flags it as triggered in - disk. + Deletes a completed appointment from memory (:obj:`Watcher `) and disk. + + Currently, an appointment is only completed if it cannot make it to the (:obj:`Responder `), + otherwise, it will be flagged as triggered and removed once the tracker is completed. Args: - uuid (:obj:`str`): a unique 16-byte hex-encoded str that identifies the appointment. + completed_appointments (:obj:`list`): a list of appointments to be deleted. appointments (:obj:`dict`): a dictionary containing all the :obj:`Watcher ` appointments. locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher ` @@ -59,21 +120,37 @@ class Cleaner: database. """ - locator = appointments[uuid].get("locator") + for uuid in completed_appointments: + locator = appointments[uuid].get("locator") - # Delete the appointment - appointments.pop(uuid) + logger.warning( + "Appointment cannot be completed, it contains invalid data. Deleting", locator=locator, uuid=uuid + ) - # If there was only one appointment that matches the locator we can delete the whole list - if len(locator_uuid_map[locator]) == 1: - locator_uuid_map.pop(locator) - else: - # Otherwise we just delete the appointment that matches locator:appointment_pos - locator_uuid_map[locator].remove(uuid) + Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) + Cleaner.delete_appointment_from_db(uuid, db_manager) + Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) - # DISCUSS: instead of deleting the appointment, we will mark it as triggered and delete it from both - # the watcher's and responder's db after fulfilled - db_manager.create_triggered_appointment_flag(uuid) + @staticmethod + def flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager): + """ + Deletes a list of triggered appointment from memory (:obj:`Watcher `) and flags them as + triggered on disk. + + Args: + triggered_appointments (:obj:`list`): a list of appointments to be flagged as triggered on the database. + appointments (:obj:`dict`): a dictionary containing all the :obj:`Watcher ` + appointments. + locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map for the :obj:`Watcher ` + appointments. + db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the + database. 
+ """ + + for uuid in triggered_appointments: + logger.info("Flagging appointment as triggered", locator=appointments[uuid].get("locator"), uuid=uuid) + Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) + db_manager.create_triggered_appointment_flag(uuid) @staticmethod def delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager): @@ -113,24 +190,8 @@ class Cleaner: tx_tracker_map[penalty_txid].remove(uuid) # Delete appointment from the db (from watchers's and responder's db) and remove flag - db_manager.delete_watcher_appointment(uuid) db_manager.delete_responder_tracker(uuid) - db_manager.delete_triggered_appointment_flag(uuid) + Cleaner.delete_appointment_from_db(uuid, db_manager) # Update / delete the locator map - Cleaner.update_delete_locator_map(locator, uuid, db_manager) - - @staticmethod - def update_delete_locator_map(locator, uuid, db_manager): - locator_map = db_manager.load_locator_map(locator) - if locator_map is not None: - if uuid in locator_map: - if len(locator_map) == 1: - db_manager.delete_locator_map(locator) - else: - locator_map.remove(uuid) - db_manager.store_update_locator_map(locator, locator_map) - else: - logger.error("UUID not found in the db", uuid=uuid) - else: - logger.error("Locator not found in the db", uuid=uuid) + Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) From a95e30171339936c98b92a5b4687ab20b621856b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 21 Jan 2020 19:09:39 +0100 Subject: [PATCH 27/93] Renames some DBManager methods and adds new functionality - Refactors store_update_locator_map name to create_append_locator_map, which is a better fit. - Adds update_locator_map method to update maps by removing some uuids (store_update_locator_map was not covering this, what was misleading). - Adds missing docs. --- pisa/db_manager.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/pisa/db_manager.py b/pisa/db_manager.py index e337065..4670215 100644 --- a/pisa/db_manager.py +++ b/pisa/db_manager.py @@ -195,6 +195,10 @@ class DBManager: def store_watcher_appointment(self, uuid, appointment): """ Stores an appointment in the database using the ``WATCHER_PREFIX`` prefix. + + Args: + uuid (:obj:`str`): the identifier of the appointment to be stored. + appointment (:obj: `str`): the json encoded appointment to be stored as data. """ self.create_entry(uuid, appointment, prefix=WATCHER_PREFIX) @@ -203,6 +207,10 @@ class DBManager: def store_responder_tracker(self, uuid, tracker): """ Stores a tracker in the database using the ``RESPONDER_PREFIX`` prefix. + + Args: + uuid (:obj:`str`): the identifier of the appointment to be stored. + tracker (:obj: `str`): the json encoded tracker to be stored as data. """ self.create_entry(uuid, tracker, prefix=RESPONDER_PREFIX) @@ -232,9 +240,9 @@ class DBManager: return locator_map - def store_update_locator_map(self, locator, uuid): + def create_append_locator_map(self, locator, uuid): """ - Stores (or updates if already exists) a ``locator:uuid`` map. + Creates (or appends to if already exists) a ``locator:uuid`` map. If the map already exists, the new ``uuid`` is appended to the existing ones (if it is not already there). 
@@ -260,6 +268,25 @@ class DBManager:
         key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
         self.db.put(key, json.dumps(locator_map).encode("utf-8"))
 
+    def update_locator_map(self, locator, locator_map):
+        """
+        Updates a ``locator:uuid`` map in the database by deleting one of its uuids. It will only work as long as
+        the given ``locator_map`` is a subset of the current one and is not empty.
+
+        Args:
+            locator (:obj:`str`): a 16-byte hex-encoded string used as the key of the map.
+            locator_map (:obj:`list`): a list of uuids to replace the current one on the db.
+        """
+
+        current_locator_map = self.load_locator_map(locator)
+
+        if set(locator_map).issubset(current_locator_map) and len(locator_map) != 0:
+            key = (LOCATOR_MAP_PREFIX + locator).encode("utf-8")
+            self.db.put(key, json.dumps(locator_map).encode("utf-8"))
+
+        else:
+            logger.error("Trying to update a locator_map with completely different, or empty, data")
+
     def delete_locator_map(self, locator):
         """
         Deletes a ``locator:uuid`` map.
@@ -338,6 +365,9 @@ class DBManager:
     def create_triggered_appointment_flag(self, uuid):
         """
         Creates a flag that signals that an appointment has been triggered.
+
+        Args:
+            uuid (:obj:`str`): the identifier of the flag to be created.
         """
 
         self.db.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), "".encode("utf-8"))
@@ -358,6 +388,9 @@ class DBManager:
     def delete_triggered_appointment_flag(self, uuid):
         """
         Deletes a flag that signals that an appointment has been triggered.
+
+        Args:
+            uuid (:obj:`str`): the identifier of the flag to be removed.
         """
 
         self.delete_entry(uuid, prefix=TRIGGERED_APPOINTMENTS_PREFIX)

From 33f2ab35d8f6aeb3412ef478028926763279d1e1 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Tue, 21 Jan 2020 19:11:02 +0100
Subject: [PATCH 28/93] Updates responder to use new Cleaner functions

---
 pisa/responder.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pisa/responder.py b/pisa/responder.py
index d4866e1..cef46c5 100644
--- a/pisa/responder.py
+++ b/pisa/responder.py
@@ -215,9 +215,8 @@ class Responder:
             )
 
             # FIXME: This is only necessary because of the triggered appointment approach. Remove if it changes.
-            self.db_manager.delete_watcher_appointment(uuid)
-            self.db_manager.delete_triggered_appointment_flag(uuid)
-            Cleaner.update_delete_locator_map(locator, uuid, self.db_manager)
+            Cleaner.delete_appointment_from_db(uuid, self.db_manager)
+            Cleaner.update_delete_db_locator_map(uuid, locator, self.db_manager)
 
         return receipt

From 74a9dad071ade32e15f8beded5537807b164f87d Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Tue, 21 Jan 2020 19:11:15 +0100
Subject: [PATCH 29/93] Updates Watcher to use new Cleaner functions and
 reworks filter_valid_breaches to return valid and invalid breaches
 separately
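
For reference, the reworked calling convention, sketched from the diff below (shapes only; not part of the patch itself):

    valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches)

    # valid_breaches is a dict keyed by uuid:
    #     {uuid: {"locator": ..., "dispute_txid": ..., "penalty_txid": ..., "penalty_rawtx": ...}}
    # invalid_breaches is a plain list of uuids whose encrypted blobs failed to decrypt or decode.

This replaces the previous single dict whose entries carried a valid_breach boolean.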
--- pisa/watcher.py | 81 ++++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 41 deletions(-) diff --git a/pisa/watcher.py b/pisa/watcher.py index 828a45d..acc390c 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -124,7 +124,7 @@ class Watcher: logger.info("Waking up") self.db_manager.store_watcher_appointment(uuid, appointment.to_json()) - self.db_manager.store_update_locator_map(appointment.locator, uuid) + self.db_manager.create_append_locator_map(appointment.locator, uuid) appointment_added = True signature = Cryptographer.sign(appointment.serialize(), self.signing_key) @@ -164,37 +164,38 @@ class Watcher: if block["height"] > appointment_data.get("end_time") + self.config.get("EXPIRY_DELTA") ] - Cleaner.delete_expired_appointment( + Cleaner.delete_expired_appointments( expired_appointments, self.appointments, self.locator_uuid_map, self.db_manager ) - filtered_breaches = self.filter_valid_breaches(self.get_breaches(txids)) + valid_breaches, invalid_breaches = self.filter_valid_breaches(self.get_breaches(txids)) - for uuid, filtered_breach in filtered_breaches.items(): - # Errors decrypting the Blob will result in a None penalty_txid - if filtered_breach["valid_breach"] is True: - logger.info( - "Notifying responder and deleting appointment", - penalty_txid=filtered_breach["penalty_txid"], - locator=filtered_breach["locator"], - uuid=uuid, - ) - - self.responder.handle_breach( - uuid, - filtered_breach["locator"], - filtered_breach["dispute_txid"], - filtered_breach["penalty_txid"], - filtered_breach["penalty_rawtx"], - self.appointments[uuid].get("end_time"), - block_hash, - ) - - # Delete the appointment and update db - Cleaner.delete_completed_appointment( - uuid, self.appointments, self.locator_uuid_map, self.db_manager + for uuid, breach in valid_breaches.items(): + logger.info( + "Notifying responder and deleting appointment", + penalty_txid=breach["penalty_txid"], + locator=breach["locator"], + uuid=uuid, ) + self.responder.handle_breach( + uuid, + breach["locator"], + breach["dispute_txid"], + breach["penalty_txid"], + breach["penalty_rawtx"], + self.appointments[uuid].get("end_time"), + block_hash, + ) + + Cleaner.flag_triggered_appointments( + list(valid_breaches.keys()), self.appointments, self.locator_uuid_map, self.db_manager + ) + + Cleaner.delete_completed_appointments( + invalid_breaches, self.appointments, self.locator_uuid_map, self.db_manager + ) + # Register the last processed block for the watcher self.db_manager.store_last_block_hash_watcher(block_hash) @@ -248,7 +249,8 @@ class Watcher: ``{locator, dispute_txid, penalty_txid, penalty_rawtx, valid_breach}`` """ - filtered_breaches = {} + valid_breaches = {} + invalid_breaches = [] for locator, dispute_txid in breaches.items(): for uuid in self.locator_uuid_map[locator]: @@ -263,21 +265,18 @@ class Watcher: penalty_tx = BlockProcessor.decode_raw_transaction(penalty_rawtx) if penalty_tx is not None: - penalty_txid = penalty_tx.get("txid") - valid_breach = True + valid_breaches[uuid] = { + "locator": locator, + "dispute_txid": dispute_txid, + "penalty_txid": penalty_tx.get("txid"), + "penalty_rawtx": penalty_rawtx, + } - logger.info("Breach found for locator", locator=locator, uuid=uuid, penalty_txid=penalty_txid) + logger.info( + "Breach found for locator", locator=locator, uuid=uuid, penalty_txid=penalty_tx.get("txid") + ) else: - penalty_txid = None - valid_breach = False + invalid_breaches.append(uuid) - filtered_breaches[uuid] = { - "locator": locator, - "dispute_txid": 
dispute_txid, - "penalty_txid": penalty_txid, - "penalty_rawtx": penalty_rawtx, - "valid_breach": valid_breach, - } - - return filtered_breaches + return valid_breaches, invalid_breaches From 7bf65c40f548b9c98b9d958e7fdb11b0c2b1813f Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 21 Jan 2020 19:11:33 +0100 Subject: [PATCH 30/93] Updates unit tests, adds tests to cover new methods and removes unused imports --- test/pisa/unit/test_cleaner.py | 85 +++++++++++++++++++++++++------ test/pisa/unit/test_db_manager.py | 53 +++++++++++++++++-- test/pisa/unit/test_pisad.py | 1 - test/pisa/unit/test_watcher.py | 16 +++--- 4 files changed, 127 insertions(+), 28 deletions(-) diff --git a/test/pisa/unit/test_cleaner.py b/test/pisa/unit/test_cleaner.py index 5b9eaf2..fb5db76 100644 --- a/test/pisa/unit/test_cleaner.py +++ b/test/pisa/unit/test_cleaner.py @@ -4,7 +4,6 @@ from uuid import uuid4 from pisa.responder import TransactionTracker from pisa.cleaner import Cleaner from common.appointment import Appointment -from pisa.db_manager import WATCHER_PREFIX, TRIGGERED_APPOINTMENTS_PREFIX from test.pisa.unit.conftest import get_random_value_hex @@ -16,7 +15,6 @@ MAX_ITEMS = 100 ITERATIONS = 10 -# WIP: FIX CLEANER TESTS AFTER ADDING delete_complete_appointment def set_up_appointments(db_manager, total_appointments): appointments = dict() locator_uuid_map = dict() @@ -30,7 +28,7 @@ def set_up_appointments(db_manager, total_appointments): locator_uuid_map[locator] = [uuid] db_manager.store_watcher_appointment(uuid, appointment.to_json()) - db_manager.store_update_locator_map(locator, uuid) + db_manager.create_append_locator_map(locator, uuid) # Each locator can have more than one uuid assigned to it. if i % 2: @@ -40,7 +38,7 @@ def set_up_appointments(db_manager, total_appointments): locator_uuid_map[locator].append(uuid) db_manager.store_watcher_appointment(uuid, appointment.to_json()) - db_manager.store_update_locator_map(locator, uuid) + db_manager.create_append_locator_map(locator, uuid) return appointments, locator_uuid_map @@ -63,7 +61,7 @@ def set_up_trackers(db_manager, total_trackers): tx_tracker_map[penalty_txid] = [uuid] db_manager.store_responder_tracker(uuid, tracker.to_json()) - db_manager.store_update_locator_map(tracker.locator, uuid) + db_manager.create_append_locator_map(tracker.locator, uuid) # Each penalty_txid can have more than one uuid assigned to it. 
        if i % 2:
@@ -73,34 +71,89 @@
             tx_tracker_map[penalty_txid].append(uuid)
 
             db_manager.store_responder_tracker(uuid, tracker.to_json())
-            db_manager.store_update_locator_map(tracker.locator, uuid)
+            db_manager.create_append_locator_map(tracker.locator, uuid)
 
     return trackers, tx_tracker_map
 
 
+def test_delete_appointment_from_memory(db_manager):
+    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+
+    for uuid in list(appointments.keys()):
+        Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map)
+
+        # The appointment should have been deleted from memory, but not from the db
+        assert uuid not in appointments
+        assert db_manager.load_watcher_appointment(uuid) is not None
+
+
+def test_delete_appointment_from_db(db_manager):
+    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+
+    for uuid in list(appointments.keys()):
+        Cleaner.delete_appointment_from_db(uuid, db_manager)
+
+        # The appointment should have been deleted from the db, but not from memory
+        assert uuid in appointments
+        assert db_manager.load_watcher_appointment(uuid) is None
+
+
+def test_update_delete_db_locator_map(db_manager):
+    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+
+    for uuid, appointment in appointments.items():
+        locator = appointment.get("locator")
+        locator_map_before = db_manager.load_locator_map(locator)
+        Cleaner.update_delete_db_locator_map(uuid, locator, db_manager)
+        locator_map_after = db_manager.load_locator_map(locator)
+
+        if locator_map_after is None:
+            assert locator_map_before is not None
+
+        else:
+            assert uuid in locator_map_before and uuid not in locator_map_after
+
+
 def test_delete_expired_appointment(db_manager):
     for _ in range(ITERATIONS):
         appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
         expired_appointments = random.sample(list(appointments.keys()), k=ITEMS)
 
-        Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map, db_manager)
+        Cleaner.delete_expired_appointments(expired_appointments, appointments, locator_uuid_map, db_manager)
 
         assert not set(expired_appointments).issubset(appointments.keys())
 
 
 def test_delete_completed_appointments(db_manager):
-    appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
-    uuids = list(appointments.keys())
+    for _ in range(ITERATIONS):
+        appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
+        completed_appointments = random.sample(list(appointments.keys()), k=ITEMS)
 
-    for uuid in uuids:
-        Cleaner.delete_completed_appointment(uuid, appointments, locator_uuid_map, db_manager)
+        len_before_clean = len(appointments)
+        Cleaner.delete_completed_appointments(completed_appointments, appointments, locator_uuid_map, db_manager)
 
-    # All appointments should have been deleted
-    assert len(appointments) == 0
+        # ITEMS appointments should have been deleted from memory
+        assert len(appointments) == len_before_clean - ITEMS
 
-    # Make sure that all appointments are flagged as triggered in the db
-    for uuid in uuids:
-        assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8")) is not None
+        # Make sure they are not in the db either
+        db_appointments = db_manager.load_watcher_appointments(include_triggered=True)
+        assert not set(completed_appointments).issubset(db_appointments)
+
+
+def test_flag_triggered_appointments(db_manager):
+    for _ in range(ITERATIONS):
+        appointments, locator_uuid_map = 
set_up_appointments(db_manager, MAX_ITEMS)
+        triggered_appointments = random.sample(list(appointments.keys()), k=ITEMS)
+
+        len_before_clean = len(appointments)
+        Cleaner.flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager)
+
+        # ITEMS appointments should have been deleted from memory
+        assert len(appointments) == len_before_clean - ITEMS
+
+        # Make sure that all appointments are flagged as triggered in the db
+        db_appointments = db_manager.load_all_triggered_flags()
+        assert set(triggered_appointments).issubset(db_appointments)
 
 
 def test_delete_completed_trackers_db_match(db_manager):

diff --git a/test/pisa/unit/test_db_manager.py b/test/pisa/unit/test_db_manager.py
index 10483a1..8c7d0a0 100644
--- a/test/pisa/unit/test_db_manager.py
+++ b/test/pisa/unit/test_db_manager.py
@@ -171,25 +171,70 @@ def test_load_locator_map_empty(db_manager):
     assert db_manager.load_locator_map(get_random_value_hex(LOCATOR_LEN_BYTES)) is None
 
 
-def test_store_update_locator_map_empty(db_manager):
+def test_create_append_locator_map(db_manager):
     uuid = uuid4().hex
     locator = get_random_value_hex(LOCATOR_LEN_BYTES)
 
-    db_manager.store_update_locator_map(locator, uuid)
+    db_manager.create_append_locator_map(locator, uuid)
 
     # Check that the locator map has been properly stored
     assert db_manager.load_locator_map(locator) == [uuid]
 
     # If we try to add the same uuid again the list shouldn't change
-    db_manager.store_update_locator_map(locator, uuid)
+    db_manager.create_append_locator_map(locator, uuid)
     assert db_manager.load_locator_map(locator) == [uuid]
 
     # Add another uuid to the same locator and check that it also works
     uuid2 = uuid4().hex
-    db_manager.store_update_locator_map(locator, uuid2)
+    db_manager.create_append_locator_map(locator, uuid2)
     assert set(db_manager.load_locator_map(locator)) == set([uuid, uuid2])
 
 
+def test_update_locator_map(db_manager):
+    # Let's create a couple of appointments with the same locator
+    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
+    uuid1 = uuid4().hex
+    uuid2 = uuid4().hex
+    db_manager.create_append_locator_map(locator, uuid1)
+    db_manager.create_append_locator_map(locator, uuid2)
+
+    locator_map = db_manager.load_locator_map(locator)
+    assert uuid1 in locator_map
+
+    locator_map.remove(uuid1)
+    db_manager.update_locator_map(locator, locator_map)
+
+    locator_map_after = db_manager.load_locator_map(locator)
+    assert uuid1 not in locator_map_after and uuid2 in locator_map_after and len(locator_map_after) == 1
+
+
+def test_update_locator_map_wrong_data(db_manager):
+    # Let's try to update the locator map with a different list of uuids
+    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
+    db_manager.create_append_locator_map(locator, uuid4().hex)
+    db_manager.create_append_locator_map(locator, uuid4().hex)
+
+    locator_map = db_manager.load_locator_map(locator)
+    wrong_map_update = [uuid4().hex]
+    db_manager.update_locator_map(locator, wrong_map_update)
+    locator_map_after = db_manager.load_locator_map(locator)
+
+    assert locator_map_after == locator_map
+
+
+def test_update_locator_map_empty(db_manager):
+    # We shouldn't be able to update a map with an empty list
+    locator = get_random_value_hex(LOCATOR_LEN_BYTES)
+    db_manager.create_append_locator_map(locator, uuid4().hex)
+    db_manager.create_append_locator_map(locator, uuid4().hex)
+
+    locator_map = db_manager.load_locator_map(locator)
+    db_manager.update_locator_map(locator, [])
+    locator_map_after = db_manager.load_locator_map(locator)
+
+    assert locator_map_after == locator_map
+
+
 def test_delete_locator_map(db_manager):
locator_maps = db_manager.load_appointments_db(prefix=LOCATOR_MAP_PREFIX) assert len(locator_maps) != 0 diff --git a/test/pisa/unit/test_pisad.py b/test/pisa/unit/test_pisad.py index fae1d85..30db71e 100644 --- a/test/pisa/unit/test_pisad.py +++ b/test/pisa/unit/test_pisad.py @@ -1,7 +1,6 @@ import importlib import os import pytest -from pathlib import Path from shutil import copyfile from pisa.pisad import load_config diff --git a/test/pisa/unit/test_watcher.py b/test/pisa/unit/test_watcher.py index 7c1147f..03c6f45 100644 --- a/test/pisa/unit/test_watcher.py +++ b/test/pisa/unit/test_watcher.py @@ -140,7 +140,7 @@ def test_do_watch(watcher): for uuid, appointment in appointments.items(): watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time} watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json()) - watcher.db_manager.store_update_locator_map(appointment.locator, uuid) + watcher.db_manager.create_append_locator_map(appointment.locator, uuid) Thread(target=watcher.do_watch, daemon=True).start() @@ -190,7 +190,7 @@ def test_filter_valid_breaches_random_data(watcher): uuid = uuid4().hex appointments[uuid] = {"locator": dummy_appointment.locator, "end_time": dummy_appointment.end_time} watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_json()) - watcher.db_manager.store_update_locator_map(dummy_appointment.locator, uuid) + watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid) locator_uuid_map[dummy_appointment.locator] = [uuid] @@ -201,9 +201,10 @@ def test_filter_valid_breaches_random_data(watcher): watcher.locator_uuid_map = locator_uuid_map watcher.appointments = appointments - filtered_valid_breaches = watcher.filter_valid_breaches(breaches) + valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches) - assert not any([fil_breach["valid_breach"] for uuid, fil_breach in filtered_valid_breaches.items()]) + # We have "triggered" TEST_SET_SIZE/2 breaches, all of them invalid. + assert len(valid_breaches) == 0 and len(invalid_breaches) == TEST_SET_SIZE / 2 def test_filter_valid_breaches(watcher): @@ -229,10 +230,11 @@ def test_filter_valid_breaches(watcher): for uuid, appointment in appointments.items(): watcher.appointments[uuid] = {"locator": appointment.locator, "end_time": appointment.end_time} watcher.db_manager.store_watcher_appointment(uuid, dummy_appointment.to_json()) - watcher.db_manager.store_update_locator_map(dummy_appointment.locator, uuid) + watcher.db_manager.create_append_locator_map(dummy_appointment.locator, uuid) watcher.locator_uuid_map = locator_uuid_map - filtered_valid_breaches = watcher.filter_valid_breaches(breaches) + valid_breaches, invalid_breaches = watcher.filter_valid_breaches(breaches) - assert all([fil_breach["valid_breach"] for uuid, fil_breach in filtered_valid_breaches.items()]) + # We have "triggered" a single breach and it was valid. 
+    assert len(invalid_breaches) == 0 and len(valid_breaches) == 1

From 0f45e8fe11a1ca8446acbf1d2056f3a5dc5537db Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Tue, 21 Jan 2020 19:11:50 +0100
Subject: [PATCH 31/93] Adds methods to get seeded random values and fixes
 create_txs

---
 test/pisa/e2e/conftest.py | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/test/pisa/e2e/conftest.py b/test/pisa/e2e/conftest.py
index 72e6194..9585756 100644
--- a/test/pisa/e2e/conftest.py
+++ b/test/pisa/e2e/conftest.py
@@ -1,4 +1,5 @@
 import pytest
+import random
 from decimal import Decimal, getcontext
 
 import pisa.conf as conf
@@ -16,6 +17,17 @@ def bitcoin_cli():
     )
 
 
+@pytest.fixture(scope="session", autouse=True)
+def prng_seed():
+    random.seed(0)
+
+
+def get_random_value_hex(nbytes):
+    pseudo_random_value = random.getrandbits(8 * nbytes)
+    prv_hex = "{:x}".format(pseudo_random_value)
+    return prv_hex.zfill(2 * nbytes)
+
+
 @pytest.fixture()
 def create_txs(bitcoin_cli):
     set_up_node(bitcoin_cli)
     utxos = bitcoin_cli.listunspent()
 
     if len(utxos) == 0:
         raise ValueError("There're no UTXOs.")
 
-    commitment_tx_ins = {"txid": utxos[0].get("txid"), "vout": utxos[0].get("vout")}
-    commitment_tx_outs = {utxos[0].get("address"): utxos[0].get("amount") - Decimal(1 / pow(10, 5))}
+    utxo = utxos.pop(0)
+    while utxo.get("amount") < Decimal(2 / pow(10, 5)):
+        utxo = utxos.pop(0)
+
+    commitment_tx_ins = {"txid": utxo.get("txid"), "vout": utxo.get("vout")}
+    commitment_tx_outs = {utxo.get("address"): utxo.get("amount") - Decimal(1 / pow(10, 5))}
 
     raw_commitment_tx = bitcoin_cli.createrawtransaction([commitment_tx_ins], commitment_tx_outs)
     signed_commitment_tx = bitcoin_cli.signrawtransactionwithwallet(raw_commitment_tx)
@@ -61,8 +77,7 @@ def set_up_node(bitcoin_cli):
     bitcoin_cli.generatetoaddress(101, new_addr)
 
 
-def build_appointment_data(bitcoin_cli, commitment_tx, penalty_tx):
-    commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
+def build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx):
     current_height = bitcoin_cli.getblockcount()
 
     appointment_data = {

From 14503dcebde5272852876f465d997fe92c4a4da6 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Tue, 21 Jan 2020 19:12:01 +0100
Subject: [PATCH 32/93] Adds e2e tests for encrypted blobs with wrong key

---
 test/pisa/e2e/test_basic_e2e.py | 54 ++++++++++++++++++++++++++++++---
 1 file changed, 50 insertions(+), 4 deletions(-)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index 1fddef0..3b8c5b7 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -4,9 +4,12 @@ from riemann.tx import Tx
 
 from pisa import HOST, PORT
 from apps.cli import pisa_cli
-from pisa.utils.auth_proxy import JSONRPCException
+from apps.cli.blob import Blob
 from common.tools import compute_locator
+from common.appointment import Appointment
+from common.cryptographer import Cryptographer
+from pisa.utils.auth_proxy import JSONRPCException
+from test.pisa.e2e.conftest import END_TIME_DELTA, build_appointment_data, get_random_value_hex
 
 # We'll use pisa_cli to add appointments. 
The expected input format is a list of arguments with a json-encoded # appointment @@ -28,7 +31,8 @@ def get_appointment_info(locator): def test_appointment_life_cycle(bitcoin_cli, create_txs): commitment_tx, penalty_tx = create_txs - appointment_data = build_appointment_data(bitcoin_cli, commitment_tx, penalty_tx) + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) locator = compute_locator(appointment_data.get("tx_id")) assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True @@ -73,7 +77,8 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs): tx_in = mod_penalty_tx.tx_ins[0].copy(redeem_script=b"") mod_penalty_tx = mod_penalty_tx.copy(tx_ins=[tx_in]) - appointment_data = build_appointment_data(bitcoin_cli, commitment_tx, mod_penalty_tx.hex()) + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex()) locator = compute_locator(appointment_data.get("tx_id")) assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True @@ -89,3 +94,44 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs): assert appointment_info is not None assert len(appointment_info) == 1 assert appointment_info[0].get("status") == "not_found" + + +def test_appointment_wrong_key(bitcoin_cli, create_txs): + # This tests an appointment encrypted with a key that has not been derived from the same source as the locator. + # Therefore the tower won't be able to decrypt the blob once the appointment is triggered. + commitment_tx, penalty_tx = create_txs + + # The appointment data is built using a random 32-byte value. + appointment_data = build_appointment_data(bitcoin_cli, get_random_value_hex(32), penalty_tx) + + # We can't use pisa_cli.add_appointment here since it computes the locator internally, so let's do it manually. + # We will encrypt the blob using the random value and derive the locator from the commitment tx. + appointment_data["locator"] = compute_locator(bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")) + appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), appointment_data.get("tx_id")) + appointment = Appointment.from_dict(appointment_data) + + signature = pisa_cli.get_appointment_signature(appointment) + hex_pk_der = pisa_cli.get_pk() + + data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")} + + # Send appointment to the server. + response_json = pisa_cli.post_data_to_add_appointment_endpoint(data) + + # Check that the server has accepted the appointment + signature = response_json.get("signature") + assert signature is not None + assert pisa_cli.check_signature(signature, appointment) is True + assert response_json.get("locator") == appointment.locator + + # Trigger the appointment + new_addr = bitcoin_cli.getnewaddress() + broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) + + # The appointment should have been removed since the decryption failed. 
+    sleep(1)
+    appointment_info = get_appointment_info(appointment.locator)
+
+    assert appointment_info is not None
+    assert len(appointment_info) == 1
+    assert appointment_info[0].get("status") == "not_found"

From 93cb00336063168217f4c62c221127e55eea97c4 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Wed, 22 Jan 2020 11:12:01 +0100
Subject: [PATCH 33/93] Sets up seed for cli and common unit tests so random
 values are replicable

---
 test/apps/cli/unit/conftest.py | 6 ++++++
 test/common/unit/conftest.py   | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/test/apps/cli/unit/conftest.py b/test/apps/cli/unit/conftest.py
index c87a930..3752ac0 100644
--- a/test/apps/cli/unit/conftest.py
+++ b/test/apps/cli/unit/conftest.py
@@ -1,6 +1,12 @@
+import pytest
 import random
 
 
+@pytest.fixture(scope="session", autouse=True)
+def prng_seed():
+    random.seed(0)
+
+
 def get_random_value_hex(nbytes):
     pseudo_random_value = random.getrandbits(8 * nbytes)
     prv_hex = "{:x}".format(pseudo_random_value)

diff --git a/test/common/unit/conftest.py b/test/common/unit/conftest.py
index c87a930..3752ac0 100644
--- a/test/common/unit/conftest.py
+++ b/test/common/unit/conftest.py
@@ -1,6 +1,12 @@
+import pytest
 import random
 
 
+@pytest.fixture(scope="session", autouse=True)
+def prng_seed():
+    random.seed(0)
+
+
 def get_random_value_hex(nbytes):
     pseudo_random_value = random.getrandbits(8 * nbytes)
     prv_hex = "{:x}".format(pseudo_random_value)

From 1027f4861146f181184905e1a6351682683957fc Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Wed, 22 Jan 2020 14:46:59 +0100
Subject: [PATCH 34/93] Splits create_txs in two simpler functions and
 refactors fixtures so setup_node is not called every test

---
 test/pisa/e2e/conftest.py | 54 ++++++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 18 deletions(-)

diff --git a/test/pisa/e2e/conftest.py b/test/pisa/e2e/conftest.py
index 9585756..cef3237 100644
--- a/test/pisa/e2e/conftest.py
+++ b/test/pisa/e2e/conftest.py
@@ -9,7 +9,7 @@ getcontext().prec = 10
 END_TIME_DELTA = 10
 
 
-@pytest.fixture()
+@pytest.fixture(scope="session")
 def bitcoin_cli():
     # return AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, 18444))
     return AuthServiceProxy(
@@ -22,15 +22,15 @@ def prng_seed():
     random.seed(0)
 
 
-def get_random_value_hex(nbytes):
-    pseudo_random_value = random.getrandbits(8 * nbytes)
-    prv_hex = "{:x}".format(pseudo_random_value)
-    return prv_hex.zfill(2 * nbytes)
+@pytest.fixture(scope="session", autouse=True)
+def setup_node(bitcoin_cli):
+    # This method will create a new address and mine bitcoin so the node can be used for testing
+    new_addr = bitcoin_cli.getnewaddress()
+    bitcoin_cli.generatetoaddress(101, new_addr)
 
 
 @pytest.fixture()
 def create_txs(bitcoin_cli):
-    set_up_node(bitcoin_cli)
     utxos = bitcoin_cli.listunspent()
 
     if len(utxos) == 0:
@@ -40,8 +40,27 @@ def create_txs(bitcoin_cli):
     utxo = utxos.pop(0)
 
+    signed_commitment_tx = create_commitment_tx(bitcoin_cli, utxo)
+    decoded_commitment_tx = bitcoin_cli.decoderawtransaction(signed_commitment_tx)
+
+    signed_penalty_tx = create_penalty_tx(bitcoin_cli, decoded_commitment_tx)
+
+    return signed_commitment_tx, signed_penalty_tx
+
+
+def get_random_value_hex(nbytes):
+    pseudo_random_value = random.getrandbits(8 * nbytes)
+    prv_hex = "{:x}".format(pseudo_random_value)
+    return prv_hex.zfill(2 * nbytes)
+
+
+def create_commitment_tx(bitcoin_cli, utxo, destination=None):
+    # We will set the recipient to 
ourselves if destination is None
+    if destination is None:
+        destination = utxo.get("address")
+
     commitment_tx_ins = {"txid": utxo.get("txid"), "vout": utxo.get("vout")}
-    commitment_tx_outs = {utxo.get("address"): utxo.get("amount") - Decimal(1 / pow(10, 5))}
+    commitment_tx_outs = {destination: utxo.get("amount") - Decimal(1 / pow(10, 5))}
 
     raw_commitment_tx = bitcoin_cli.createrawtransaction([commitment_tx_ins], commitment_tx_outs)
     signed_commitment_tx = bitcoin_cli.signrawtransactionwithwallet(raw_commitment_tx)
@@ -49,11 +68,16 @@ def create_txs(bitcoin_cli):
     if not signed_commitment_tx.get("complete"):
         raise ValueError("Couldn't sign transaction. {}".format(signed_commitment_tx))
 
-    decoded_commitment_tx = bitcoin_cli.decoderawtransaction(signed_commitment_tx.get("hex"))
+    return signed_commitment_tx.get("hex")
+
+
+def create_penalty_tx(bitcoin_cli, decoded_commitment_tx, destination=None):
+    # We will set the recipient to ourselves if destination is None
+    if destination is None:
+        destination = decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("addresses")[0]
 
     penalty_tx_ins = {"txid": decoded_commitment_tx.get("txid"), "vout": 0}
-    address = decoded_commitment_tx.get("vout")[0].get("scriptPubKey").get("addresses")[0]
-    penalty_tx_outs = {address: decoded_commitment_tx.get("vout")[0].get("value") - Decimal(1 / pow(10, 5))}
+    penalty_tx_outs = {destination: decoded_commitment_tx.get("vout")[0].get("value") - Decimal(1 / pow(10, 5))}
 
     orphan_info = {
         "txid": decoded_commitment_tx.get("txid"),
@@ -66,15 +90,9 @@ def create_txs(bitcoin_cli):
     signed_penalty_tx = bitcoin_cli.signrawtransactionwithwallet(raw_penalty_tx, [orphan_info])
 
     if not signed_penalty_tx.get("complete"):
-        raise ValueError("Couldn't sign orphan transaction. {}".format(signed_commitment_tx))
+        raise ValueError("Couldn't sign orphan transaction. {}".format(signed_penalty_tx))
 
-    return signed_commitment_tx.get("hex"), signed_penalty_tx.get("hex")
-
-
-def set_up_node(bitcoin_cli):
-    # This method will create a new address a mine bitcoin so the node can be used for testing
-    new_addr = bitcoin_cli.getnewaddress()
-    bitcoin_cli.generatetoaddress(101, new_addr)
+    return signed_penalty_tx.get("hex")
 
 
 def build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx):

From fd6c85ced22108c4332112134b769eb6c2872151 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Wed, 22 Jan 2020 15:20:50 +0100
Subject: [PATCH 35/93] Moves logs to be consistent with the rest of db logs.
 Adds missing docs and adds an exception if the db is already being used

---
 pisa/cleaner.py    |  1 -
 pisa/db_manager.py | 10 ++++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/pisa/cleaner.py b/pisa/cleaner.py
index 37480a6..75ea322 100644
--- a/pisa/cleaner.py
+++ b/pisa/cleaner.py
@@ -148,7 +148,6 @@ class Cleaner:
         """
 
         for uuid in triggered_appointments:
-            logger.info("Flagging appointment as triggered", locator=appointments[uuid].get("locator"), uuid=uuid)
             Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map)
             db_manager.create_triggered_appointment_flag(uuid)
 
diff --git a/pisa/db_manager.py b/pisa/db_manager.py
index 4670215..2c693b6 100644
--- a/pisa/db_manager.py
+++ b/pisa/db_manager.py
@@ -30,6 +30,10 @@ class DBManager:
     Args:
         db_path (:obj:`str`): the path (relative or absolute) to the system folder containing the database. A fresh
             database will be created if the specified path does not contain one.
+
+    Raises:
+        ValueError: If the provided ``db_path`` is not a string.
+        plyvel.Error: If the db is currently unavailable (being used by another process).
     """
 
     def __init__(self, db_path):
@@ -44,6 +48,10 @@ class DBManager:
             logger.info("No db found. Creating a fresh one")
             self.db = plyvel.DB(db_path, create_if_missing=True)
 
+        elif "LOCK: Resource temporarily unavailable" in str(e):
+            logger.info("The db is already being used by another process (LOCK)")
+            raise e
+
     def load_appointments_db(self, prefix):
         """
         Loads all data from the appointments database given a prefix. Two prefixes are defined: ``WATCHER_PREFIX`` and
@@ -371,6 +379,7 @@ class DBManager:
         """
 
         self.db.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), "".encode("utf-8"))
+        logger.info("Flagging appointment as triggered", uuid=uuid)
 
     def load_all_triggered_flags(self):
         """
@@ -394,3 +403,4 @@ class DBManager:
         """
 
         self.delete_entry(uuid, prefix=TRIGGERED_APPOINTMENTS_PREFIX)
+        logger.info("Removing triggered flag from appointment", uuid=uuid)

From 272e61922d821b74f50eadfc811d3dfd45339cc4 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Wed, 22 Jan 2020 16:19:51 +0100
Subject: [PATCH 36/93] Moves triggered flag clearing to the Watcher when a
 triggered appointment cannot make it to the mempool

When an appointment was triggered, a flag was set in the Watcher and removed later on in the Responder if the transaction ended up being rejected. That's pretty brittle. Since the Carrier's receipt tells us whether a transaction has made it to the mempool or not, this can all be done in the Watcher, which makes more sense and reduces the interaction with the db (1 write if it succeeds and 0 otherwise, instead of 1 write if it succeeds and 2 otherwise).
---
 pisa/responder.py |  4 ----
 pisa/watcher.py   | 15 +++++++++++----
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/pisa/responder.py b/pisa/responder.py
index cef46c5..5d4ac9d 100644
--- a/pisa/responder.py
+++ b/pisa/responder.py
@@ -214,10 +214,6 @@ class Responder:
                 "Tracker cannot be created", reason=receipt.reason, uuid=uuid, on_sync=self.on_sync(block_hash)
             )
 
-            # FIXME: This is only necessary because of the triggered appointment approach. Remove if it changes.
-            Cleaner.delete_appointment_from_db(uuid, self.db_manager)
-            Cleaner.update_delete_db_locator_map(uuid, locator, self.db_manager)
-
         return receipt

diff --git a/pisa/watcher.py b/pisa/watcher.py
index acc390c..c0e852a 100644
--- a/pisa/watcher.py
+++ b/pisa/watcher.py
@@ -178,7 +178,7 @@ class Watcher:
                 uuid=uuid,
             )
 
-            self.responder.handle_breach(
+            receipt = self.responder.handle_breach(
                 uuid,
                 breach["locator"],
                 breach["dispute_txid"],
@@ -188,9 +188,16 @@ class Watcher:
                 block_hash,
             )
 
-        Cleaner.flag_triggered_appointments(
-            list(valid_breaches.keys()), self.appointments, self.locator_uuid_map, self.db_manager
-        )
+            Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map)
+
+            # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted.
+            # FIXME: This is only necessary because of the triggered appointment approach. Fix if it changes. 
+ if receipt.delivered: + self.db_manager.create_triggered_appointment_flag(uuid) + + else: + self.db_manager.delete_watcher_appointment(uuid) + Cleaner.update_delete_db_locator_map(uuid, breach["locator"], self.db_manager) Cleaner.delete_completed_appointments( invalid_breaches, self.appointments, self.locator_uuid_map, self.db_manager From 03f0a270d81bed0f67d149efb5bedbb46323bbc8 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 22 Jan 2020 16:24:32 +0100 Subject: [PATCH 37/93] Modifies when the flag is cleared Now the only time a triggered flag has to be cleared is when a tracker is removed, otherwise the flag is never created. --- pisa/cleaner.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 75ea322..d2e6925 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -127,8 +127,8 @@ class Cleaner: "Appointment cannot be completed, it contains invalid data. Deleting", locator=locator, uuid=uuid ) + db_manager.delete_watcher_appointment(uuid) Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) - Cleaner.delete_appointment_from_db(uuid, db_manager) Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) @staticmethod @@ -190,7 +190,8 @@ class Cleaner: # Delete appointment from the db (from watchers's and responder's db) and remove flag db_manager.delete_responder_tracker(uuid) - Cleaner.delete_appointment_from_db(uuid, db_manager) + db_manager.delete_watcher_appointment(uuid) + db_manager.delete_triggered_appointment_flag(uuid) # Update / delete the locator map Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) From 852368a2ad4bb222c16e8bd237a59700547467f1 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 22 Jan 2020 16:25:52 +0100 Subject: [PATCH 38/93] Adds tests for sending two different appointments with the same locator to the tower --- test/pisa/e2e/test_basic_e2e.py | 71 +++++++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 3 deletions(-) diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index 3b8c5b7..8c26867 100644 --- a/test/pisa/e2e/test_basic_e2e.py +++ b/test/pisa/e2e/test_basic_e2e.py @@ -9,7 +9,7 @@ from common.tools import compute_locator from common.appointment import Appointment from common.cryptographer import Cryptographer from pisa.utils.auth_proxy import JSONRPCException -from test.pisa.e2e.conftest import END_TIME_DELTA, build_appointment_data, get_random_value_hex +from test.pisa.e2e.conftest import END_TIME_DELTA, build_appointment_data, get_random_value_hex, create_penalty_tx # We'll use pisa_cli to add appointments. 
The expected input format is a list of arguments with a json-encoded
# appointment
@@ -33,7 +33,7 @@ def test_appointment_life_cycle(bitcoin_cli, create_txs):
     commitment_tx, penalty_tx = create_txs
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
     appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
-    locator = compute_locator(appointment_data.get("tx_id"))
+    locator = compute_locator(commitment_tx_id)
 
     assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
 
@@ -79,7 +79,7 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
 
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
     appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex())
-    locator = compute_locator(appointment_data.get("tx_id"))
+    locator = compute_locator(commitment_tx_id)
 
     assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
 
@@ -135,3 +135,68 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     assert appointment_info is not None
     assert len(appointment_info) == 1
     assert appointment_info[0].get("status") == "not_found"
+
+
+def test_two_identical_appointments(bitcoin_cli, create_txs):
+    # Tests sending two identical appointments to the tower.
+    # At the moment there are no checks for identical appointments, so both will be accepted, decrypted and kept until
+    # the end.
+    # TODO: 34-exact-duplicate-appointment
+    commitment_tx, penalty_tx = create_txs
+    commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
+
+    appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
+    locator = compute_locator(commitment_tx_id)
+
+    # Send the appointment twice
+    assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
+    assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
+
+    # Broadcast the commitment transaction and mine a block
+    new_addr = bitcoin_cli.getnewaddress()
+    broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
+
+    # Since the appointments are identical and duplicates are not checked for, both should have made it to the
+    # Responder
+    sleep(1)
+    appointment_info = get_appointment_info(locator)
+
+    assert appointment_info is not None
+    assert len(appointment_info) == 2
+
+    for info in appointment_info:
+        assert info.get("status") == "dispute_responded"
+        assert info.get("penalty_rawtx") == penalty_tx
+
+
+def test_two_appointments_same_locator_different_penalty(bitcoin_cli, create_txs):
+    # This tests sending two appointments with different, valid transactions that share the same locator. 
+ commitment_tx, penalty_tx1 = create_txs + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + + # We need to create a second penalty spending from the same commitment + decoded_commitment_tx = bitcoin_cli.decoderawtransaction(commitment_tx) + new_addr = bitcoin_cli.getnewaddress() + penalty_tx2 = create_penalty_tx(bitcoin_cli, decoded_commitment_tx, new_addr) + + appointment1_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx1) + appointment2_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx2) + locator = compute_locator(commitment_tx_id) + + assert pisa_cli.add_appointment([json.dumps(appointment1_data)]) is True + assert pisa_cli.add_appointment([json.dumps(appointment2_data)]) is True + + # Broadcast the commitment transaction and mine a block + new_addr = bitcoin_cli.getnewaddress() + broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) + + # The first appointment should have made it to the Responder, and the second one should have been dropped for + # double-spending + sleep(1) + appointment_info = get_appointment_info(locator) + + assert appointment_info is not None + assert len(appointment_info) == 1 + assert appointment_info[0].get("status") == "dispute_responded" + assert appointment_info[0].get("penalty_rawtx") == penalty_tx1 From 245d5b49f32f870f44c8ef8a078e063bf6d2116e Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 16:23:07 +0100 Subject: [PATCH 39/93] Updates config files Removes unused values from pisa config file and adds cli config file --- .gitignore | 1 + apps/cli/sample_conf.py | 13 +++++++++++++ pisa/sample_conf.py | 13 ++++--------- test/pisa/e2e/pisa-conf.py | 13 ++++--------- 4 files changed, 22 insertions(+), 18 deletions(-) create mode 100644 apps/cli/sample_conf.py diff --git a/.gitignore b/.gitignore index 978a651..8d7a2c1 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ test.py .coverage htmlcov docs/ +.pisa_btc diff --git a/apps/cli/sample_conf.py b/apps/cli/sample_conf.py new file mode 100644 index 0000000..d9f2b90 --- /dev/null +++ b/apps/cli/sample_conf.py @@ -0,0 +1,13 @@ +# PISA-SERVER +DEFAULT_PISA_API_SERVER = "btc.pisa.watch" +DEFAULT_PISA_API_PORT = 9814 + +# PISA-CLI +DATA_FOLDER = "~/.pisa_btc/" + +CLIENT_LOG_FILE = "pisa-cli.log" +APPOINTMENTS_FOLDER_NAME = "appointment_receipts" + +CLI_PUBLIC_KEY = "cli_pk.der" +CLI_PRIVATE_KEY = "cli_sk.der" +PISA_PUBLIC_KEY = "pisa_pk.der" diff --git a/pisa/sample_conf.py b/pisa/sample_conf.py index 8d08590..3c219c1 100644 --- a/pisa/sample_conf.py +++ b/pisa/sample_conf.py @@ -5,27 +5,22 @@ BTC_RPC_HOST = "localhost" BTC_RPC_PORT = 18443 BTC_NETWORK = "regtest" -# CHAIN MONITOR -POLLING_DELTA = 60 -BLOCK_WINDOW_SIZE = 10 - # ZMQ FEED_PROTOCOL = "tcp" FEED_ADDR = "127.0.0.1" FEED_PORT = 28332 # PISA +DATA_FOLDER = "~/.pisa_btc/" MAX_APPOINTMENTS = 100 EXPIRY_DELTA = 6 MIN_TO_SELF_DELAY = 20 SERVER_LOG_FILE = "pisa.log" PISA_SECRET_KEY = "pisa_sk.der" -# PISA-CLI -CLIENT_LOG_FILE = "pisa.log" - -# TEST -TEST_LOG_FILE = "test.log" +# CHAIN MONITOR +POLLING_DELTA = 60 +BLOCK_WINDOW_SIZE = 10 # LEVELDB DB_PATH = "appointments" diff --git a/test/pisa/e2e/pisa-conf.py b/test/pisa/e2e/pisa-conf.py index 83fe719..f53a81b 100644 --- a/test/pisa/e2e/pisa-conf.py +++ b/test/pisa/e2e/pisa-conf.py @@ -5,27 +5,22 @@ BTC_RPC_HOST = "localhost" BTC_RPC_PORT = 18445 BTC_NETWORK = "regtest" -# CHAIN MONITOR -POLLING_DELTA = 60 -BLOCK_WINDOW_SIZE = 10 - # ZMQ FEED_PROTOCOL = "tcp" FEED_ADDR = 
"127.0.0.1" FEED_PORT = 28335 # PISA +DATA_FOLDER = "~/.pisa_btc/" MAX_APPOINTMENTS = 100 EXPIRY_DELTA = 6 MIN_TO_SELF_DELAY = 20 SERVER_LOG_FILE = "pisa.log" PISA_SECRET_KEY = "pisa_sk.der" -# PISA-CLI -CLIENT_LOG_FILE = "pisa.log" - -# TEST -TEST_LOG_FILE = "test.log" +# CHAIN MONITOR +POLLING_DELTA = 60 +BLOCK_WINDOW_SIZE = 10 # LEVELDB DB_PATH = "appointments" From 418b7b49ab62f1b25f0af2676474f942e3aaae4e Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 16:27:18 +0100 Subject: [PATCH 40/93] Creates main function and updates config to add data folder pisad can now be run easier from other files (simplifies e2e testing) --- pisa/pisad.py | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 2643a19..6be2c98 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,8 +1,11 @@ +import os from getopt import getopt from sys import argv, exit from signal import signal, SIGINT, SIGQUIT, SIGTERM from common.logger import Logger +from common.tools import check_conf_fields, setup_data_folder + from pisa.api import API from pisa.watcher import Watcher from pisa.builder import Builder @@ -38,6 +41,12 @@ def load_config(config): conf_dict = {} + data_folder = config.DATA_FOLDER + if isinstance(data_folder, str): + data_folder = os.path.expanduser(data_folder) + else: + raise ValueError("The provided user folder is invalid.") + conf_fields = { "BTC_RPC_USER": {"value": config.BTC_RPC_USER, "type": str}, "BTC_RPC_PASSWD": {"value": config.BTC_RPC_PASSWD, "type": str}, @@ -47,43 +56,31 @@ def load_config(config): "FEED_PROTOCOL": {"value": config.FEED_PROTOCOL, "type": str}, "FEED_ADDR": {"value": config.FEED_ADDR, "type": str}, "FEED_PORT": {"value": config.FEED_PORT, "type": int}, + "DATA_FOLDER": {"value": data_folder, "type": str}, "MAX_APPOINTMENTS": {"value": config.MAX_APPOINTMENTS, "type": int}, "EXPIRY_DELTA": {"value": config.EXPIRY_DELTA, "type": int}, "MIN_TO_SELF_DELAY": {"value": config.MIN_TO_SELF_DELAY, "type": int}, - "SERVER_LOG_FILE": {"value": config.SERVER_LOG_FILE, "type": str}, - "PISA_SECRET_KEY": {"value": config.PISA_SECRET_KEY, "type": str}, - "CLIENT_LOG_FILE": {"value": config.CLIENT_LOG_FILE, "type": str}, - "TEST_LOG_FILE": {"value": config.TEST_LOG_FILE, "type": str}, - "DB_PATH": {"value": config.DB_PATH, "type": str}, + "SERVER_LOG_FILE": {"value": data_folder, "type": str}, + "PISA_SECRET_KEY": {"value": data_folder + config.PISA_SECRET_KEY, "type": str}, + "DB_PATH": {"value": data_folder + config.DB_PATH, "type": str}, } - for field in conf_fields: - value = conf_fields[field]["value"] - correct_type = conf_fields[field]["type"] - - if (value is not None) and isinstance(value, correct_type): - conf_dict[field] = value - else: - err_msg = "{} variable in config is of the wrong type".format(field) - logger.error(err_msg) - raise ValueError(err_msg) + check_conf_fields(conf_fields, logger) return conf_dict -if __name__ == "__main__": - logger.info("Starting PISA") +def main(): + global db_manager, chain_monitor signal(SIGINT, handle_signals) signal(SIGTERM, handle_signals) signal(SIGQUIT, handle_signals) - opts, _ = getopt(argv[1:], "", [""]) - for opt, arg in opts: - # FIXME: Leaving this here for future option/arguments - pass - pisa_config = load_config(conf) + logger.info("Starting PISA") + + setup_data_folder(pisa_config.get("DATA_FOLDER"), logger) db_manager = DBManager(pisa_config.get("DB_PATH")) if not can_connect_to_bitcoind(): @@ -155,3 
+152,12 @@ if __name__ == "__main__":
     except Exception as e:
         logger.error("An error occurred: {}. Shutting down".format(e))
         exit(1)
+
+
+if __name__ == "__main__":
+    opts, _ = getopt(argv[1:], "", [""])
+    for opt, arg in opts:
+        # FIXME: Leaving this here for future option/arguments
+        pass
+
+    main()

From c1ad1a4924a262362edd0ed60d9c2a6f638e71ac Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura 
Date: Thu, 23 Jan 2020 16:30:09 +0100
Subject: [PATCH 41/93] Updates cli to run with config file

---
 apps/cli/pisa_cli.py | 72 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 56 insertions(+), 16 deletions(-)

diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index 5e7c4cf..742f562 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -11,19 +11,18 @@ from uuid import uuid4
 
 from apps.cli.help import help_add_appointment, help_get_appointment
 from apps.cli.blob import Blob
-from apps.cli import (
-    DEFAULT_PISA_API_SERVER,
-    DEFAULT_PISA_API_PORT,
-    CLI_PUBLIC_KEY,
-    CLI_PRIVATE_KEY,
-    PISA_PUBLIC_KEY,
-    APPOINTMENTS_FOLDER_NAME,
-)
+import apps.cli.conf as conf
 
 from common.logger import Logger
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
-from common.tools import check_sha256_hex_format, check_locator_format, compute_locator
+from common.tools import (
+    check_sha256_hex_format,
+    check_locator_format,
+    compute_locator,
+    check_conf_fields,
+    setup_data_folder,
+)
 
 HTTP_OK = 200
@@ -54,6 +53,42 @@ def generate_dummy_appointment():
     logger.info("\nData stored in dummy_appointment_data.json")
 
 
+def load_config(config):
+    """
+    Looks through all of the config options to make sure they contain the right type of data and builds a config
+    dictionary.
+
+    Args:
+        config (:obj:`module`): the config module to load the options from.
+
+    Returns:
+        :obj:`dict`: a dictionary containing the config values.
+    """
+
+    data_folder = config.DATA_FOLDER
+    if isinstance(data_folder, str):
+        data_folder = os.path.expanduser(data_folder)
+    else:
+        raise ValueError("The provided user folder is invalid.")
+
+    conf_fields = {
+        "DEFAULT_PISA_API_SERVER": {"value": config.DEFAULT_PISA_API_SERVER, "type": str},
+        "DEFAULT_PISA_API_PORT": {"value": config.DEFAULT_PISA_API_PORT, "type": int},
+        "DATA_FOLDER": {"value": data_folder, "type": str},
+        "CLIENT_LOG_FILE": {"value": data_folder + config.CLIENT_LOG_FILE, "type": str},
+        "APPOINTMENTS_FOLDER_NAME": {"value": data_folder + config.APPOINTMENTS_FOLDER_NAME, "type": str},
+        "CLI_PUBLIC_KEY": {"value": data_folder + config.CLI_PUBLIC_KEY, "type": str},
+        "CLI_PRIVATE_KEY": {"value": data_folder + config.CLI_PRIVATE_KEY, "type": str},
+        "PISA_PUBLIC_KEY": {"value": data_folder + config.PISA_PUBLIC_KEY, "type": str},
+    }
+
+    conf_dict = check_conf_fields(conf_fields)
+
+    return conf_dict
+
+
 # Loads and returns Pisa keys from disk
 def load_key_file_data(file_name):
     try:
@@ -73,13 +108,13 @@ def load_key_file_data(file_name):
 
 # Makes sure that the folder APPOINTMENTS_FOLDER_NAME exists, then saves the appointment and signature in it. 
def save_signed_appointment(appointment, signature): # Create the appointments directory if it doesn't already exist - os.makedirs(APPOINTMENTS_FOLDER_NAME, exist_ok=True) + os.makedirs(config.get("APPOINTMENTS_FOLDER_NAME"), exist_ok=True) timestamp = int(time.time()) locator = appointment["locator"] uuid = uuid4().hex # prevent filename collisions - filename = "{}/appointment-{}-{}-{}.json".format(APPOINTMENTS_FOLDER_NAME, timestamp, locator, uuid) + filename = "{}/appointment-{}-{}-{}.json".format(config.get("APPOINTMENTS_FOLDER_NAME"), timestamp, locator, uuid) data = {"appointment": appointment, "signature": signature} with open(filename, "w") as f: @@ -233,7 +268,7 @@ def post_data_to_add_appointment_endpoint(data): # Verify that the signature returned from the watchtower is valid. def check_signature(signature, appointment): try: - pisa_pk_der = load_key_file_data(PISA_PUBLIC_KEY) + pisa_pk_der = load_key_file_data(config.get("PISA_PUBLIC_KEY")) pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der) if pisa_pk is None: @@ -287,7 +322,7 @@ def get_appointment(args): def get_appointment_signature(appointment): try: - sk_der = load_key_file_data(CLI_PRIVATE_KEY) + sk_der = load_key_file_data(config.get("CLI_PRIVATE_KEY")) cli_sk = Cryptographer.load_private_key_der(sk_der) signature = Cryptographer.sign(appointment.serialize(), cli_sk) @@ -309,7 +344,7 @@ def get_appointment_signature(appointment): def get_pk(): try: - cli_pk_der = load_key_file_data(CLI_PUBLIC_KEY) + cli_pk_der = load_key_file_data(config.get("CLI_PUBLIC_KEY")) hex_pk_der = binascii.hexlify(cli_pk_der) return hex_pk_der @@ -345,11 +380,16 @@ def show_usage(): if __name__ == "__main__": - pisa_api_server = DEFAULT_PISA_API_SERVER - pisa_api_port = DEFAULT_PISA_API_PORT + config = load_config(conf) + + pisa_api_server = config.get("DEFAULT_PISA_API_SERVER") + pisa_api_port = config.get("DEFAULT_PISA_API_PORT") commands = ["add_appointment", "get_appointment", "help"] testing_commands = ["generate_dummy_appointment"] + # Create user folder if missing + setup_data_folder(config.get("DATA_FOLDER"), logger) + try: opts, args = getopt(argv[1:], "s:p:h", ["server", "port", "help"]) From a3f2d20499bbee501bae995a923416fa3292cedf Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:05:44 +0100 Subject: [PATCH 42/93] Moves/adds methods to deal with config to common --- common/tools.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/common/tools.py b/common/tools.py index d208272..6c2d0c2 100644 --- a/common/tools.py +++ b/common/tools.py @@ -1,4 +1,5 @@ import re +import os from common.constants import LOCATOR_LEN_HEX @@ -38,3 +39,33 @@ def compute_locator(tx_id): """ return tx_id[:LOCATOR_LEN_HEX] + + +def setup_data_folder(data_folder, logger): + if not os.path.isdir(data_folder): + logger.info("Data folder not found. 
Creating it") + os.makedirs(data_folder, exist_ok=True) + + +def check_conf_fields(conf_fields): + conf_dict = {} + + for field in conf_fields: + value = conf_fields[field]["value"] + correct_type = conf_fields[field]["type"] + + if (value is not None) and isinstance(value, correct_type): + conf_dict[field] = value + else: + err_msg = "{} variable in config is of the wrong type".format(field) + raise ValueError(err_msg) + + return conf_dict + + +def extend_paths(base_path, config_fields): + for key, field in config_fields.items(): + if field.get("path"): + config_fields[key]["value"] = base_path + config_fields[key]["value"] + + return config_fields From fddf2e6968ba849077787fdded6fba62878f3b7c Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:13:30 +0100 Subject: [PATCH 43/93] Fixes logger not properly working for cli The Logger was set to use c_logger and f_logger from pisad, so the cli file logs were never created --- common/logger.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/common/logger.py b/common/logger.py index ac683c2..b175ebf 100644 --- a/common/logger.py +++ b/common/logger.py @@ -1,8 +1,7 @@ import json +import logging from datetime import datetime -from pisa import f_logger, c_logger - class _StructuredMessage: def __init__(self, message, **kwargs): @@ -22,8 +21,10 @@ class Logger: actor (:obj:`str`): the system actor that is logging the event (e.g. ``Watcher``, ``Cryptographer``, ...). """ - def __init__(self, actor=None): + def __init__(self, log_name_prefix, actor=None): self.actor = actor + self.f_logger = logging.getLogger("{}_file_log".format(log_name_prefix)) + self.c_logger = logging.getLogger("{}_console_log".format(log_name_prefix)) def _add_prefix(self, msg): return msg if self.actor is None else "[{}]: {}".format(self.actor, msg) @@ -54,8 +55,8 @@ class Logger: kwargs: a ``key:value`` collection parameters to be added to the output. """ - f_logger.info(self._create_file_message(msg, **kwargs)) - c_logger.info(self._create_console_message(msg, **kwargs)) + self.f_logger.info(self._create_file_message(msg, **kwargs)) + self.c_logger.info(self._create_console_message(msg, **kwargs)) def debug(self, msg, **kwargs): """ @@ -66,8 +67,8 @@ class Logger: kwargs: a ``key:value`` collection parameters to be added to the output. """ - f_logger.debug(self._create_file_message(msg, **kwargs)) - c_logger.debug(self._create_console_message(msg, **kwargs)) + self.f_logger.debug(self._create_file_message(msg, **kwargs)) + self.c_logger.debug(self._create_console_message(msg, **kwargs)) def error(self, msg, **kwargs): """ @@ -78,8 +79,8 @@ class Logger: kwargs: a ``key:value`` collection parameters to be added to the output. """ - f_logger.error(self._create_file_message(msg, **kwargs)) - c_logger.error(self._create_console_message(msg, **kwargs)) + self.f_logger.error(self._create_file_message(msg, **kwargs)) + self.c_logger.error(self._create_console_message(msg, **kwargs)) def warning(self, msg, **kwargs): """ @@ -90,5 +91,5 @@ class Logger: kwargs: a ``key:value`` collection parameters to be added to the output. 
""" - f_logger.warning(self._create_file_message(msg, **kwargs)) - c_logger.warning(self._create_console_message(msg, **kwargs)) + self.f_logger.warning(self._create_file_message(msg, **kwargs)) + self.c_logger.warning(self._create_console_message(msg, **kwargs)) From 1f46e6eb26749cf3b633753f257ead414b3cc887 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:14:34 +0100 Subject: [PATCH 44/93] Moves logging setup to common The setup it's identical for cli and pisad --- common/tools.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/common/tools.py b/common/tools.py index 6c2d0c2..ec02451 100644 --- a/common/tools.py +++ b/common/tools.py @@ -1,5 +1,6 @@ import re import os +import logging from common.constants import LOCATOR_LEN_HEX @@ -69,3 +70,32 @@ def extend_paths(base_path, config_fields): config_fields[key]["value"] = base_path + config_fields[key]["value"] return config_fields + + +def setup_logging(log_file_path, log_name_prefix): + if not isinstance(log_file_path, str): + print(log_file_path) + raise ValueError("Wrong log file path.") + + if not isinstance(log_name_prefix, str): + raise ValueError("Wrong log file name.") + + # Create the file logger + f_logger = logging.getLogger("{}_file_log".format(log_name_prefix)) + f_logger.setLevel(logging.INFO) + + fh = logging.FileHandler(log_file_path) + fh.setLevel(logging.INFO) + fh_formatter = logging.Formatter("%(message)s") + fh.setFormatter(fh_formatter) + f_logger.addHandler(fh) + + # Create the console logger + c_logger = logging.getLogger("{}_console_log".format(log_name_prefix)) + c_logger.setLevel(logging.INFO) + + ch = logging.StreamHandler() + ch.setLevel(logging.INFO) + ch_formatter = logging.Formatter("%(message)s.", "%Y-%m-%d %H:%M:%S") + ch.setFormatter(ch_formatter) + c_logger.addHandler(ch) From 6884db9f58abb3d7b827a8e2ea8256f09f11ba86 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:16:25 +0100 Subject: [PATCH 45/93] Updates logger codebase-wise to match the Logger updates Logger instances now specify the logger prefix so the logger can be properly loaded, fixing the issues with the cli file logger --- pisa/api.py | 5 +++-- pisa/block_processor.py | 4 +++- pisa/carrier.py | 3 ++- pisa/chain_monitor.py | 3 ++- pisa/cleaner.py | 4 +++- pisa/db_manager.py | 4 +++- pisa/inspector.py | 4 ++-- pisa/responder.py | 3 ++- pisa/watcher.py | 3 ++- 9 files changed, 22 insertions(+), 11 deletions(-) diff --git a/pisa/api.py b/pisa/api.py index a70ca34..e31d0d2 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,8 +1,9 @@ import os import json +import logging from flask import Flask, request, abort, jsonify -from pisa import HOST, PORT, logging +from pisa import HOST, PORT, LOG_PREFIX from common.logger import Logger from pisa.inspector import Inspector from common.appointment import Appointment @@ -13,7 +14,7 @@ from common.constants import HTTP_OK, HTTP_BAD_REQUEST, HTTP_SERVICE_UNAVAILABLE # ToDo: #5-add-async-to-api app = Flask(__name__) -logger = Logger("API") +logger = Logger(actor="API", log_name_prefix=LOG_PREFIX) class API: diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 1970b42..c5a7dd1 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,8 +1,10 @@ from common.logger import Logger + +from pisa import LOG_PREFIX from pisa.tools import bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException -logger = Logger("BlockProcessor") +logger = Logger(actor="BlockProcessor", log_name_prefix=LOG_PREFIX) 
class BlockProcessor: diff --git a/pisa/carrier.py b/pisa/carrier.py index 00602e9..dec4ba6 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,10 +1,11 @@ +from pisa import LOG_PREFIX from pisa.rpc_errors import * from common.logger import Logger from pisa.tools import bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION, RPC_TX_REORGED_AFTER_BROADCAST -logger = Logger("Carrier") +logger = Logger(actor="Carrier", log_name_prefix=LOG_PREFIX) # FIXME: This class is not fully covered by unit tests diff --git a/pisa/chain_monitor.py b/pisa/chain_monitor.py index 689a223..22ef377 100644 --- a/pisa/chain_monitor.py +++ b/pisa/chain_monitor.py @@ -2,11 +2,12 @@ import zmq import binascii from threading import Thread, Event, Condition +from pisa import LOG_PREFIX from common.logger import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT, POLLING_DELTA, BLOCK_WINDOW_SIZE from pisa.block_processor import BlockProcessor -logger = Logger("ChainMonitor") +logger = Logger(actor="ChainMonitor", log_name_prefix=LOG_PREFIX) class ChainMonitor: diff --git a/pisa/cleaner.py b/pisa/cleaner.py index d2e6925..777834c 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,6 +1,8 @@ +from pisa import LOG_PREFIX + from common.logger import Logger -logger = Logger("Cleaner") +logger = Logger(actor="Cleaner", log_name_prefix=LOG_PREFIX) class Cleaner: diff --git a/pisa/db_manager.py b/pisa/db_manager.py index 2c693b6..983a26e 100644 --- a/pisa/db_manager.py +++ b/pisa/db_manager.py @@ -1,9 +1,11 @@ import json import plyvel +from pisa import LOG_PREFIX + from common.logger import Logger -logger = Logger("DBManager") +logger = Logger(actor="DBManager", log_name_prefix=LOG_PREFIX) WATCHER_PREFIX = "w" WATCHER_LAST_BLOCK_KEY = "bw" diff --git a/pisa/inspector.py b/pisa/inspector.py index fcc570e..dfdb0a8 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -4,12 +4,12 @@ from binascii import unhexlify from common.constants import LOCATOR_LEN_HEX from common.cryptographer import Cryptographer -from pisa import errors +from pisa import errors, LOG_PREFIX from common.logger import Logger from common.appointment import Appointment from pisa.block_processor import BlockProcessor -logger = Logger("Inspector") +logger = Logger(actor="Inspector", log_name_prefix=LOG_PREFIX) # FIXME: The inspector logs the wrong messages sent form the users. A possible attack surface would be to send a really # long field that, even if not accepted by PISA, would be stored in the logs. 
This is a possible DoS surface diff --git a/pisa/responder.py b/pisa/responder.py index 5d4ac9d..1198553 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -2,6 +2,7 @@ import json from queue import Queue from threading import Thread +from pisa import LOG_PREFIX from common.logger import Logger from pisa.cleaner import Cleaner from pisa.carrier import Carrier @@ -10,7 +11,7 @@ from pisa.block_processor import BlockProcessor CONFIRMATIONS_BEFORE_RETRY = 6 MIN_CONFIRMATIONS = 6 -logger = Logger("Responder") +logger = Logger(actor="Responder", log_name_prefix=LOG_PREFIX) class TransactionTracker: diff --git a/pisa/watcher.py b/pisa/watcher.py index c0e852a..86a5c40 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -8,11 +8,12 @@ from common.tools import compute_locator from common.logger import Logger +from pisa import LOG_PREFIX from pisa.cleaner import Cleaner from pisa.responder import Responder from pisa.block_processor import BlockProcessor -logger = Logger("Watcher") +logger = Logger(actor="Watcher", log_name_prefix=LOG_PREFIX) class Watcher: From 00a989e1b23577ae9d0b13a9f96c974283ddcdc9 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:27:04 +0100 Subject: [PATCH 46/93] Updates pisad and __init__ to put together the log and config fixes The setup of the logs and the parsing of the config file are closely related. The former need info from the later to be created, and needs to be setup only once per pisa instance. In the same way, the later need to only be loaded and validated once per pisa intance and contains info to setup the logs. Intead of setting up the logs in init and loading the config file in pisad, now both are dealt with in __init__ --- pisa/__init__.py | 48 +++++++++++++++++++++--------------- pisa/pisad.py | 63 +++++++----------------------------------------- 2 files changed, 38 insertions(+), 73 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index dd06913..e7d380b 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,27 +1,37 @@ -import logging - -from pisa.utils.auth_proxy import AuthServiceProxy +import os import pisa.conf as conf +from common.tools import check_conf_fields, setup_logging, extend_paths +from pisa.utils.auth_proxy import AuthServiceProxy HOST = "localhost" PORT = 9814 +LOG_PREFIX = "pisa" -# Create the file logger -f_logger = logging.getLogger("pisa_file_log") -f_logger.setLevel(logging.INFO) +# Load config fields +conf_fields = { + "BTC_RPC_USER": {"value": conf.BTC_RPC_USER, "type": str}, + "BTC_RPC_PASSWD": {"value": conf.BTC_RPC_PASSWD, "type": str}, + "BTC_RPC_HOST": {"value": conf.BTC_RPC_HOST, "type": str}, + "BTC_RPC_PORT": {"value": conf.BTC_RPC_PORT, "type": int}, + "BTC_NETWORK": {"value": conf.BTC_NETWORK, "type": str}, + "FEED_PROTOCOL": {"value": conf.FEED_PROTOCOL, "type": str}, + "FEED_ADDR": {"value": conf.FEED_ADDR, "type": str}, + "FEED_PORT": {"value": conf.FEED_PORT, "type": int}, + "DATA_FOLDER": {"value": conf.DATA_FOLDER, "type": str}, + "MAX_APPOINTMENTS": {"value": conf.MAX_APPOINTMENTS, "type": int}, + "EXPIRY_DELTA": {"value": conf.EXPIRY_DELTA, "type": int}, + "MIN_TO_SELF_DELAY": {"value": conf.MIN_TO_SELF_DELAY, "type": int}, + "SERVER_LOG_FILE": {"value": conf.SERVER_LOG_FILE, "type": str, "path": True}, + "PISA_SECRET_KEY": {"value": conf.PISA_SECRET_KEY, "type": str, "path": True}, + "DB_PATH": {"value": conf.DB_PATH, "type": str, "path": True}, +} -fh = logging.FileHandler(conf.SERVER_LOG_FILE) -fh.setLevel(logging.INFO) -fh_formatter = 
logging.Formatter("%(message)s") -fh.setFormatter(fh_formatter) -f_logger.addHandler(fh) +# Expand user (~) if found and check fields are correct +conf_fields["DATA_FOLDER"]["value"] = os.path.expanduser(conf_fields["DATA_FOLDER"]["value"]) +# Extend relative paths +conf_fields = extend_paths(conf_fields["DATA_FOLDER"]["value"], conf_fields) -# Create the console logger -c_logger = logging.getLogger("pisa_console_log") -c_logger.setLevel(logging.INFO) +# Sanity check fields and build config dictionary +config = check_conf_fields(conf_fields) -ch = logging.StreamHandler() -ch.setLevel(logging.INFO) -ch_formatter = logging.Formatter("%(message)s.", "%Y-%m-%d %H:%M:%S") -ch.setFormatter(ch_formatter) -c_logger.addHandler(ch) +setup_logging(config.get("SERVER_LOG_FILE"), LOG_PREFIX) diff --git a/pisa/pisad.py b/pisa/pisad.py index 6be2c98..1c35000 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,21 +1,20 @@ -import os from getopt import getopt from sys import argv, exit from signal import signal, SIGINT, SIGQUIT, SIGTERM from common.logger import Logger -from common.tools import check_conf_fields, setup_data_folder +from common.tools import setup_data_folder +from pisa import config, LOG_PREFIX from pisa.api import API from pisa.watcher import Watcher from pisa.builder import Builder -import pisa.conf as conf from pisa.db_manager import DBManager from pisa.chain_monitor import ChainMonitor from pisa.block_processor import BlockProcessor from pisa.tools import can_connect_to_bitcoind, in_correct_network -logger = Logger("Daemon") +logger = Logger(actor="Daemon", log_name_prefix=LOG_PREFIX) def handle_signals(signal_received, frame): @@ -27,49 +26,6 @@ def handle_signals(signal_received, frame): exit(0) -def load_config(config): - """ - Looks through all of the config options to make sure they contain the right type of data and builds a config - dictionary. - - Args: - config (:obj:`module`): It takes in a config module object. - - Returns: - :obj:`dict` A dictionary containing the config values. 
- """ - - conf_dict = {} - - data_folder = config.DATA_FOLDER - if isinstance(data_folder, str): - data_folder = os.path.expanduser(data_folder) - else: - raise ValueError("The provided user folder is invalid.") - - conf_fields = { - "BTC_RPC_USER": {"value": config.BTC_RPC_USER, "type": str}, - "BTC_RPC_PASSWD": {"value": config.BTC_RPC_PASSWD, "type": str}, - "BTC_RPC_HOST": {"value": config.BTC_RPC_HOST, "type": str}, - "BTC_RPC_PORT": {"value": config.BTC_RPC_PORT, "type": int}, - "BTC_NETWORK": {"value": config.BTC_NETWORK, "type": str}, - "FEED_PROTOCOL": {"value": config.FEED_PROTOCOL, "type": str}, - "FEED_ADDR": {"value": config.FEED_ADDR, "type": str}, - "FEED_PORT": {"value": config.FEED_PORT, "type": int}, - "DATA_FOLDER": {"value": data_folder, "type": str}, - "MAX_APPOINTMENTS": {"value": config.MAX_APPOINTMENTS, "type": int}, - "EXPIRY_DELTA": {"value": config.EXPIRY_DELTA, "type": int}, - "MIN_TO_SELF_DELAY": {"value": config.MIN_TO_SELF_DELAY, "type": int}, - "SERVER_LOG_FILE": {"value": data_folder, "type": str}, - "PISA_SECRET_KEY": {"value": data_folder + config.PISA_SECRET_KEY, "type": str}, - "DB_PATH": {"value": data_folder + config.DB_PATH, "type": str}, - } - - check_conf_fields(conf_fields, logger) - - return conf_dict - - def main(): global db_manager, chain_monitor @@ -77,16 +33,15 @@ def main(): signal(SIGTERM, handle_signals) signal(SIGQUIT, handle_signals) - pisa_config = load_config(conf) logger.info("Starting PISA") - setup_data_folder(pisa_config.get("DATA_FOLDER"), logger) - db_manager = DBManager(pisa_config.get("DB_PATH")) + setup_data_folder(config.get("DATA_FOLDER"), logger) + db_manager = DBManager(config.get("DB_PATH")) if not can_connect_to_bitcoind(): logger.error("Can't connect to bitcoind. Shutting down") - elif not in_correct_network(pisa_config.get("BTC_NETWORK")): + elif not in_correct_network(config.get("BTC_NETWORK")): logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: @@ -98,10 +53,10 @@ def main(): watcher_appointments_data = db_manager.load_watcher_appointments() responder_trackers_data = db_manager.load_responder_trackers() - with open(pisa_config.get("PISA_SECRET_KEY"), "rb") as key_file: + with open(config.get("PISA_SECRET_KEY"), "rb") as key_file: secret_key_der = key_file.read() - watcher = Watcher(db_manager, chain_monitor, secret_key_der, pisa_config) + watcher = Watcher(db_manager, chain_monitor, secret_key_der, config) chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep) @@ -147,7 +102,7 @@ def main(): watcher.block_queue = Builder.build_block_queue(missed_blocks_watcher) # Fire the API - API(watcher, config=pisa_config).start() + API(watcher, config=config).start() except Exception as e: logger.error("An error occurred: {}. 
Shutting down".format(e)) From 836048c54d0fe91e693f7228124a29ef8e4fa17b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:31:47 +0100 Subject: [PATCH 47/93] Same as 00a989e1b23577ae9d0b13a9f96c974283ddcdc9 but for the cli --- apps/cli/__init__.py | 50 +++++++++++++++++++------------------------- apps/cli/pisa_cli.py | 50 +++----------------------------------------- 2 files changed, 25 insertions(+), 75 deletions(-) diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py index 2c9e149..df7cf41 100644 --- a/apps/cli/__init__.py +++ b/apps/cli/__init__.py @@ -1,33 +1,27 @@ -import logging +import os +import apps.cli.conf as conf +from common.tools import extend_paths, check_conf_fields, setup_logging -# PISA-SERVER -DEFAULT_PISA_API_SERVER = "btc.pisa.watch" -DEFAULT_PISA_API_PORT = 9814 +LOG_PREFIX = "cli" -# PISA-CLI -CLIENT_LOG_FILE = "pisa-cli.log" -APPOINTMENTS_FOLDER_NAME = "appointments" +# Load config fields +conf_fields = { + "DEFAULT_PISA_API_SERVER": {"value": conf.DEFAULT_PISA_API_SERVER, "type": str}, + "DEFAULT_PISA_API_PORT": {"value": conf.DEFAULT_PISA_API_PORT, "type": int}, + "DATA_FOLDER": {"value": conf.DATA_FOLDER, "type": str}, + "CLIENT_LOG_FILE": {"value": conf.CLIENT_LOG_FILE, "type": str, "path": True}, + "APPOINTMENTS_FOLDER_NAME": {"value": conf.APPOINTMENTS_FOLDER_NAME, "type": str, "path": True}, + "CLI_PUBLIC_KEY": {"value": conf.CLI_PUBLIC_KEY, "type": str, "path": True}, + "CLI_PRIVATE_KEY": {"value": conf.CLI_PRIVATE_KEY, "type": str, "path": True}, + "PISA_PUBLIC_KEY": {"value": conf.PISA_PUBLIC_KEY, "type": str, "path": True}, +} -CLI_PUBLIC_KEY = "cli_pk.der" -CLI_PRIVATE_KEY = "cli_sk.der" -PISA_PUBLIC_KEY = "pisa_pk.der" +# Expand user (~) if found and check fields are correct +conf_fields["DATA_FOLDER"]["value"] = os.path.expanduser(conf_fields["DATA_FOLDER"]["value"]) +# Extend relative paths +conf_fields = extend_paths(conf_fields["DATA_FOLDER"]["value"], conf_fields) -# Create the file logger -f_logger = logging.getLogger("cli_file_log") -f_logger.setLevel(logging.INFO) +# Sanity check fields and build config dictionary +config = check_conf_fields(conf_fields) -fh = logging.FileHandler(CLIENT_LOG_FILE) -fh.setLevel(logging.INFO) -fh_formatter = logging.Formatter("%(message)s") -fh.setFormatter(fh_formatter) -f_logger.addHandler(fh) - -# Create the console logger -c_logger = logging.getLogger("cli_console_log") -c_logger.setLevel(logging.INFO) - -ch = logging.StreamHandler() -ch.setLevel(logging.INFO) -ch_formatter = logging.Formatter("%(asctime)s %(message)s.", "%Y-%m-%d %H:%M:%S") -ch.setFormatter(ch_formatter) -c_logger.addHandler(ch) +setup_logging(config.get("CLIENT_LOG_FILE"), LOG_PREFIX) diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py index 742f562..6bc8346 100644 --- a/apps/cli/pisa_cli.py +++ b/apps/cli/pisa_cli.py @@ -9,24 +9,18 @@ from getopt import getopt, GetoptError from requests import ConnectTimeout, ConnectionError from uuid import uuid4 +from apps.cli import config, LOG_PREFIX from apps.cli.help import help_add_appointment, help_get_appointment from apps.cli.blob import Blob -import apps.cli.conf as conf from common.logger import Logger from common.appointment import Appointment from common.cryptographer import Cryptographer -from common.tools import ( - check_sha256_hex_format, - check_locator_format, - compute_locator, - check_conf_fields, - setup_data_folder, -) +from common.tools import check_sha256_hex_format, check_locator_format, compute_locator, setup_data_folder HTTP_OK = 200 -logger = 
Logger("Client") +logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX) # FIXME: TESTING ENDPOINT, WON'T BE THERE IN PRODUCTION @@ -53,42 +47,6 @@ def generate_dummy_appointment(): logger.info("\nData stored in dummy_appointment_data.json") -def load_config(config): - """ - Looks through all of the config options to make sure they contain the right type of data and builds a config - dictionary. - - Args: - config (:obj:`module`): It takes in a config module object. - - Returns: - :obj:`dict` A dictionary containing the config values. - """ - - conf_dict = {} - - data_folder = config.DATA_FOLDER - if isinstance(data_folder, str): - data_folder = os.path.expanduser(data_folder) - else: - raise ValueError("The provided user folder is invalid.") - - conf_fields = { - "DEFAULT_PISA_API_SERVER": {"value": config.DEFAULT_PISA_API_SERVER, "type": str}, - "DEFAULT_PISA_API_PORT": {"value": config.DEFAULT_PISA_API_PORT, "type": int}, - "DATA_FOLDER": {"value": data_folder, "type": str}, - "CLIENT_LOG_FILE": {"value": data_folder + config.CLIENT_LOG_FILE, "type": str}, - "APPOINTMENTS_FOLDER_NAME": {"value": data_folder + config.APPOINTMENTS_FOLDER_NAME, "type": str}, - "CLI_PUBLIC_KEY": {"value": data_folder + config.CLI_PUBLIC_KEY, "type": str}, - "CLI_PRIVATE_KEY": {"value": data_folder + config.CLI_PRIVATE_KEY, "type": str}, - "PISA_PUBLIC_KEY": {"value": data_folder + config.PISA_PUBLIC_KEY, "type": str}, - } - - check_conf_fields(conf_fields, logger) - - return conf_dict - - # Loads and returns Pisa keys from disk def load_key_file_data(file_name): try: @@ -380,8 +338,6 @@ def show_usage(): if __name__ == "__main__": - config = load_config(conf) - pisa_api_server = config.get("DEFAULT_PISA_API_SERVER") pisa_api_port = config.get("DEFAULT_PISA_API_PORT") commands = ["add_appointment", "get_appointment", "help"] From 3185ae124d5e2240c220009fb35e3c8b6b06b01b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:34:53 +0100 Subject: [PATCH 48/93] Fixes paths on cli tests --- test/apps/cli/unit/test_pisa_cli.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_pisa_cli.py index 4927c7a..0ce40c6 100644 --- a/test/apps/cli/unit/test_pisa_cli.py +++ b/test/apps/cli/unit/test_pisa_cli.py @@ -154,12 +154,13 @@ def test_load_key_file_data(): def test_save_signed_appointment(monkeypatch): - monkeypatch.setattr(pisa_cli, "APPOINTMENTS_FOLDER_NAME", "test_appointments") + appointments_folder = "test_appointments_receipts" + pisa_cli.config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder pisa_cli.save_signed_appointment(dummy_appointment.to_dict(), get_dummy_signature()) # In folder "Appointments," grab all files and print them. - files = os.listdir("test_appointments") + files = os.listdir(appointments_folder) found = False for f in files: @@ -169,10 +170,10 @@ def test_save_signed_appointment(monkeypatch): assert found # If "appointments" directory doesn't exist, function should create it. - assert os.path.exists("test_appointments") + assert os.path.exists(appointments_folder) # Delete test directory once we're done. 
- shutil.rmtree("test_appointments") + shutil.rmtree(appointments_folder) def test_parse_add_appointment_args(): From 9be2c2475fa4c3529f3e040098b8e2ad9438c1cc Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:35:32 +0100 Subject: [PATCH 49/93] Removes logger instance from test_appointment --- test/common/unit/test_appointment.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/common/unit/test_appointment.py b/test/common/unit/test_appointment.py index 2dea9b0..8087138 100644 --- a/test/common/unit/test_appointment.py +++ b/test/common/unit/test_appointment.py @@ -3,7 +3,6 @@ import struct import binascii from pytest import fixture -from pisa import c_logger from common.appointment import Appointment from pisa.encrypted_blob import EncryptedBlob @@ -12,9 +11,6 @@ from test.pisa.unit.conftest import get_random_value_hex from common.constants import LOCATOR_LEN_BYTES -c_logger.disabled = True - - # Not much to test here, adding it for completeness @fixture def appointment_data(): From 96ab0039e7aab5d09896dad544d22af00b5af652 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 18:36:18 +0100 Subject: [PATCH 50/93] Updates config file params for testing --- test/pisa/unit/conftest.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/pisa/unit/conftest.py b/test/pisa/unit/conftest.py index e70d2c7..3e373e1 100644 --- a/test/pisa/unit/conftest.py +++ b/test/pisa/unit/conftest.py @@ -1,3 +1,4 @@ +import os import pytest import random import requests @@ -161,6 +162,7 @@ def generate_dummy_tracker(): def get_config(): + data_folder = os.path.expanduser("~/.pisa_btc") config = { "BTC_RPC_USER": "username", "BTC_RPC_PASSWD": "password", @@ -170,13 +172,12 @@ def get_config(): "FEED_PROTOCOL": "tcp", "FEED_ADDR": "127.0.0.1", "FEED_PORT": 28332, + "DATA_FOLDER": data_folder, "MAX_APPOINTMENTS": 100, "EXPIRY_DELTA": 6, "MIN_TO_SELF_DELAY": 20, - "SERVER_LOG_FILE": "pisa.log", - "PISA_SECRET_KEY": "pisa_sk.der", - "CLIENT_LOG_FILE": "pisa.log", - "TEST_LOG_FILE": "test.log", + "SERVER_LOG_FILE": data_folder + "pisa.log", + "PISA_SECRET_KEY": data_folder + "pisa_sk.der", "DB_PATH": "appointments", } From dce7b4d39ed0e351baad505c028e966a51e4a0de Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 19:00:55 +0100 Subject: [PATCH 51/93] Adds missing tools docs --- common/tools.py | 57 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/common/tools.py b/common/tools.py index ec02451..8e0617c 100644 --- a/common/tools.py +++ b/common/tools.py @@ -12,7 +12,7 @@ def check_sha256_hex_format(value): value(:mod:`str`): the value to be checked. Returns: - :mod:`bool`: Whether or not the value matches the format. + :obj:`bool`: Whether or not the value matches the format. """ return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{64}$", value) is not None @@ -25,7 +25,7 @@ def check_locator_format(value): value(:mod:`str`): the value to be checked. Returns: - :mod:`bool`: Whether or not the value matches the format. + :obj:`bool`: Whether or not the value matches the format. """ return isinstance(value, str) and re.match(r"^[0-9A-Fa-f]{32}$", value) is not None @@ -36,19 +36,44 @@ def compute_locator(tx_id): Args: tx_id (:obj:`str`): the transaction id used to compute the locator. Returns: - (:obj:`str`): The computed locator. + :obj:`str`: The computed locator. 
""" return tx_id[:LOCATOR_LEN_HEX] def setup_data_folder(data_folder, logger): + """ + Create a data folder for either the client or the server side if the folder does not exists. + + Args: + data_folder (:obj:`str`): the path of the folder + logger (:obj: `Logger `): a logger instance to notify about the folder creation. + """ + if not os.path.isdir(data_folder): logger.info("Data folder not found. Creating it") os.makedirs(data_folder, exist_ok=True) def check_conf_fields(conf_fields): + """ + Checks that the provided configuration field have the right type. + + Args: + conf_fields (:obj:`dict`): a dictionary populated with the configuration file params and the expected types. + The format is as follows: + + {"field0": {"value": value_from_conf_file, "type": expected_type, ...}} + + Returns: + :obj:`dict`: A dictionary with the same keys as the provided one, but containing only the "value" field as value + if the provided ``conf_fields`` where correct. + + Raises: + ValueError: If any of the dictionary elements does not have the expected type + """ + conf_dict = {} for field in conf_fields: @@ -65,14 +90,38 @@ def check_conf_fields(conf_fields): def extend_paths(base_path, config_fields): + """ + Extends the relative paths of a given ``config_fields`` dictionary with a diven ``base_path``. + + Paths in the config file are based on DATA_PATH, this method extends them so they are all absolute. + + Args: + base_path (:obj:`str`): the base path to prepend the other paths. + config_fields (:obj:`dict`): a dictionary of configuration fields containing a ``path`` flag, as follows: + {"field0": {"value": value_from_conf_file, "path": True, ...}} + + Returns: + :obj:`dict`: A ``config_fields`` with the flagged paths updated. + """ + for key, field in config_fields.items(): - if field.get("path"): + if field.get("path") is True: config_fields[key]["value"] = base_path + config_fields[key]["value"] return config_fields def setup_logging(log_file_path, log_name_prefix): + """ + Setups a couple of loggers (console and file) given a prefix and a file path. The log names are: + + prefix | _file_log and prefix | _console_log + + Args: + log_file_path (:obj:`str`): the path of the file to output the file log. + log_name_prefix (:obj:`str`): the prefix to identify the log. 
+    """
+
     if not isinstance(log_file_path, str):
         print(log_file_path)
         raise ValueError("Wrong log file path.")

From 5c75b1f40da1c935b0275e4e1b8d8e764fcd26d7 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 23 Jan 2020 19:32:40 +0100
Subject: [PATCH 52/93] Removes Logger dependency from setup_data_folder

Logger was only used to log when a new folder was created, and forced
the setup to be done in the main of pisad and the cli instead of
__init__, which seems a better fit

---
 apps/cli/__init__.py | 3 ++-
 apps/cli/pisa_cli.py | 5 +----
 common/tools.py      | 4 +---
 pisa/__init__.py     | 3 ++-
 pisa/pisad.py        | 3 ---
 5 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py
index df7cf41..0861ee2 100644
--- a/apps/cli/__init__.py
+++ b/apps/cli/__init__.py
@@ -1,6 +1,6 @@
 import os
 import apps.cli.conf as conf
-from common.tools import extend_paths, check_conf_fields, setup_logging
+from common.tools import extend_paths, check_conf_fields, setup_logging, setup_data_folder

 LOG_PREFIX = "cli"

@@ -24,4 +24,5 @@ conf_fields = extend_paths(conf_fields["DATA_FOLDER"]["value"], conf_fields)
 # Sanity check fields and build config dictionary
 config = check_conf_fields(conf_fields)

+setup_data_folder(config.get("DATA_FOLDER"))
 setup_logging(config.get("CLIENT_LOG_FILE"), LOG_PREFIX)
diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index 6bc8346..41fce7f 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -16,7 +16,7 @@ from apps.cli.blob import Blob
 from common.logger import Logger
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
-from common.tools import check_sha256_hex_format, check_locator_format, compute_locator, setup_data_folder
+from common.tools import check_sha256_hex_format, check_locator_format, compute_locator

 HTTP_OK = 200

@@ -343,9 +343,6 @@ if __name__ == "__main__":
     commands = ["add_appointment", "get_appointment", "help"]
     testing_commands = ["generate_dummy_appointment"]

-    # Create user folder if missing
-    setup_data_folder(config.get("DATA_FOLDER"), logger)
-
     try:
         opts, args = getopt(argv[1:], "s:p:h", ["server", "port", "help"])

diff --git a/common/tools.py b/common/tools.py
index 8e0617c..0c131da 100644
--- a/common/tools.py
+++ b/common/tools.py
@@ -42,17 +42,15 @@ def compute_locator(tx_id):
     return tx_id[:LOCATOR_LEN_HEX]


-def setup_data_folder(data_folder, logger):
+def setup_data_folder(data_folder):
     """
     Creates a data folder for either the client or the server side if the folder does not exist.

     Args:
         data_folder (:obj:`str`): the path of the folder.
-        logger (:obj: `Logger `): a logger instance to notify about the folder creation.
     """

     if not os.path.isdir(data_folder):
-        logger.info("Data folder not found. 
Creating it") os.makedirs(data_folder, exist_ok=True) diff --git a/pisa/__init__.py b/pisa/__init__.py index e7d380b..2e5149f 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,6 +1,6 @@ import os import pisa.conf as conf -from common.tools import check_conf_fields, setup_logging, extend_paths +from common.tools import check_conf_fields, setup_logging, extend_paths, setup_data_folder from pisa.utils.auth_proxy import AuthServiceProxy HOST = "localhost" @@ -34,4 +34,5 @@ conf_fields = extend_paths(conf_fields["DATA_FOLDER"]["value"], conf_fields) # Sanity check fields and build config dictionary config = check_conf_fields(conf_fields) +setup_data_folder(config.get("DATA_FOLDER")) setup_logging(config.get("SERVER_LOG_FILE"), LOG_PREFIX) diff --git a/pisa/pisad.py b/pisa/pisad.py index 1c35000..0335832 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -3,7 +3,6 @@ from sys import argv, exit from signal import signal, SIGINT, SIGQUIT, SIGTERM from common.logger import Logger -from common.tools import setup_data_folder from pisa import config, LOG_PREFIX from pisa.api import API @@ -34,8 +33,6 @@ def main(): signal(SIGQUIT, handle_signals) logger.info("Starting PISA") - - setup_data_folder(config.get("DATA_FOLDER"), logger) db_manager = DBManager(config.get("DB_PATH")) if not can_connect_to_bitcoind(): From 14724ceda9b963738c95708121360059807bb22d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 19:54:40 +0100 Subject: [PATCH 53/93] Adds missing common/tools unit tests and removes pisad tests `test_pisad.py` was only covering config parsing tests, that are now part of `common/tools.py`. --- test/common/unit/test_tools.py | 116 ++++++++++++++++++++++++++++++++- test/pisa/unit/test_pisad.py | 51 --------------- 2 files changed, 115 insertions(+), 52 deletions(-) delete mode 100644 test/pisa/unit/test_pisad.py diff --git a/test/common/unit/test_tools.py b/test/common/unit/test_tools.py index eebdab9..b4d2ad4 100644 --- a/test/common/unit/test_tools.py +++ b/test/common/unit/test_tools.py @@ -1,7 +1,26 @@ -from common.tools import check_sha256_hex_format +import os +import pytest +import logging +from copy import deepcopy + +from pisa import conf_fields + +from common.constants import LOCATOR_LEN_BYTES +from common.tools import ( + check_sha256_hex_format, + check_locator_format, + compute_locator, + setup_data_folder, + check_conf_fields, + extend_paths, + setup_logging, +) from test.common.unit.conftest import get_random_value_hex +conf_fields_copy = deepcopy(conf_fields) + + def test_check_sha256_hex_format(): # Only 32-byte hex encoded strings should pass the test wrong_inputs = [None, str(), 213, 46.67, dict(), "A" * 63, "C" * 65, bytes(), get_random_value_hex(31)] @@ -10,3 +29,98 @@ def test_check_sha256_hex_format(): for v in range(100): assert check_sha256_hex_format(get_random_value_hex(32)) is True + + +def test_check_locator_format(): + # Check that only LOCATOR_LEN_BYTES long string pass the test + + wrong_inputs = [ + None, + str(), + 213, + 46.67, + dict(), + "A" * (2 * LOCATOR_LEN_BYTES - 1), + "C" * (2 * LOCATOR_LEN_BYTES + 1), + bytes(), + get_random_value_hex(LOCATOR_LEN_BYTES - 1), + ] + for wtype in wrong_inputs: + assert check_sha256_hex_format(wtype) is False + + for _ in range(100): + assert check_locator_format(get_random_value_hex(LOCATOR_LEN_BYTES)) is True + + +def test_compute_locator(): + # The best way of checking that compute locator is correct is by using check_locator_format + for _ in range(100): + assert 
check_locator_format(compute_locator(get_random_value_hex(LOCATOR_LEN_BYTES))) is True + + # String of length smaller than LOCATOR_LEN_BYTES bytes must fail + for i in range(1, LOCATOR_LEN_BYTES): + assert check_locator_format(compute_locator(get_random_value_hex(i))) is False + + +def test_setup_data_folder(): + # This method should create a folder if it does not exist, and do nothing otherwise + test_folder = "test_folder" + assert not os.path.isdir(test_folder) + + setup_data_folder(test_folder) + + assert os.path.isdir(test_folder) + + os.rmdir(test_folder) + + +def test_check_conf_fields(): + # The test should work with a valid config_fields (obtained from a valid conf.py) + assert type(check_conf_fields(conf_fields_copy)) == dict + + +def test_bad_check_conf_fields(): + # Create a messed up version of the file that should throw an error. + conf_fields_copy["BTC_RPC_USER"] = 0000 + conf_fields_copy["BTC_RPC_PASSWD"] = "password" + conf_fields_copy["BTC_RPC_HOST"] = 000 + + # We should get a ValueError here. + with pytest.raises(Exception): + check_conf_fields(conf_fields_copy) + + +def test_extend_paths(): + # Test that only items with the path flag are extended + config_fields = { + "foo": {"value": "foofoo"}, + "var": {"value": "varvar", "path": True}, + "foovar": {"value": "foovarfoovar"}, + } + base_path = "base_path/" + extended_config_field = extend_paths(base_path, config_fields) + + for k, field in extended_config_field.items(): + if field.get("path") is True: + assert base_path in field.get("value") + else: + assert base_path not in field.get("value") + + +def test_setup_logging(): + # Check that setup_logging creates two new logs for every prefix + prefix = "foo" + log_file = "var.log" + + f_log_suffix = "_file_log" + c_log_suffix = "_console_log" + + assert len(logging.getLogger(prefix + f_log_suffix).handlers) is 0 + assert len(logging.getLogger(prefix + c_log_suffix).handlers) is 0 + + setup_logging(log_file, prefix) + + assert len(logging.getLogger(prefix + f_log_suffix).handlers) is 1 + assert len(logging.getLogger(prefix + c_log_suffix).handlers) is 1 + + os.remove(log_file) diff --git a/test/pisa/unit/test_pisad.py b/test/pisa/unit/test_pisad.py deleted file mode 100644 index 30db71e..0000000 --- a/test/pisa/unit/test_pisad.py +++ /dev/null @@ -1,51 +0,0 @@ -import importlib -import os -import pytest -from shutil import copyfile - -from pisa.pisad import load_config - -test_conf_file_path = os.getcwd() + "/test/pisa/unit/test_conf.py" - - -def test_load_config(): - # Copy the sample-conf.py file to use as a test config file. - copyfile(os.getcwd() + "/pisa/sample_conf.py", test_conf_file_path) - - import test.pisa.unit.test_conf as conf - - # If the file has all the correct fields and data, it should return a dict. - conf_dict = load_config(conf) - assert type(conf_dict) == dict - - # Delete the file. - os.remove(test_conf_file_path) - - -def test_bad_load_config(): - # Create a messed up version of the file that should throw an error. - with open(test_conf_file_path, "w") as f: - f.write('# bitcoind\nBTC_RPC_USER = 0000\nBTC_RPC_PASSWD = "password"\nBTC_RPC_HOST = 000') - - import test.pisa.unit.test_conf as conf - - importlib.reload(conf) - - with pytest.raises(Exception): - conf_dict = load_config(conf) - - os.remove(test_conf_file_path) - - -def test_empty_load_config(): - # Create an empty version of the file that should throw an error. 
- open(test_conf_file_path, "a") - - import test.pisa.unit.test_conf as conf - - importlib.reload(conf) - - with pytest.raises(Exception): - conf_dict = load_config(conf) - - os.remove(test_conf_file_path) From 3fd84a8d7f1b8d461c15f1e19bf2dbfce19e76d4 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 19:57:21 +0100 Subject: [PATCH 54/93] Modifies e2e tests so pisad can be run and stop from there --- test/pisa/e2e/conftest.py | 9 +++++++++ test/pisa/e2e/test_basic_e2e.py | 11 ++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/test/pisa/e2e/conftest.py b/test/pisa/e2e/conftest.py index cef3237..fbf00c9 100644 --- a/test/pisa/e2e/conftest.py +++ b/test/pisa/e2e/conftest.py @@ -1,8 +1,10 @@ import pytest import random +from multiprocessing import Process from decimal import Decimal, getcontext import pisa.conf as conf +from pisa.pisad import main from pisa.utils.auth_proxy import AuthServiceProxy getcontext().prec = 10 @@ -48,6 +50,13 @@ def create_txs(bitcoin_cli): return signed_commitment_tx, signed_penalty_tx +def run_pisad(): + pisad_process = Process(target=main, daemon=True) + pisad_process.start() + + return pisad_process + + def get_random_value_hex(nbytes): pseudo_random_value = random.getrandbits(8 * nbytes) prv_hex = "{:x}".format(pseudo_random_value) diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index 8c26867..a1b0cec 100644 --- a/test/pisa/e2e/test_basic_e2e.py +++ b/test/pisa/e2e/test_basic_e2e.py @@ -9,13 +9,22 @@ from common.tools import compute_locator from common.appointment import Appointment from common.cryptographer import Cryptographer from pisa.utils.auth_proxy import JSONRPCException -from test.pisa.e2e.conftest import END_TIME_DELTA, build_appointment_data, get_random_value_hex, create_penalty_tx +from test.pisa.e2e.conftest import ( + END_TIME_DELTA, + build_appointment_data, + get_random_value_hex, + create_penalty_tx, + run_pisad, +) # We'll use pisa_cli to add appointments. The expected input format is a list of arguments with a json-encoded # appointment pisa_cli.pisa_api_server = HOST pisa_cli.pisa_api_port = PORT +# Run pisad +pisad_process = run_pisad() + def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr): # Broadcast the commitment transaction and mine a block From 3d13cfbe392bffee1b9f0eaed883315d2152bec9 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 23 Jan 2020 19:58:07 +0100 Subject: [PATCH 55/93] Removes pisad from the commands to be run by circle-ci before e2e tests --- .circleci/config.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index da0ba21..98cb5e0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -97,7 +97,6 @@ jobs: name: Run e2e tests command: | . 
venv/bin/activate
-            python3 -m pisa.pisad &
             pytest test/pisa/e2e/

 #      - store_artifacts:

From ebea93c103ee4ee64e85e302041d8e78c37d012a Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 23 Jan 2020 20:02:05 +0100
Subject: [PATCH 56/93] Updates circle-ci to create cli conf for unit test

---
 .circleci/config.yml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 98cb5e0..12f9be5 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -60,8 +60,10 @@ jobs:

       # Run unit tests
       - run:
-          name: Create pisa config
-          command: cp pisa/sample_conf.py pisa/conf.py
+          name: Creates config files
+          command: |
+            cp pisa/sample_conf.py pisa/conf.py
+            cp apps/cli/sample_conf.py apps/cli/conf.py

       - run:
           name: Run pisa unit tests

From a84d753c4a299c10c1d75af881a622012556abc8 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 23 Jan 2020 20:45:45 +0100
Subject: [PATCH 57/93] Updates generate_key to accept output directory.

It stores the keys in the current dir by default.

---
 apps/generate_key.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/apps/generate_key.py b/apps/generate_key.py
index 74ba84c..30c1b26 100644
--- a/apps/generate_key.py
+++ b/apps/generate_key.py
@@ -30,14 +30,21 @@ def save_pk(pk, filename):

 if __name__ == "__main__":
     name = "pisa"
+    output_dir = "."

-    opts, _ = getopt(argv[1:], "n:", ["name"])
+    opts, _ = getopt(argv[1:], "n:d:", ["name", "dir"])
     for opt, arg in opts:
         if opt in ["-n", "--name"]:
             name = arg

-    SK_FILE_NAME = "../{}_sk.der".format(name)
-    PK_FILE_NAME = "../{}_pk.der".format(name)
+        if opt in ["-d", "--dir"]:
+            output_dir = arg
+
+    if output_dir.endswith("/"):
+        output_dir = output_dir[:-1]
+
+    SK_FILE_NAME = "{}/{}_sk.der".format(output_dir, name)
+    PK_FILE_NAME = "{}/{}_pk.der".format(output_dir, name)

     if os.path.exists(SK_FILE_NAME):
         print('A key with name "{}" already exists. Aborting.'.format(SK_FILE_NAME))

From deb182edda28023d5096b03c530c38e0e73ca7ee Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 23 Jan 2020 20:58:37 +0100
Subject: [PATCH 58/93] Updates circle-ci to store keys in data folder

---
 .circleci/config.yml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 12f9be5..2a67e6f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -89,9 +89,8 @@
           command: |
             . venv/bin/activate
             cp test/pisa/e2e/pisa-conf.py pisa/conf.py
-            cd apps/
-            python3 -m generate_key
-            python3 -m generate_key -n cli
+            python3 -m apps.generate_key -d ~/.pisa_btc/
+            python3 -m apps.generate_key -n cli -d ~/.pisa_btc/

       # Run E2E tests

From ba5aa9f6512adaff56c738d2ba378aace139d2d4 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 24 Jan 2020 13:21:36 +0100
Subject: [PATCH 59/93] Updates builder with new minimal in-memory data from
 #83

- The builder was never modified when the in-memory data was reduced, so
  it was still trying to build data based on the past approach.
- Renames build_block_queue to populate_block_queue and repurposes the
  method

When creating a block queue, a new Queue was created and populated. That
was breaking the link between the Watcher/Responder and the ChainMonitor
since the Queue is defined beforehand.
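To illustrate the repurposing, a minimal sketch of the queue-identity issue (the Consumer class and the block hashes below are placeholders for this example, not code from the repository): rebinding the attribute, as the old build_block_queue did, leaves the ChainMonitor feeding a queue nobody reads, while populating in place keeps the shared reference intact.

    from queue import Queue

    class Consumer:
        def __init__(self):
            # The ChainMonitor keeps a reference to this exact Queue object
            self.block_queue = Queue()

    consumer = Consumer()
    monitor_ref = consumer.block_queue  # what the ChainMonitor would hold

    # Old approach: a brand new Queue replaces the attribute, so the
    # monitor's reference now points to a queue nobody consumes
    consumer.block_queue = Queue()
    assert consumer.block_queue is not monitor_ref

    # New approach: put blocks into the existing queue, link preserved
    consumer = Consumer()
    monitor_ref = consumer.block_queue
    for block_hash in ["aa" * 32, "bb" * 32]:
        consumer.block_queue.put(block_hash)
    assert consumer.block_queue is monitor_ref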
--- pisa/builder.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/pisa/builder.py b/pisa/builder.py index 6f0f2bf..072b638 100644 --- a/pisa/builder.py +++ b/pisa/builder.py @@ -32,14 +32,13 @@ class Builder: locator_uuid_map = {} for uuid, data in appointments_data.items(): - appointment = Appointment.from_dict(data) - appointments[uuid] = appointment + appointments[uuid] = {"locator": data.get("locator"), "end_time": data.get("end_time")} - if appointment.locator in locator_uuid_map: - locator_uuid_map[appointment.locator].append(uuid) + if data.get("locator") in locator_uuid_map: + locator_uuid_map[data.get("locator")].append(uuid) else: - locator_uuid_map[appointment.locator] = [uuid] + locator_uuid_map[data.get("locator")] = [uuid] return appointments, locator_uuid_map @@ -67,33 +66,33 @@ class Builder: tx_tracker_map = {} for uuid, data in tracker_data.items(): - tracker = TransactionTracker.from_dict(data) - trackers[uuid] = tracker + trackers[uuid] = { + "penalty_txid": data.get("penalty_txid"), + "locator": data.get("locator"), + "appointment_end": data.get("appointment_end"), + } - if tracker.penalty_txid in tx_tracker_map: - tx_tracker_map[tracker.penalty_txid].append(uuid) + if data.get("penalty_txid") in tx_tracker_map: + tx_tracker_map[data.get("penalty_txid")].append(uuid) else: - tx_tracker_map[tracker.penalty_txid] = [uuid] + tx_tracker_map[data.get("penalty_txid")] = [uuid] return trackers, tx_tracker_map @staticmethod - def build_block_queue(missed_blocks): + def populate_block_queue(block_queue, missed_blocks): """ - Builds a ``Queue`` of block hashes to initialize the :mod:`Watcher ` or the + Populates a ``Queue`` of block hashes to initialize the :mod:`Watcher ` or the :mod:`Responder ` using backed up data. Args: + block_queue (:obj:`Queue`): a ``Queue`` missed_blocks (:obj:`list`): list of block hashes missed by the Watchtower (do to a crash or shutdown). Returns: :obj:`Queue`: A ``Queue`` containing all the missed blocks hashes. 
""" - block_queue = Queue() - for block in missed_blocks: block_queue.put(block) - - return block_queue From 7c4d4d0aad50d4cd282f66119cf0dcdf50946393 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 24 Jan 2020 13:24:12 +0100 Subject: [PATCH 60/93] Moves awaking / sleeping functionality to their own methods --- pisa/responder.py | 20 +++++++++++++------- pisa/watcher.py | 24 +++++++++++++++--------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index 1198553..d1c2aa3 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -146,6 +146,17 @@ class Responder: self.chain_monitor = chain_monitor self.db_manager = db_manager + def awake(self): + self.asleep = False + self.chain_monitor.responder_asleep = False + Thread(target=self.do_watch).start() + + def sleep(self): + self.asleep = True + self.chain_monitor.responder_asleep = True + + logger.info("No more pending trackers, going back to sleep") + @staticmethod def on_sync(block_hash): """ @@ -265,9 +276,7 @@ class Responder: ) if self.asleep: - self.asleep = False - self.chain_monitor.responder_asleep = False - Thread(target=self.do_watch).start() + self.awake() def do_watch(self): """ @@ -321,10 +330,7 @@ class Responder: prev_block_hash = block.get("hash") # Go back to sleep if there are no more pending trackers - self.asleep = True - self.chain_monitor.responder_asleep = True - - logger.info("No more pending trackers, going back to sleep") + self.sleep() def check_confirmations(self, txs): """ diff --git a/pisa/watcher.py b/pisa/watcher.py index 86a5c40..7dbc68b 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -76,6 +76,19 @@ class Watcher: if not isinstance(responder, Responder): self.responder = Responder(db_manager, chain_monitor) + def awake(self): + self.asleep = False + self.chain_monitor.watcher_asleep = False + Thread(target=self.do_watch).start() + + logger.info("Waking up") + + def sleep(self): + self.asleep = True + self.chain_monitor.watcher_asleep = True + + logger.info("No more pending appointments, going back to sleep") + def add_appointment(self, appointment): """ Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached. @@ -118,11 +131,7 @@ class Watcher: self.locator_uuid_map[appointment.locator] = [uuid] if self.asleep: - self.asleep = False - self.chain_monitor.watcher_asleep = False - Thread(target=self.do_watch).start() - - logger.info("Waking up") + self.awake() self.db_manager.store_watcher_appointment(uuid, appointment.to_json()) self.db_manager.create_append_locator_map(appointment.locator, uuid) @@ -208,10 +217,7 @@ class Watcher: self.db_manager.store_last_block_hash_watcher(block_hash) # Go back to sleep if there are no more appointments - self.asleep = True - self.chain_monitor.watcher_asleep = True - - logger.info("No more pending appointments, going back to sleep") + self.sleep() def get_breaches(self, txids): """ From 89181e6a7e09577548ee203a4609ceb118e5fe88 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 24 Jan 2020 13:25:02 +0100 Subject: [PATCH 61/93] Fixes handling data from db Data obtained from the database was not properly handled in some cases: - If some appointments are accepted and no block is received and the tower is restarted, the data was not loaded. The tower was checking that the Watcher/Responder had a last known block, which may not have been the case. Now best_tip is set as last_known_block on a fresh bootstrap. 
- The Watcher/Responder were not being awakened when loading data from
  the database.
- The block queues were not properly populated

---
 pisa/pisad.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/pisa/pisad.py b/pisa/pisad.py
index 0335832..0eca57b 100644
--- a/pisa/pisad.py
+++ b/pisa/pisad.py
@@ -60,6 +60,10 @@ def main():
         if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0:
             logger.info("Fresh bootstrap")

+            # Set the current tip as the last known block for both on a fresh start
+            db_manager.store_last_block_hash_watcher(BlockProcessor.get_best_block_hash())
+            db_manager.store_last_block_hash_responder(BlockProcessor.get_best_block_hash())
+
         else:
             logger.info("Bootstrapping from backed up data")
             block_processor = BlockProcessor()
@@ -68,11 +72,10 @@
             last_block_responder = db_manager.load_last_block_hash_responder()

             # FIXME: 32-reorgs-offline dropped txs are not used at this point.
-            last_common_ancestor_responder = None
             missed_blocks_responder = None

             # Build Responder with backed up data if found
-            if last_block_responder is not None:
+            if len(responder_trackers_data) != 0:
                 last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor(
                     last_block_responder
                 )
@@ -81,11 +84,12 @@
                 watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers(
                     responder_trackers_data
                 )
-                watcher.responder.block_queue = Builder.build_block_queue(missed_blocks_responder)
+                Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder)
+                watcher.responder.awake()

             # Build Watcher. If the blocks of both match we don't perform the search twice.
-            if last_block_watcher is not None:
-                if last_block_watcher == last_block_responder:
+            if len(watcher_appointments_data) != 0:
+                if last_block_watcher == last_block_responder and missed_blocks_responder is not None:
                     missed_blocks_watcher = missed_blocks_responder
                 else:
                     last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor(
                         last_block_watcher
                     )
@@ -96,7 +100,8 @@
                 watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments(
                     watcher_appointments_data
                 )
-                watcher.block_queue = Builder.build_block_queue(missed_blocks_watcher)
+                Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher)
+                watcher.awake()

             # Fire the API
             API(watcher, config=config).start()

From a718a5c6ae1c2815517d5175ea6024ec657d4161 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 24 Jan 2020 13:32:20 +0100
Subject: [PATCH 62/93] Updates Builder unit tests

---
 test/pisa/unit/test_builder.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/test/pisa/unit/test_builder.py b/test/pisa/unit/test_builder.py
index c45ef47..6cbf073 100644
--- a/test/pisa/unit/test_builder.py
+++ b/test/pisa/unit/test_builder.py
@@ -1,4 +1,5 @@
 from uuid import uuid4
+from queue import Queue

 from pisa.builder import Builder
 from test.pisa.unit.conftest import get_random_value_hex, generate_dummy_appointment, generate_dummy_tracker

@@ -29,8 +30,9 @@ def test_build_appointments():
     # Check that the created appointments match the data
     for uuid, appointment in appointments.items():
         assert uuid in appointments_data.keys()
-        assert appointments_data[uuid] == appointment.to_dict()
-        assert uuid in locator_uuid_map[appointment.locator]
+        assert appointments_data[uuid].get("locator") == appointment.get("locator")
+        assert appointments_data[uuid].get("end_time") == 
appointment.get("end_time") + assert uuid in locator_uuid_map[appointment.get("locator")] def test_build_trackers(): @@ -55,17 +57,18 @@ def test_build_trackers(): # Check that the built trackers match the data for uuid, tracker in trackers.items(): assert uuid in trackers_data.keys() - tracker_dict = tracker.to_dict() - # The locator is not part of the tracker_data found in the database (for now) - assert trackers_data[uuid] == tracker_dict - assert uuid in tx_tracker_map[tracker.penalty_txid] + assert tracker.get("penalty_txid") == trackers_data[uuid].get("penalty_txid") + assert tracker.get("locator") == trackers_data[uuid].get("locator") + assert tracker.get("appointment_end") == trackers_data[uuid].get("appointment_end") + assert uuid in tx_tracker_map[tracker.get("penalty_txid")] -def test_build_block_queue(): +def test_populate_block_queue(): # Create some random block hashes and construct the queue with them blocks = [get_random_value_hex(32) for _ in range(10)] - queue = Builder.build_block_queue(blocks) + queue = Queue() + Builder.populate_block_queue(queue, blocks) # Make sure every block is in the queue and that there are not additional ones while not queue.empty(): From 5d068f5ecca2be34a9e82b4fa2f0e287671d024b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 24 Jan 2020 13:40:08 +0100 Subject: [PATCH 63/93] Adds first test of dealing with pisa with a reboot in the middle --- test/pisa/e2e/test_basic_e2e.py | 41 +++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index a1b0cec..b7e231e 100644 --- a/test/pisa/e2e/test_basic_e2e.py +++ b/test/pisa/e2e/test_basic_e2e.py @@ -209,3 +209,44 @@ def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs) assert len(appointment_info) == 1 assert appointment_info[0].get("status") == "dispute_responded" assert appointment_info[0].get("penalty_rawtx") == penalty_tx1 + + +def test_appointment_shutdown_pisa(create_txs, bitcoin_cli): + global pisad_process + + pisa_pid = pisad_process.pid + + commitment_tx, penalty_tx = create_txs + commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid") + appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) + locator = compute_locator(commitment_tx_id) + + assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + sleep(2) + + # Restart pisa + pisad_process.terminate() + pisad_process = run_pisad() + + assert pisa_pid != pisad_process.pid + + # Check that the appointment is still in the Watcher + appointment_info = get_appointment_info(locator) + + assert appointment_info is not None + assert len(appointment_info) == 1 + assert appointment_info[0].get("status") == "being_watched" + + # Trigger appointment after restart + new_addr = bitcoin_cli.getnewaddress() + broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr) + + # The appointment should have been removed since the penalty_tx was malformed. 
+    sleep(1)
     appointment_info = get_appointment_info(locator)

     assert appointment_info is not None
     assert len(appointment_info) == 1
     assert appointment_info[0].get("status") == "dispute_responded"

     pisad_process.terminate()

From 084295b889aaa3adda6aa108c02faf2f2d3cc2ae Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 24 Jan 2020 14:33:04 +0100
Subject: [PATCH 64/93] Adds test for triggering appointment with pisad offline

---
 test/pisa/e2e/test_basic_e2e.py | 41 ++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index b7e231e..dee74b2 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -211,7 +211,7 @@
     assert appointment_info[0].get("penalty_rawtx") == penalty_tx1


-def test_appointment_shutdown_pisa(create_txs, bitcoin_cli):
+def test_appointment_shutdown_pisa_trigger_back_online(create_txs, bitcoin_cli):
     global pisad_process

     pisa_pid = pisad_process.pid
@@ -222,7 +222,6 @@
     locator = compute_locator(commitment_tx_id)

     assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
-    sleep(2)

     # Restart pisa
     pisad_process.terminate()
     pisad_process = run_pisad()
@@ -241,7 +240,43 @@
     new_addr = bitcoin_cli.getnewaddress()
     broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)

-    # The appointment should have been removed since the penalty_tx was malformed.
+    # The appointment should have been moved to the Responder
     sleep(1)
     appointment_info = get_appointment_info(locator)

     assert appointment_info is not None
     assert len(appointment_info) == 1
     assert appointment_info[0].get("status") == "dispute_responded"
+
+
+def test_appointment_shutdown_pisa_trigger_while_offline(create_txs, bitcoin_cli):
+    global pisad_process
+
+    pisa_pid = pisad_process.pid
+
+    commitment_tx, penalty_tx = create_txs
+    commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
+    appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
+    locator = compute_locator(commitment_tx_id)
+
+    assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
+
+    # Check that the appointment is still in the Watcher
+    appointment_info = get_appointment_info(locator)
+    assert appointment_info is not None
+    assert len(appointment_info) == 1
+    assert appointment_info[0].get("status") == "being_watched"
+
+    # Shutdown and trigger
+    pisad_process.terminate()
+    new_addr = bitcoin_cli.getnewaddress()
+    broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
+
+    # Restart
+    pisad_process = run_pisad()
+    assert pisa_pid != pisad_process.pid
+
+    # The appointment should have been moved to the Responder
+    sleep(1)
+    appointment_info = get_appointment_info(locator)
+
+    assert appointment_info is not None
+    assert len(appointment_info) == 1
+    assert appointment_info[0].get("status") == "dispute_responded"

From caab7a8cdd255fc647616a799eaa6ef4d5760c71 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 31 Jan 2020 11:05:26 +0100
Subject: [PATCH 65/93] Adds a method to update the states of the Watcher and
 Responder if both have missed blocks

The Watcher and Responder were not properly bootstrapped from db data if
both were missing blocks to process. Since some appointments may need to
pass from the Watcher to the Responder during this process, they need to
be brought up to date at the same time, block after block.
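A rough sketch of the intended bootstrap decision follows (the missed-block lists are assumed to come from the BlockProcessor; the actual wiring lives in pisad and may differ):

    # missed_blocks_* would come from the BlockProcessor, e.g. by walking
    # forward from the last common ancestor of each actor's last known block
    if missed_blocks_watcher and missed_blocks_responder:
        # Both actors are behind: replay block by block so appointments
        # can cross from the Watcher to the Responder at the right height
        Builder.update_states(watcher, missed_blocks_watcher, missed_blocks_responder)
    elif missed_blocks_watcher:
        Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher)
        watcher.awake()
    elif missed_blocks_responder:
        Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder)
        watcher.responder.awake()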
--- pisa/builder.py | 68 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 62 insertions(+), 6 deletions(-) diff --git a/pisa/builder.py b/pisa/builder.py index 072b638..39298dd 100644 --- a/pisa/builder.py +++ b/pisa/builder.py @@ -1,9 +1,3 @@ -from queue import Queue - -from pisa.responder import TransactionTracker -from common.appointment import Appointment - - class Builder: """ The :class:`Builder` class is in charge or reconstructing data loaded from the database and build the data @@ -96,3 +90,65 @@ class Builder: for block in missed_blocks: block_queue.put(block) + + @staticmethod + def update_states(watcher, missed_blocks_watcher, missed_blocks_responder): + """ + Updates the states of both the :mod:`Watcher ` and the :mod:`Responder `. + If both have pending blocks to process they need to be updates at the same time, block by block. + + If only one instance has to be updated, ``populate_block_queue`` should be used. + + Args: + watcher (:obj:`Watcher `): a ``Watcher`` instance (including a ``Responder``). + missed_blocks_watcher (:obj:`list`): the list of block missed by the ``Watcher``. + missed_blocks_responder (:obj:`list`): the list of block missed by the ``Responder``. + + Raises: + ValueError: is one of the provided list is empty. + """ + + if len(missed_blocks_responder) == 0 or len(missed_blocks_watcher) == 0: + raise ValueError( + "Both the Watcher and the Responder must have missed blocks. Use ``populate_block_queue`` otherwise." + ) + + # If the missed blocks of the Watcher and the Responder are not the same, we need to bring one up to date with + # the other. + if len(missed_blocks_responder) > len(missed_blocks_watcher): + block_diff = sorted( + set(missed_blocks_responder).difference(missed_blocks_watcher), key=missed_blocks_responder.index + ) + Builder.populate_block_queue(watcher.responder.block_queue, block_diff) + watcher.responder.awake() + watcher.responder.block_queue.join() + + elif len(missed_blocks_watcher) > len(missed_blocks_responder): + block_diff = sorted( + set(missed_blocks_watcher).difference(missed_blocks_responder), key=missed_blocks_watcher.index + ) + Builder.populate_block_queue(watcher.block_queue, block_diff) + watcher.awake() + watcher.block_queue.join() + + # Awake the actors if they are asleep and have pending work. No new inputs are provided, so if the Watcher is + # asleep it will remain asleep. However, the Responder may come and go to sleep since it will be awaken if + # appointments are passed trough from the Watcher. 
+ if watcher.appointments and watcher.asleep: + watcher.awake() + + if watcher.responder.trackers and watcher.responder.asleep: + watcher.responder.awake() + + for block in missed_blocks_watcher: + if not watcher.asleep: + watcher.block_queue.put(block) + watcher.block_queue.join() + + if not watcher.responder.asleep: + watcher.responder.block_queue.put(block) + watcher.responder.block_queue.join() + else: + # The Responder keeps track of last know block for reorgs, so it has to be updated even if there're no + # trackers + watcher.responder.last_known_block = block From b31e24f655ea8d72765a4bbce5b7d47a84bb68ef Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 12:44:35 +0100 Subject: [PATCH 66/93] Adds unit tests for Builder.update_states --- test/pisa/unit/test_builder.py | 145 ++++++++++++++++++++++++++++++++- 1 file changed, 144 insertions(+), 1 deletion(-) diff --git a/test/pisa/unit/test_builder.py b/test/pisa/unit/test_builder.py index 6cbf073..8fe09af 100644 --- a/test/pisa/unit/test_builder.py +++ b/test/pisa/unit/test_builder.py @@ -1,8 +1,17 @@ +import pytest from uuid import uuid4 from queue import Queue from pisa.builder import Builder -from test.pisa.unit.conftest import get_random_value_hex, generate_dummy_appointment, generate_dummy_tracker +from pisa.watcher import Watcher +from test.pisa.unit.conftest import ( + get_random_value_hex, + generate_dummy_appointment, + generate_dummy_tracker, + generate_block, + bitcoin_cli, + get_config, +) def test_build_appointments(): @@ -77,3 +86,137 @@ def test_populate_block_queue(): blocks.remove(block) assert len(blocks) == 0 + + +def test_update_states_empty_list(db_manager): + w = Watcher(db_manager=db_manager, chain_monitor=None, sk_der=None, config=None) + + missed_blocks_watcher = [] + missed_blocks_responder = [get_random_value_hex(32)] + + # Any combination of empty list must raise a ValueError + with pytest.raises(ValueError): + Builder.update_states(w, missed_blocks_watcher, missed_blocks_responder) + + with pytest.raises(ValueError): + Builder.update_states(w, missed_blocks_responder, missed_blocks_watcher) + + +def test_update_states_different_sizes(run_bitcoind, db_manager, chain_monitor): + w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) + chain_monitor.attach_watcher(w.responder, True) + chain_monitor.attach_responder(w.responder, True) + + # For the states to be updated data needs to be present in the actors (either appointments or trackers). + # Let's start from the Watcher. We add one appointment and mine some blocks that both are gonna miss. + w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200} + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + # Updating the states should bring both to the same last known block. The Watcher's is stored in the db since it has + # gone over do_watch, whereas the Responders in only updated by update state. 
+ Builder.update_states(w, blocks, blocks[1:]) + + assert db_manager.load_last_block_hash_watcher() == blocks[-1] + assert w.responder.last_known_block == blocks[-1] + + # If both have work, both last known blocks are updated + w.sleep() + w.responder.sleep() + + w.responder.trackers[uuid4().hex] = { + "penalty_txid": get_random_value_hex(32), + "locator": get_random_value_hex(16), + "appointment_end": 200, + } + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + Builder.update_states(w, blocks[1:], blocks) + assert db_manager.load_last_block_hash_watcher() == blocks[-1] + assert db_manager.load_last_block_hash_responder() == blocks[-1] + + # Let's try the opposite of the first test (Responder with data, Watcher without) + w.sleep() + w.responder.sleep() + + w.appointments = {} + last_block_prev = blocks[-1] + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + # The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hash't + # change. The Watcher does not keep track of reorgs, so if he has no work to do he does not even update the last + # known block. + Builder.update_states(w, blocks[1:], blocks) + assert db_manager.load_last_block_hash_watcher() == last_block_prev + assert db_manager.load_last_block_hash_responder() == blocks[-1] + + +def test_update_states_same_sizes(db_manager, chain_monitor): + # The exact same behaviour of the last test is expected here, since different sizes are even using + # populate_block_queue and then run with the same list size. + w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) + chain_monitor.attach_watcher(w.responder, True) + chain_monitor.attach_responder(w.responder, True) + + # For the states to be updated data needs to be present in the actors (either appointments or trackers). + # Let's start from the Watcher. We add one appointment and mine some blocks that both are gonna miss. + w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200} + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + Builder.update_states(w, blocks, blocks) + + assert db_manager.load_last_block_hash_watcher() == blocks[-1] + assert w.responder.last_known_block == blocks[-1] + + # If both have work, both last known blocks are updated + w.sleep() + w.responder.sleep() + + w.responder.trackers[uuid4().hex] = { + "penalty_txid": get_random_value_hex(32), + "locator": get_random_value_hex(16), + "appointment_end": 200, + } + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + Builder.update_states(w, blocks, blocks) + assert db_manager.load_last_block_hash_watcher() == blocks[-1] + assert db_manager.load_last_block_hash_responder() == blocks[-1] + + # Let's try the opposite of the first test (Responder with data, Watcher without) + w.sleep() + w.responder.sleep() + + w.appointments = {} + last_block_prev = blocks[-1] + + blocks = [] + for _ in range(5): + generate_block() + blocks.append(bitcoin_cli().getbestblockhash()) + + # The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hash't + # change. The Watcher does not keep track of reorgs, so if he has no work to do he does not even update the last + # known block. 
+ Builder.update_states(w, blocks, blocks) + assert db_manager.load_last_block_hash_watcher() == last_block_prev + assert db_manager.load_last_block_hash_responder() == blocks[-1] From 0c4b8eaf4a1419fb6a3688e6313cc231ba075fdb Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 12:48:11 +0100 Subject: [PATCH 67/93] Adds a dict of sent receipts to act as a cache and avoid sending the same data more than once The dict is supposed to be reset periodically so it does not grow unbounded --- pisa/carrier.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pisa/carrier.py b/pisa/carrier.py index dec4ba6..160bd34 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -38,8 +38,16 @@ class Carrier: """ The :class:`Carrier` is the class in charge of interacting with ``bitcoind`` to send/get transactions. It uses :obj:`Receipt` objects to report about the sending outcome. + + Attributes: + issued_receipts (:obj:`dict`): a dictionary of issued receipts to prevent resending the same transaction over + and over. It should periodically be reset to prevent it from growing unbounded. + """ + def __init__(self): + self.issued_receipts = {} + # NOTCOVERED def send_transaction(self, rawtx, txid): """ @@ -53,6 +61,12 @@ class Carrier: :obj:`Receipt`: A receipt reporting whether the transaction was successfully delivered or not and why. """ + if txid in self.issued_receipts: + logger.info("Transaction already sent", txid=txid) + receipt = self.issued_receipts[txid] + + return receipt + try: logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx) bitcoin_cli().sendrawtransaction(rawtx) @@ -101,6 +115,8 @@ class Carrier: logger.error("JSONRPCException", method="Carrier.send_transaction", error=e.error) receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + self.issued_receipts[txid] = receipt + return receipt @staticmethod From 5bf8dbd9deaf528705fdc261fcb10f7bd87e21b0 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 12:52:12 +0100 Subject: [PATCH 68/93] Improves load_watcher_appointments and adds methods for batch updating the db Dumping data into the db became a bottleneck mainly due to not using batch aggregation when dealing with huge amounts of info. --- pisa/db_manager.py | 57 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/pisa/db_manager.py b/pisa/db_manager.py index 983a26e..9d87a56 100644 --- a/pisa/db_manager.py +++ b/pisa/db_manager.py @@ -185,9 +185,8 @@ class DBManager: triggered_appointments = self.load_all_triggered_flags() if not include_triggered: - appointments = { - uuid: appointment for uuid, appointment in appointments.items() if uuid not in triggered_appointments - } + not_triggered = list(set(appointments.keys()).difference(triggered_appointments)) + appointments = {uuid: appointments[uuid] for uuid in not_triggered} return appointments @@ -319,6 +318,19 @@ class DBManager: self.delete_entry(uuid, prefix=WATCHER_PREFIX) logger.info("Deleting appointment from Watcher's db", uuid=uuid) + def batch_delete_watcher_appointments(self, uuids): + """ + Deletes an appointment from the database. + + Args: + uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the appointments to be deleted. 
+ """ + + with self.db.write_batch() as b: + for uuid in uuids: + b.delete((WATCHER_PREFIX + uuid).encode("utf-8")) + logger.info("Deleting appointment from Watcher's db", uuid=uuid) + def delete_responder_tracker(self, uuid): """ Deletes a tracker from the database. @@ -330,6 +342,19 @@ class DBManager: self.delete_entry(uuid, prefix=RESPONDER_PREFIX) logger.info("Deleting appointment from Responder's db", uuid=uuid) + def batch_delete_responder_trackers(self, uuids): + """ + Deletes an appointment from the database. + + Args: + uuids (:obj:`list`): a list of 16-byte hex-encoded strings identifying the trackers to be deleted. + """ + + with self.db.write_batch() as b: + for uuid in uuids: + b.delete((RESPONDER_PREFIX + uuid).encode("utf-8")) + logger.info("Deleting appointment from Responder's db", uuid=uuid) + def load_last_block_hash_watcher(self): """ Loads the last known block hash of the :obj:`Watcher ` from the database. @@ -383,6 +408,19 @@ class DBManager: self.db.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), "".encode("utf-8")) logger.info("Flagging appointment as triggered", uuid=uuid) + def batch_create_triggered_appointment_flag(self, uuids): + """ + Creates a flag that signals that an appointment has been triggered for every appointment in the given list + + Args: + uuids (:obj:`list`): a list of identifier for the appointments to flag. + """ + + with self.db.write_batch() as b: + for uuid in uuids: + b.put((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8"), b"") + logger.info("Flagging appointment as triggered", uuid=uuid) + def load_all_triggered_flags(self): """ Loads all the appointment triggered flags from the database. @@ -406,3 +444,16 @@ class DBManager: self.delete_entry(uuid, prefix=TRIGGERED_APPOINTMENTS_PREFIX) logger.info("Removing triggered flag from appointment appointment", uuid=uuid) + + def batch_delete_triggered_appointment_flag(self, uuids): + """ + Deletes a list of flag signaling that some appointment have been triggered. + + Args: + uuids (:obj:`list`): the identifier of the flag to be removed. + """ + + with self.db.write_batch() as b: + for uuid in uuids: + b.delete((TRIGGERED_APPOINTMENTS_PREFIX + uuid).encode("utf-8")) + logger.info("Removing triggered flag from appointment appointment", uuid=uuid) From 4848b9a05833089db9aa5b247172a5ca54979e27 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 12:53:38 +0100 Subject: [PATCH 69/93] Updates cleaner to use db batch updates --- pisa/cleaner.py | 70 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 51 insertions(+), 19 deletions(-) diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 777834c..c5a98dd 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -51,7 +51,7 @@ class Cleaner: db_manager.delete_triggered_appointment_flag(uuid) @staticmethod - def update_delete_db_locator_map(uuid, locator, db_manager): + def update_delete_db_locator_map(uuids, locator, db_manager): """ Updates the locator:uuid map of a given locator from the database by removing a given uuid. If the uuid is the only element of the map, the map is deleted, otherwise the uuid is simply removed and the database is updated. @@ -59,24 +59,29 @@ class Cleaner: If either the uuid of the locator are not found, the data is not modified. Args: - uuid (:obj:`str`): the identifier to be removed from the map. + uuids (:obj:`list`): a list of identifiers to be removed from the map. locator (:obj:`str`): the identifier of the map to be either updated or deleted. 
db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the database. """ locator_map = db_manager.load_locator_map(locator) + if locator_map is not None: - if uuid in locator_map: - if len(locator_map) == 1: + if set(locator_map).issuperset(uuids): + # Remove the map if all keys are requested to be deleted + if set(locator_map) == set(uuids): db_manager.delete_locator_map(locator) else: - locator_map.remove(uuid) + # Otherwise remove only the selected keys + locator_map = list(set(locator_map).difference(uuids)) db_manager.update_locator_map(locator, locator_map) + else: - logger.error("UUID not found in the db", uuid=uuid) + logger.error("Some UUIDs not found in the db", locator=locator, all_uuids=uuids) + else: - logger.error("Locator not found in the db", uuid=uuid) + logger.error("Locator map not found in the db", uuid=locator) @staticmethod def delete_expired_appointments(expired_appointments, appointments, locator_uuid_map, db_manager): @@ -94,15 +99,24 @@ class Cleaner: database. """ + locator_maps_to_update = {} + for uuid in expired_appointments: locator = appointments[uuid].get("locator") logger.info("End time reached with no breach. Deleting appointment", locator=locator, uuid=uuid) Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) - Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) - # Expired appointments are not flagged, so they can be deleted without caring about the db flag. - db_manager.delete_watcher_appointment(uuid) + if locator not in locator_maps_to_update: + locator_maps_to_update[locator] = [] + + locator_maps_to_update[locator].append(uuid) + + for locator, uuids in locator_maps_to_update.items(): + Cleaner.update_delete_db_locator_map(uuids, locator, db_manager) + + # Expired appointments are not flagged, so they can be deleted without caring about the db flag. + db_manager.batch_delete_watcher_appointments(expired_appointments) @staticmethod def delete_completed_appointments(completed_appointments, appointments, locator_uuid_map, db_manager): @@ -121,6 +135,7 @@ class Cleaner: db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the database. """ + locator_maps_to_update = {} for uuid in completed_appointments: locator = appointments[uuid].get("locator") @@ -129,9 +144,18 @@ class Cleaner: "Appointment cannot be completed, it contains invalid data. Deleting", locator=locator, uuid=uuid ) - db_manager.delete_watcher_appointment(uuid) Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map) - Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) + + if locator not in locator_maps_to_update: + locator_maps_to_update[locator] = [] + + locator_maps_to_update[locator].append(uuid) + + for locator, uuids in locator_maps_to_update.items(): + # Update / delete the locator map + Cleaner.update_delete_db_locator_map(uuids, locator, db_manager) + + db_manager.batch_delete_watcher_appointments(completed_appointments) @staticmethod def flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager): @@ -164,13 +188,15 @@ class Cleaner: trackers. tx_tracker_map (:obj:`dict`): a ``penalty_txid:uuid`` map for the :obj:`Responder ` trackers. - completed_trackers (:obj:`list`): a list of completed trackers to be deleted. + completed_trackers (:obj:`dict`): a dict of completed trackers to be deleted (uuid:confirmations). height (:obj:`int`): the block height at which the trackers were completed. 
db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the database. """ - for uuid, confirmations in completed_trackers: + locator_maps_to_update = {} + + for uuid, confirmations in completed_trackers.items(): logger.info( "Appointment completed. Appointment ended after reaching enough confirmations", uuid=uuid, @@ -190,10 +216,16 @@ class Cleaner: else: tx_tracker_map[penalty_txid].remove(uuid) - # Delete appointment from the db (from watchers's and responder's db) and remove flag - db_manager.delete_responder_tracker(uuid) - db_manager.delete_watcher_appointment(uuid) - db_manager.delete_triggered_appointment_flag(uuid) + if locator not in locator_maps_to_update: + locator_maps_to_update[locator] = [] + locator_maps_to_update[locator].append(uuid) + + for locator, uuids in locator_maps_to_update.items(): # Update / delete the locator map - Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) + Cleaner.update_delete_db_locator_map(uuids, locator, db_manager) + + # Delete appointment from the db (from watchers's and responder's db) and remove flag + db_manager.batch_delete_responder_trackers(list(completed_trackers.keys())) + db_manager.batch_delete_watcher_appointments(list(completed_trackers.keys())) + db_manager.batch_delete_triggered_appointment_flag(list(completed_trackers.keys())) From d9ce265c00b1909eb1a2bdc54ce4cb82363f4296 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 12:55:21 +0100 Subject: [PATCH 70/93] Updates Responder to minimize data replication operations and properly load data from db - Uses an instance of the Carrier so it can benefit from issued_receipts and avoid resending multiple copies of the same triggered appointment - Defines last_known_block to properly load data from db - Uses task_done from Queue to properly signal task completion when boostraping from db - Creates a checked_txs dict in get_completed_trackers to avoid querying bitcoind for the same transaction over and over - Redefines completed_trackers as dict instead of tuple --- pisa/responder.py | 45 ++++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index d1c2aa3..6f18d19 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -145,11 +145,15 @@ class Responder: self.block_queue = Queue() self.chain_monitor = chain_monitor self.db_manager = db_manager + self.carrier = Carrier() + self.last_known_block = db_manager.load_last_block_hash_responder() def awake(self): self.asleep = False self.chain_monitor.responder_asleep = False - Thread(target=self.do_watch).start() + responder_thread = Thread(target=self.do_watch, daemon=True).start() + + return responder_thread def sleep(self): self.asleep = True @@ -211,8 +215,7 @@ class Responder: if self.asleep: logger.info("Waking up") - carrier = Carrier() - receipt = carrier.send_transaction(penalty_rawtx, penalty_txid) + receipt = self.carrier.send_transaction(penalty_rawtx, penalty_txid) if receipt.delivered: self.add_tracker( @@ -286,8 +289,9 @@ class Responder: etc. 
""" - # ToDo: change prev_block_hash to the last known tip when bootstrapping - prev_block_hash = BlockProcessor.get_best_block_hash() + # Distinguish fresh bootstraps from bootstraps from db + if self.last_known_block is None: + self.last_known_block = BlockProcessor.get_best_block_hash() while len(self.trackers) > 0: # We get notified for every new received block @@ -301,7 +305,7 @@ class Responder: "New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"), txs=txs ) - if prev_block_hash == block.get("previousblockhash"): + if self.last_known_block == block.get("previousblockhash"): self.check_confirmations(txs) height = block.get("height") @@ -317,7 +321,7 @@ class Responder: else: logger.warning( "Reorg found", - local_prev_block_hash=prev_block_hash, + local_prev_block_hash=self.last_known_block, remote_prev_block_hash=block.get("previousblockhash"), ) @@ -327,7 +331,10 @@ class Responder: # Register the last processed block for the responder self.db_manager.store_last_block_hash_responder(block_hash) - prev_block_hash = block.get("hash") + self.last_known_block = block.get("hash") + + self.block_queue.task_done() + self.carrier.issued_receipts = {} # Go back to sleep if there are no more pending trackers self.sleep() @@ -387,23 +394,29 @@ class Responder: height (:obj:`int`): the height of the last received block. Returns: - :obj:`list`: a list of tuples ``uuid:confirmations`` for the completed trackers. + :obj:`dict`: a dict (``uuid:confirmations``) of the completed trackers. """ - completed_trackers = [] + completed_trackers = {} + checked_txs = {} for uuid, tracker_data in self.trackers.items(): appointment_end = tracker_data.get("appointment_end") penalty_txid = tracker_data.get("penalty_txid") if appointment_end <= height and penalty_txid not in self.unconfirmed_txs: - tx = Carrier.get_transaction(penalty_txid) + + if penalty_txid not in checked_txs: + tx = Carrier.get_transaction(penalty_txid) + else: + tx = checked_txs.get(penalty_txid) if tx is not None: confirmations = tx.get("confirmations") + checked_txs[penalty_txid] = tx if confirmations is not None and confirmations >= MIN_CONFIRMATIONS: # The end of the appointment has been reached - completed_trackers.append((uuid, confirmations)) + completed_trackers[uuid] = confirmations return completed_trackers @@ -427,7 +440,6 @@ class Responder: # ToDo: #23-define-behaviour-approaching-end receipts = [] - carrier = Carrier() for txid in txs_to_rebroadcast: self.missed_confirmations[txid] = 0 @@ -440,7 +452,7 @@ class Responder: "Transaction has missed many confirmations. Rebroadcasting", penalty_txid=tracker.penalty_txid ) - receipt = carrier.send_transaction(tracker.penalty_rawtx, tracker.penalty_txid) + receipt = self.carrier.send_transaction(tracker.penalty_rawtx, tracker.penalty_txid) receipts.append((txid, receipt)) if not receipt.delivered: @@ -459,17 +471,16 @@ class Responder: block_hash (:obj:`str`): the hash of the last block received (which triggered the reorg). 
""" - carrier = Carrier() for uuid in self.trackers.keys(): tracker = TransactionTracker.from_dict(self.db_manager.load_responder_tracker(uuid)) # First we check if the dispute transaction is known (exists either in mempool or blockchain) - dispute_tx = carrier.get_transaction(tracker.dispute_txid) + dispute_tx = self.carrier.get_transaction(tracker.dispute_txid) if dispute_tx is not None: # If the dispute is there, we check the penalty - penalty_tx = carrier.get_transaction(tracker.penalty_txid) + penalty_tx = self.carrier.get_transaction(tracker.penalty_txid) if penalty_tx is not None: # If the penalty exists we need to check is it's on the blockchain or not so we can update the From a8f2407df902226e0dcaa95e9443ee9a56ad4808 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:00:04 +0100 Subject: [PATCH 71/93] Updates tests for Responder.check_completed_trackers --- test/pisa/unit/test_responder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/pisa/unit/test_responder.py b/test/pisa/unit/test_responder.py index f646186..a3a9e75 100644 --- a/test/pisa/unit/test_responder.py +++ b/test/pisa/unit/test_responder.py @@ -436,7 +436,7 @@ def test_get_completed_trackers(db_manager, chain_monitor): # And now let's check completed_trackers = responder.get_completed_trackers(initial_height + 6) - completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers] + completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers.items()] ended_trackers_keys = list(trackers_end_conf.keys()) assert set(completed_trackers_ids) == set(ended_trackers_keys) @@ -444,7 +444,7 @@ def test_get_completed_trackers(db_manager, chain_monitor): generate_blocks(6) completed_trackers = responder.get_completed_trackers(initial_height + 12) - completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers] + completed_trackers_ids = [tracker_id for tracker_id, confirmations in completed_trackers.items()] ended_trackers_keys.extend(list(trackers_no_end.keys())) assert set(completed_trackers_ids) == set(ended_trackers_keys) From 02bc88ed84a5e35b14d719cfb3024956a159f8bb Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:00:55 +0100 Subject: [PATCH 72/93] Updates Watcher to use db batch updates and avoid multiple decryptions of the same data --- pisa/watcher.py | 44 ++++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/pisa/watcher.py b/pisa/watcher.py index 7dbc68b..dc35aec 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -79,10 +79,12 @@ class Watcher: def awake(self): self.asleep = False self.chain_monitor.watcher_asleep = False - Thread(target=self.do_watch).start() + watcher_thread = Thread(target=self.do_watch, daemon=True).start() logger.info("Waking up") + return watcher_thread + def sleep(self): self.asleep = True self.chain_monitor.watcher_asleep = True @@ -180,6 +182,9 @@ class Watcher: valid_breaches, invalid_breaches = self.filter_valid_breaches(self.get_breaches(txids)) + triggered_flags = [] + appointments_to_delete = [] + for uuid, breach in valid_breaches.items(): logger.info( "Notifying responder and deleting appointment", @@ -198,24 +203,27 @@ class Watcher: block_hash, ) - Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map) - - # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted. 
# FIXME: This is only necessary because of the triggered appointment approach. Fix if it changes. - if receipt.delivered: - self.db_manager.create_triggered_appointment_flag(uuid) + if receipt.delivered: + Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map) + triggered_flags.append(uuid) else: - self.db_manager.delete_watcher_appointment(uuid) - Cleaner.update_delete_db_locator_map(uuid, breach["locator"], self.db_manager) + appointments_to_delete.append(uuid) + + # Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted. + appointments_to_delete.extend(invalid_breaches) + self.db_manager.batch_create_triggered_appointment_flag(triggered_flags) Cleaner.delete_completed_appointments( - invalid_breaches, self.appointments, self.locator_uuid_map, self.db_manager + appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager ) # Register the last processed block for the watcher self.db_manager.store_last_block_hash_watcher(block_hash) + self.block_queue.task_done() + # Go back to sleep if there are no more appointments self.sleep() @@ -266,17 +274,25 @@ class Watcher: valid_breaches = {} invalid_breaches = [] + # A cache of the already decrypted blobs so replicate decryption can be avoided + decrypted_blobs = {} + for locator, dispute_txid in breaches.items(): for uuid in self.locator_uuid_map[locator]: appointment = Appointment.from_dict(self.db_manager.load_watcher_appointment(uuid)) - try: - penalty_rawtx = Cryptographer.decrypt(appointment.encrypted_blob, dispute_txid) + if appointment.encrypted_blob.data in decrypted_blobs: + penalty_tx, penalty_rawtx = decrypted_blobs[appointment.encrypted_blob.data] - except ValueError: - penalty_rawtx = None + else: + try: + penalty_rawtx = Cryptographer.decrypt(appointment.encrypted_blob, dispute_txid) - penalty_tx = BlockProcessor.decode_raw_transaction(penalty_rawtx) + except ValueError: + penalty_rawtx = None + + penalty_tx = BlockProcessor.decode_raw_transaction(penalty_rawtx) + decrypted_blobs[appointment.encrypted_blob.data] = (penalty_tx, penalty_rawtx) if penalty_tx is not None: valid_breaches[uuid] = { From 32ff13a495e7ca70b11fe1ece7b94a0ae9425f51 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:02:32 +0100 Subject: [PATCH 73/93] Fixes loading data from disk when both Watcher and Responder need to be brough up to date The previous approach was not correct, since both actors need to be brought up to date at the same time. --- pisa/pisad.py | 60 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 0eca57b..0d361f3 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -60,10 +60,6 @@ def main(): if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0: logger.info("Fresh bootstrap") - # Set the current tip as the last known block for both on a fresh start - db_manager.store_last_block_hash_watcher(BlockProcessor.get_best_block_hash()) - db_manager.store_last_block_hash_responder(BlockProcessor.get_best_block_hash()) - else: logger.info("Bootstrapping from backed up data") block_processor = BlockProcessor() @@ -72,40 +68,58 @@ def main(): last_block_responder = db_manager.load_last_block_hash_responder() # FIXME: 32-reorgs-offline dropped txs are not used at this point. - missed_blocks_responder = None + # Get the blocks missed by both the Watcher and the Responder. 
If the blocks of both match we don't + # perform the search twice. + last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor( + last_block_watcher + ) + missed_blocks_watcher = block_processor.get_missed_blocks(last_common_ancestor_watcher) - # Build Responder with backed up data if found - if len(responder_trackers_data) != 0: + if last_block_watcher == last_block_responder: + dropped_txs_responder = dropped_txs_watcher + missed_blocks_responder = missed_blocks_watcher + + else: last_common_ancestor_responder, dropped_txs_responder = block_processor.find_last_common_ancestor( last_block_responder ) missed_blocks_responder = block_processor.get_missed_blocks(last_common_ancestor_responder) - watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers( - responder_trackers_data - ) - Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder) - watcher.responder.awake() - - # Build Watcher. If the blocks of both match we don't perform the search twice. + # Build and update the Watcher. if len(watcher_appointments_data) != 0: - if last_block_watcher == last_block_responder and missed_blocks_responder is not None: - missed_blocks_watcher = missed_blocks_responder - else: - last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor( - last_block_watcher - ) - missed_blocks_watcher = block_processor.get_missed_blocks(last_common_ancestor_watcher) - watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments( watcher_appointments_data ) + + # Build Responder with backed up data if found + if len(responder_trackers_data) != 0: + watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers( + responder_trackers_data + ) + + # If only one of the instances needs to be updated, it can be done separately. + if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0: + Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder) + watcher.responder.awake() + watcher.responder.block_queue.join() + + elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0: Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher) watcher.awake() + watcher.block_queue.join() + + # Otherwise the need to be updated at the same time, block by block + elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0: + Builder.update_states(watcher, missed_blocks_watcher, missed_blocks_responder) + + # Awake the Watcher/Responder if they ended up with pending work + if watcher.appointments and watcher.asleep: + watcher.awake() + if watcher.responder.trackers and watcher.responder.asleep: + watcher.responder.awake() # Fire the API API(watcher, config=config).start() - except Exception as e: logger.error("An error occurred: {}. 
Shutting down".format(e)) exit(1) From 1a7464f31a60df6e970b21a601b91acc2368e803 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:04:23 +0100 Subject: [PATCH 74/93] Changes db_manager fixture from session to module --- test/pisa/unit/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/pisa/unit/conftest.py b/test/pisa/unit/conftest.py index 3e373e1..0995539 100644 --- a/test/pisa/unit/conftest.py +++ b/test/pisa/unit/conftest.py @@ -42,9 +42,10 @@ def prng_seed(): random.seed(0) -@pytest.fixture(scope="session") +@pytest.fixture(scope="module") def db_manager(): manager = DBManager("test_db") + # Add last know block for the Responder in the db yield manager manager.db.close() From 1f88f70c9a37d3e10610af919980fe1981e4fef8 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:05:49 +0100 Subject: [PATCH 75/93] Updates test_carrier with issued_receipts The functionality expected in the test required the issued_receipts dict to be cleare, what is normally performed by the Responder --- test/pisa/unit/test_carrier.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/pisa/unit/test_carrier.py b/test/pisa/unit/test_carrier.py index fe9540a..6cdfdae 100644 --- a/test/pisa/unit/test_carrier.py +++ b/test/pisa/unit/test_carrier.py @@ -35,8 +35,9 @@ def test_send_double_spending_transaction(carrier): receipt = carrier.send_transaction(tx.hex(), txid) sent_txs.append(txid) - # Wait for a block to be mined + # Wait for a block to be mined. Issued receipts is reset from the Responder every block, so we should do it too. generate_blocks(2) + carrier.issued_receipts = {} # Try to send it again receipt2 = carrier.send_transaction(tx.hex(), txid) From 451311b765a4e897c934f9d66f4b5242a68fa514 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:08:55 +0100 Subject: [PATCH 76/93] Updates ChainMonitor unit tests to ititialize the db_manager The Responder needs a DBManager instance to pull the last know block, so None is not valid anymore for this tests --- test/pisa/unit/test_chain_monitor.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/pisa/unit/test_chain_monitor.py b/test/pisa/unit/test_chain_monitor.py index 3c2bee6..56c2b31 100644 --- a/test/pisa/unit/test_chain_monitor.py +++ b/test/pisa/unit/test_chain_monitor.py @@ -29,8 +29,8 @@ def test_init(run_bitcoind): assert chain_monitor.watcher_asleep and chain_monitor.responder_asleep -def test_attach_watcher(chain_monitor): - watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config()) +def test_attach_watcher(chain_monitor, db_manager): + watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) # booleans are not passed as reference in Python, so the flags need to be set separately @@ -44,8 +44,8 @@ def test_attach_watcher(chain_monitor): assert watcher.block_queue.get() == r_hash -def test_attach_responder(chain_monitor): - responder = Responder(db_manager=None, chain_monitor=chain_monitor) +def test_attach_responder(chain_monitor, db_manager): + responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) chain_monitor.attach_responder(responder.block_queue, responder.asleep) # Same kind of testing as with the attach watcher @@ -103,12 +103,12 @@ def test_update_state(chain_monitor): assert 
chain_monitor.best_tip == another_block_hash and new_block_hash == chain_monitor.last_tips[-1] -def test_monitor_chain_polling(): +def test_monitor_chain_polling(db_manager): # Try polling with the Watcher chain_monitor = ChainMonitor() chain_monitor.best_tip = BlockProcessor.get_best_block_hash() - watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config()) + watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) chain_monitor.attach_watcher(watcher.block_queue, asleep=False) # monitor_chain_polling runs until terminate if set @@ -130,12 +130,12 @@ def test_monitor_chain_polling(): polling_thread.join() -def test_monitor_chain_zmq(): +def test_monitor_chain_zmq(db_manager): # Try zmq with the Responder chain_monitor = ChainMonitor() chain_monitor.best_tip = BlockProcessor.get_best_block_hash() - responder = Responder(db_manager=None, chain_monitor=chain_monitor) + responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) chain_monitor.attach_responder(responder.block_queue, asleep=False) zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True) @@ -164,12 +164,12 @@ def test_monitor_chain_zmq(): zmq_thread.join() -def test_monitor_chain(): +def test_monitor_chain(db_manager): # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate chain_monitor = ChainMonitor() - watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - responder = Responder(db_manager=None, chain_monitor=chain_monitor) + watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) + responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) chain_monitor.attach_responder(responder.block_queue, asleep=False) chain_monitor.attach_watcher(watcher.block_queue, asleep=False) @@ -194,12 +194,12 @@ def test_monitor_chain(): generate_block() -def test_monitor_chain_single_update(): +def test_monitor_chain_single_update(db_manager): # This test tests that if both threads try to add the same block to the queue, only the first one will make it chain_monitor = ChainMonitor() - watcher = Watcher(db_manager=None, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - responder = Responder(db_manager=None, chain_monitor=chain_monitor) + watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) + responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) chain_monitor.attach_responder(responder.block_queue, asleep=False) chain_monitor.attach_watcher(watcher.block_queue, asleep=False) From c538ebadd0baa4f28116084f342cc7d29a974597 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:10:06 +0100 Subject: [PATCH 77/93] Updates Clear unit tests to use dicts for completed_trackers instead of tuples --- test/pisa/unit/test_cleaner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/pisa/unit/test_cleaner.py b/test/pisa/unit/test_cleaner.py index fb5db76..1c59111 100644 --- a/test/pisa/unit/test_cleaner.py +++ b/test/pisa/unit/test_cleaner.py @@ -104,7 +104,7 @@ def test_update_delete_db_locator_map(db_manager): for uuid, appointment in appointments.items(): locator = appointment.get("locator") locator_map_before = db_manager.load_locator_map(locator) - Cleaner.update_delete_db_locator_map(uuid, locator, db_manager) + 
Cleaner.update_delete_db_locator_map([uuid], locator, db_manager) locator_map_after = db_manager.load_locator_map(locator) if locator_map_after is None: @@ -163,7 +163,7 @@ def test_delete_completed_trackers_db_match(db_manager): trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS) selected_trackers = random.sample(list(trackers.keys()), k=ITEMS) - completed_trackers = [(tracker, 6) for tracker in selected_trackers] + completed_trackers = {tracker: 6 for tracker in selected_trackers} Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager) @@ -200,7 +200,7 @@ def test_delete_completed_trackers_no_db_match(db_manager): tx_tracker_map[penalty_txid] = [uuid] selected_trackers.append(uuid) - completed_trackers = [(tracker, 6) for tracker in selected_trackers] + completed_trackers = {tracker: 6 for tracker in selected_trackers} # We should be able to delete the correct ones and not fail in the others Cleaner.delete_completed_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager) From 4f000298fae662f8c0bdec7735ed407d6341f573 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:11:59 +0100 Subject: [PATCH 78/93] Removes zmq_subscriber The ZMQSubscriber was merged into the ChainMonitor but never deleted --- pisa/utils/zmq_subscriber.py | 36 ------------------------------------ 1 file changed, 36 deletions(-) delete mode 100644 pisa/utils/zmq_subscriber.py diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py deleted file mode 100644 index 5bf29f5..0000000 --- a/pisa/utils/zmq_subscriber.py +++ /dev/null @@ -1,36 +0,0 @@ -import zmq -import binascii -from common.logger import Logger -from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT - - -# ToDo: #7-add-async-back-to-zmq -class ZMQSubscriber: - """ Adapted from https://github.com/bitcoin/bitcoin/blob/master/contrib/zmq/zmq_sub.py""" - - def __init__(self, config, parent): - self.zmqContext = zmq.Context() - self.zmqSubSocket = self.zmqContext.socket(zmq.SUB) - self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) - self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") - self.zmqSubSocket.connect( - "%s://%s:%s" % (config.get("FEED_PROTOCOL"), config.get("FEED_ADDR"), config.get("FEED_PORT")) - ) - self.logger = Logger("ZMQSubscriber-{}".format(parent)) - - self.terminate = False - - def handle(self, block_queue): - while not self.terminate: - msg = self.zmqSubSocket.recv_multipart() - - # Terminate could have been set wile the thread was blocked in recv - if not self.terminate: - topic = msg[0] - body = msg[1] - - if topic == b"hashblock": - block_hash = binascii.hexlify(body).decode("utf-8") - block_queue.put(block_hash) - - self.logger.info("New block received via ZMQ", block_hash=block_hash) From 6c957b067d1e6fbd85cb0f7e5fa46e753f8738c4 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 31 Jan 2020 13:57:30 +0100 Subject: [PATCH 79/93] Adds batch update unit tests for DBManager --- test/pisa/unit/test_db_manager.py | 79 +++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/test/pisa/unit/test_db_manager.py b/test/pisa/unit/test_db_manager.py index 8c7d0a0..75a01ff 100644 --- a/test/pisa/unit/test_db_manager.py +++ b/test/pisa/unit/test_db_manager.py @@ -303,6 +303,29 @@ def test_delete_watcher_appointment(db_manager, watcher_appointments): assert len(db_watcher_appointments) == 0 +def test_batch_delete_watcher_appointments(db_manager, watcher_appointments): + # Let's start by adding a 
bunch of appointments + for uuid, appointment in watcher_appointments.items(): + db_manager.store_watcher_appointment(uuid, appointment.to_json()) + + first_half = list(watcher_appointments.keys())[: len(watcher_appointments) // 2] + second_half = list(watcher_appointments.keys())[len(watcher_appointments) // 2 :] + + # Let's now delete half of them in a batch update + db_manager.batch_delete_watcher_appointments(first_half) + + db_watcher_appointments = db_manager.load_watcher_appointments() + assert not set(db_watcher_appointments.keys()).issuperset(first_half) + assert set(db_watcher_appointments.keys()).issuperset(second_half) + + # Let's delete the rest + db_manager.batch_delete_watcher_appointments(second_half) + + # Now there should be no appointments left + db_watcher_appointments = db_manager.load_watcher_appointments() + assert not db_watcher_appointments + + def test_delete_responder_tracker(db_manager, responder_trackers): # Same for the responder db_responder_trackers = db_manager.load_responder_trackers() @@ -315,6 +338,29 @@ def test_delete_responder_tracker(db_manager, responder_trackers): assert len(db_responder_trackers) == 0 +def test_batch_delete_responder_trackers(db_manager, responder_trackers): + # Let's start by adding a bunch of appointments + for uuid, value in responder_trackers.items(): + db_manager.store_responder_tracker(uuid, json.dumps({"value": value})) + + first_half = list(responder_trackers.keys())[: len(responder_trackers) // 2] + second_half = list(responder_trackers.keys())[len(responder_trackers) // 2 :] + + # Let's now delete half of them in a batch update + db_manager.batch_delete_responder_trackers(first_half) + + db_responder_trackers = db_manager.load_responder_trackers() + assert not set(db_responder_trackers.keys()).issuperset(first_half) + assert set(db_responder_trackers.keys()).issuperset(second_half) + + # Let's delete the rest + db_manager.batch_delete_responder_trackers(second_half) + + # Now there should be no trackers left + db_responder_trackers = db_manager.load_responder_trackers() + assert not db_responder_trackers + + def test_store_load_last_block_hash_watcher(db_manager): # Let's first create a made up block hash local_last_block_hash = get_random_value_hex(32) @@ -347,6 +393,20 @@ def test_create_triggered_appointment_flag(db_manager): assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + key).encode("utf-8")) is None +def test_batch_create_triggered_appointment_flag(db_manager): + # Test that flags are added in batch + keys = [get_random_value_hex(16) for _ in range(10)] + + # Checked that non of the flags is already in the db + db_flags = db_manager.load_all_triggered_flags() + assert not set(db_flags).issuperset(keys) + + # Make sure that they are now + db_manager.batch_create_triggered_appointment_flag(keys) + db_flags = db_manager.load_all_triggered_flags() + assert set(db_flags).issuperset(keys) + + def test_load_all_triggered_flags(db_manager): # There should be a some flags in the db from the previous tests. 
Let's load them flags = db_manager.load_all_triggered_flags() @@ -370,3 +430,22 @@ def test_delete_triggered_appointment_flag(db_manager): # Try to load them back for k in keys: assert db_manager.db.get((TRIGGERED_APPOINTMENTS_PREFIX + k).encode("utf-8")) is None + + +def test_batch_delete_triggered_appointment_flag(db_manager): + # Let's add some flags first + keys = [get_random_value_hex(16) for _ in range(10)] + db_manager.batch_create_triggered_appointment_flag(keys) + + # And now let's delete in batch + first_half = keys[: len(keys) // 2] + second_half = keys[len(keys) // 2 :] + + db_manager.batch_delete_triggered_appointment_flag(first_half) + db_falgs = db_manager.load_all_triggered_flags() + assert not set(db_falgs).issuperset(first_half) + assert set(db_falgs).issuperset(second_half) + + # Delete the rest + db_manager.batch_delete_triggered_appointment_flag(second_half) + assert not db_manager.load_all_triggered_flags() From aa12fa2cf88c34c839d6409ded538425b73f37cb Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 10 Feb 2020 16:19:22 +0100 Subject: [PATCH 80/93] Removes sleep flag from Watcher and Responder The sleep flag was used to avoid doing useless work when no data was hold by the tower. However, from the implementation of the data persistence on, the Watcher and Responder should at least keep track of the last known block. The current apporach was making this harder. --- pisa/builder.py | 26 ++++-------------- pisa/chain_monitor.py | 56 +++++++++----------------------------- pisa/responder.py | 62 ++++++++++++------------------------------- pisa/watcher.py | 57 +++++++++++---------------------------- 4 files changed, 50 insertions(+), 151 deletions(-) diff --git a/pisa/builder.py b/pisa/builder.py index 39298dd..e9728cc 100644 --- a/pisa/builder.py +++ b/pisa/builder.py @@ -120,7 +120,6 @@ class Builder: set(missed_blocks_responder).difference(missed_blocks_watcher), key=missed_blocks_responder.index ) Builder.populate_block_queue(watcher.responder.block_queue, block_diff) - watcher.responder.awake() watcher.responder.block_queue.join() elif len(missed_blocks_watcher) > len(missed_blocks_responder): @@ -128,27 +127,12 @@ class Builder: set(missed_blocks_watcher).difference(missed_blocks_responder), key=missed_blocks_watcher.index ) Builder.populate_block_queue(watcher.block_queue, block_diff) - watcher.awake() watcher.block_queue.join() - # Awake the actors if they are asleep and have pending work. No new inputs are provided, so if the Watcher is - # asleep it will remain asleep. However, the Responder may come and go to sleep since it will be awaken if - # appointments are passed trough from the Watcher. 
- if watcher.appointments and watcher.asleep: - watcher.awake() - - if watcher.responder.trackers and watcher.responder.asleep: - watcher.responder.awake() - + # Once they are at the same height, we update them one by one for block in missed_blocks_watcher: - if not watcher.asleep: - watcher.block_queue.put(block) - watcher.block_queue.join() + watcher.block_queue.put(block) + watcher.block_queue.join() - if not watcher.responder.asleep: - watcher.responder.block_queue.put(block) - watcher.responder.block_queue.join() - else: - # The Responder keeps track of last know block for reorgs, so it has to be updated even if there're no - # trackers - watcher.responder.last_known_block = block + watcher.responder.block_queue.put(block) + watcher.responder.block_queue.join() diff --git a/pisa/chain_monitor.py b/pisa/chain_monitor.py index 22ef377..0dad221 100644 --- a/pisa/chain_monitor.py +++ b/pisa/chain_monitor.py @@ -19,6 +19,10 @@ class ChainMonitor: The :class:`ChainMonitor` monitors the chain using two methods: ``zmq`` and ``polling``. Blocks are only notified once per queue and the notification is triggered by the method that detects the block faster. + Args: + watcher_queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Watcher``. + responder_queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Responder``. + Attributes: best_tip (:obj:`str`): a block hash representing the current best tip. last_tips (:obj:`list`): a list of last chain tips. Used as a sliding window to avoid notifying about old tips. @@ -30,11 +34,9 @@ class ChainMonitor: watcher_queue (:obj:`Queue`): a queue to send new best tips to the :obj:`Watcher `. responder_queue (:obj:`Queue`): a queue to send new best tips to the :obj:`Responder `. - watcher_asleep (:obj:`bool`): a flag that signals whether to send information to the ``Watcher`` or not. - responder_asleep (:obj:`bool`): a flag that signals whether to send information to the ``Responder`` or not. """ - def __init__(self): + def __init__(self, watcher_queue, responder_queue): self.best_tip = None self.last_tips = [] self.terminate = False @@ -48,53 +50,21 @@ class ChainMonitor: self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") self.zmqSubSocket.connect("%s://%s:%s" % (FEED_PROTOCOL, FEED_ADDR, FEED_PORT)) - self.watcher_queue = None - self.responder_queue = None - self.watcher_asleep = True - self.responder_asleep = True - - def attach_watcher(self, queue, asleep): - """ - Attaches a :obj:`Watcher ` to the :class:`ChainMonitor`. The ``Watcher`` and the - ``ChainMonitor`` are connected via the ``watcher_queue`` and the ``watcher_asleep`` flag. - - Args: - queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Watcher``. - asleep( :obj:`bool`): whether the ``Watcher`` is initially awake or asleep. It is changed on the fly from - the ``Watcher`` when the state changes. - """ - - self.watcher_queue = queue - self.watcher_asleep = asleep - - def attach_responder(self, queue, asleep): - """ - Attaches a :obj:`Responder ` to the :class:`ChainMonitor`. The ``Responder`` and the - ``ChainMonitor`` are connected via the ``responder_queue`` and the ``responder_asleep`` flag. - - Args: - queue (:obj:`Queue`): the queue to be used to send blocks hashes to the ``Responder``. - asleep( :obj:`bool`): whether the ``Responder`` is initially awake or asleep. It is changed on the fly from - the ``Responder`` when the state changes. 
- """ - - self.responder_queue = queue - self.responder_asleep = asleep + self.watcher_queue = watcher_queue + self.responder_queue = responder_queue def notify_subscribers(self, block_hash): """ - Notifies the subscribers (``Watcher`` and ``Responder``) about a new block provided they are awake. It does so - by putting the hash in the corresponding queue(s). + Notifies the subscribers (``Watcher`` and ``Responder``) about a new block. It does so by putting the hash in + the corresponding queue(s). Args: - block_hash (:obj:`str`): the new block hash to be sent to the awake subscribers. + block_hash (:obj:`str`): the new block hash to be sent to the subscribers. + block_hash (:obj:`str`): the new block hash to be sent to the subscribers. """ - if not self.watcher_asleep: - self.watcher_queue.put(block_hash) - - if not self.responder_asleep: - self.responder_queue.put(block_hash) + self.watcher_queue.put(block_hash) + self.responder_queue.put(block_hash) def update_state(self, block_hash, max_block_window_size=BLOCK_WINDOW_SIZE): """ diff --git a/pisa/responder.py b/pisa/responder.py index 6f18d19..4ae73ab 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -108,11 +108,6 @@ class Responder: the decrypted ``penalty_txs`` handed by the :obj:`Watcher ` and ensuring the they make it to the blockchain. - The :class:`Responder` can be in two states: - - - Asleep (``self.asleep = True)`` when there are no trackers to take care of (``self.trackers`` is empty). - - Awake (``self.asleep = False)`` when there are trackers to take care of (actively monitoring the blockchain). - Args: db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the database. @@ -126,41 +121,29 @@ class Responder: unconfirmed_txs (:obj:`list`): A list that keeps track of all unconfirmed ``penalty_txs``. missed_confirmations (:obj:`dict`): A dictionary that keeps count of how many confirmations each ``penalty_tx`` has missed. Used to trigger rebroadcast if needed. - asleep (:obj:`bool`): A flag that signals whether the :obj:`Responder` is asleep or awake. block_queue (:obj:`Queue`): A queue used by the :obj:`Responder` to receive block hashes from ``bitcoind``. It - is populated by the :obj:`ChainMonitor `. - chain_monitor (:obj:`ChainMonitor `): a ``ChainMonitor`` instance used to track - new blocks received by ``bitcoind``. + is populated by the :obj:`ChainMonitor `. db_manager (:obj:`DBManager `): A ``DBManager`` instance to interact with the database. """ - def __init__(self, db_manager, chain_monitor): + def __init__(self, db_manager): self.trackers = dict() self.tx_tracker_map = dict() self.unconfirmed_txs = [] self.missed_confirmations = dict() - self.asleep = True self.block_queue = Queue() - self.chain_monitor = chain_monitor self.db_manager = db_manager self.carrier = Carrier() self.last_known_block = db_manager.load_last_block_hash_responder() def awake(self): - self.asleep = False - self.chain_monitor.responder_asleep = False - responder_thread = Thread(target=self.do_watch, daemon=True).start() + responder_thread = Thread(target=self.do_watch, daemon=True) + responder_thread.start() return responder_thread - def sleep(self): - self.asleep = True - self.chain_monitor.responder_asleep = True - - logger.info("No more pending trackers, going back to sleep") - @staticmethod def on_sync(block_hash): """ @@ -212,9 +195,6 @@ class Responder: into the blockchain. 
""" - if self.asleep: - logger.info("Waking up") - receipt = self.carrier.send_transaction(penalty_rawtx, penalty_txid) if receipt.delivered: @@ -239,8 +219,6 @@ class Responder: ``penalty_txid`` added to ``unconfirmed_txs`` if ``confirmations=0``. Finally, all the data is stored in the database. - ``add_tracker`` awakes the :obj:`Responder` if it is asleep. - Args: uuid (:obj:`str`): a unique identifier for the appointment. locator (:obj:`str`): the appointment locator provided by the user (16-byte hex-encoded). @@ -278,9 +256,6 @@ class Responder: "New tracker added", dispute_txid=dispute_txid, penalty_txid=penalty_txid, appointment_end=appointment_end ) - if self.asleep: - self.awake() - def do_watch(self): """ Monitors the blockchain whilst there are pending trackers. @@ -293,20 +268,17 @@ class Responder: if self.last_known_block is None: self.last_known_block = BlockProcessor.get_best_block_hash() - while len(self.trackers) > 0: - # We get notified for every new received block + while True: block_hash = self.block_queue.get() block = BlockProcessor.get_block(block_hash) + logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash")) - if block is not None: - txs = block.get("tx") - - logger.info( - "New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash"), txs=txs - ) + if len(self.trackers) > 0 and block is not None: + txids = block.get("tx") + logger.info("List of transactions", txids=txids) if self.last_known_block == block.get("previousblockhash"): - self.check_confirmations(txs) + self.check_confirmations(txids) height = block.get("height") completed_trackers = self.get_completed_trackers(height) @@ -328,16 +300,16 @@ class Responder: # ToDo: #24-properly-handle-reorgs self.handle_reorgs(block_hash) - # Register the last processed block for the responder - self.db_manager.store_last_block_hash_responder(block_hash) + # Clear the receipts issued in this block + self.carrier.issued_receipts = {} - self.last_known_block = block.get("hash") + if len(self.trackers) is 0: + logger.info("No more pending trackers") + # Register the last processed block for the responder + self.db_manager.store_last_block_hash_responder(block_hash) + self.last_known_block = block.get("hash") self.block_queue.task_done() - self.carrier.issued_receipts = {} - - # Go back to sleep if there are no more pending trackers - self.sleep() def check_confirmations(self, txs): """ diff --git a/pisa/watcher.py b/pisa/watcher.py index dc35aec..5b1860c 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -10,7 +10,6 @@ from common.logger import Logger from pisa import LOG_PREFIX from pisa.cleaner import Cleaner -from pisa.responder import Responder from pisa.block_processor import BlockProcessor logger = Logger(actor="Watcher", log_name_prefix=LOG_PREFIX) @@ -33,13 +32,10 @@ class Watcher: Args: db_manager (:obj:`DBManager `): a ``DBManager`` instance to interact with the database. - chain_monitor (:obj:`ChainMonitor `): a ``ChainMonitor`` instance used to track - new blocks received by ``bitcoind``. sk_der (:obj:`bytes`): a DER encoded private key used to sign appointment receipts (signaling acceptance). config (:obj:`dict`): a dictionary containing all the configuration parameters. Used locally to retrieve ``MAX_APPOINTMENTS`` and ``EXPIRY_DELTA``. - responder (:obj:`Responder `): a ``Responder`` instance. If ``None`` is passed, a new - instance is created. Populated instances are useful when bootstrapping the system from backed-up data. 
+ responder (:obj:`Responder `): a ``Responder`` instance. Attributes: @@ -48,11 +44,8 @@ class Watcher: It's populated trough ``add_appointment``. locator_uuid_map (:obj:`dict`): a ``locator:uuid`` map used to allow the :obj:`Watcher` to deal with several appointments with the same ``locator``. - asleep (:obj:`bool`): A flag that signals whether the :obj:`Watcher` is asleep or awake. block_queue (:obj:`Queue`): A queue used by the :obj:`Watcher` to receive block hashes from ``bitcoind``. It is populated by the :obj:`ChainMonitor `. - chain_monitor (:obj:`ChainMonitor `): a ``ChainMonitor`` instance used to track - new blocks received by ``bitcoind``. config (:obj:`dict`): a dictionary containing all the configuration parameters. Used locally to retrieve ``MAX_APPOINTMENTS`` and ``EXPIRY_DELTA``. db_manager (:obj:`DBManager `): A db manager instance to interact with the database. @@ -63,41 +56,27 @@ class Watcher: """ - def __init__(self, db_manager, chain_monitor, sk_der, config, responder=None): + def __init__(self, db_manager, responder, sk_der, config): self.appointments = dict() self.locator_uuid_map = dict() - self.asleep = True self.block_queue = Queue() - self.chain_monitor = chain_monitor self.config = config self.db_manager = db_manager + self.responder = responder self.signing_key = Cryptographer.load_private_key_der(sk_der) - if not isinstance(responder, Responder): - self.responder = Responder(db_manager, chain_monitor) - def awake(self): - self.asleep = False - self.chain_monitor.watcher_asleep = False - watcher_thread = Thread(target=self.do_watch, daemon=True).start() - - logger.info("Waking up") + watcher_thread = Thread(target=self.do_watch, daemon=True) + watcher_thread.start() return watcher_thread - def sleep(self): - self.asleep = True - self.chain_monitor.watcher_asleep = True - - logger.info("No more pending appointments, going back to sleep") - def add_appointment(self, appointment): """ Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached. - ``add_appointment`` is the entry point of the Watcher. Upon receiving a new appointment, if the :obj:`Watcher` - is asleep, it will be awaken and start monitoring the blockchain (``do_watch``) until ``appointments`` is empty. - It will go back to sleep once there are no more pending appointments. + ``add_appointment`` is the entry point of the Watcher. Upon receiving a new appointment it will start monitoring + the blockchain (``do_watch``) until ``appointments`` is empty. Once a breach is seen on the blockchain, the :obj:`Watcher` will decrypt the corresponding :obj:`EncryptedBlob ` and pass the information to the @@ -132,9 +111,6 @@ class Watcher: else: self.locator_uuid_map[appointment.locator] = [uuid] - if self.asleep: - self.awake() - self.db_manager.store_watcher_appointment(uuid, appointment.to_json()) self.db_manager.create_append_locator_map(appointment.locator, uuid) @@ -159,15 +135,13 @@ class Watcher: :obj:`Responder ` upon detecting a breach. 
""" - while len(self.appointments) > 0: + while True: block_hash = self.block_queue.get() - logger.info("New block received", block_hash=block_hash) - block = BlockProcessor.get_block(block_hash) + logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash")) - if block is not None: + if len(self.appointments) > 0 and block is not None: txids = block.get("tx") - logger.info("List of transactions", txids=txids) expired_appointments = [ @@ -203,7 +177,7 @@ class Watcher: block_hash, ) - # FIXME: This is only necessary because of the triggered appointment approach. Fix if it changes. + # FIXME: Only necessary because of the triggered appointment approach. Fix if it changes. if receipt.delivered: Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map) @@ -219,14 +193,13 @@ class Watcher: appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager ) - # Register the last processed block for the watcher - self.db_manager.store_last_block_hash_watcher(block_hash) + if len(self.appointments) is 0: + logger.info("No more pending appointments") + # Register the last processed block for the watcher + self.db_manager.store_last_block_hash_watcher(block_hash) self.block_queue.task_done() - # Go back to sleep if there are no more appointments - self.sleep() - def get_breaches(self, txids): """ Gets a list of channel breaches given the list of transaction ids. From a4f7548804a2af7138d97c0bdbbd59a668c09971 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 10 Feb 2020 16:21:05 +0100 Subject: [PATCH 81/93] Removes sleep flag and reorders code for redability --- pisa/pisad.py | 68 +++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 0d361f3..e7d771e 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -8,6 +8,7 @@ from pisa import config, LOG_PREFIX from pisa.api import API from pisa.watcher import Watcher from pisa.builder import Builder +from pisa.responder import Responder from pisa.db_manager import DBManager from pisa.chain_monitor import ChainMonitor from pisa.block_processor import BlockProcessor @@ -43,33 +44,50 @@ def main(): else: try: + with open(config.get("PISA_SECRET_KEY"), "rb") as key_file: + secret_key_der = key_file.read() + + watcher = Watcher(db_manager, Responder(db_manager), secret_key_der, config) + # Create the chain monitor and start monitoring the chain - chain_monitor = ChainMonitor() - chain_monitor.monitor_chain() + chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue) watcher_appointments_data = db_manager.load_watcher_appointments() responder_trackers_data = db_manager.load_responder_trackers() - with open(config.get("PISA_SECRET_KEY"), "rb") as key_file: - secret_key_der = key_file.read() - - watcher = Watcher(db_manager, chain_monitor, secret_key_der, config) - chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) - chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep) - if len(watcher_appointments_data) == 0 and len(responder_trackers_data) == 0: logger.info("Fresh bootstrap") + watcher.awake() + watcher.responder.awake() + else: logger.info("Bootstrapping from backed up data") - block_processor = BlockProcessor() + + # Update the Watcher backed up data if found. 
+ if len(watcher_appointments_data) != 0: + watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments( + watcher_appointments_data + ) + + # Update the Responder with backed up data if found. + if len(responder_trackers_data) != 0: + watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers( + responder_trackers_data + ) + + # Awaking components so the states can be updated. + watcher.awake() + watcher.responder.awake() last_block_watcher = db_manager.load_last_block_hash_watcher() last_block_responder = db_manager.load_last_block_hash_responder() + # Populate the block queues with data if they've missed some while offline. If the blocks of both match + # we don't perform the search twice. + block_processor = BlockProcessor() + # FIXME: 32-reorgs-offline dropped txs are not used at this point. - # Get the blocks missed by both the Watcher and the Responder. If the blocks of both match we don't - # perform the search twice. last_common_ancestor_watcher, dropped_txs_watcher = block_processor.find_last_common_ancestor( last_block_watcher ) @@ -85,40 +103,22 @@ def main(): ) missed_blocks_responder = block_processor.get_missed_blocks(last_common_ancestor_responder) - # Build and update the Watcher. - if len(watcher_appointments_data) != 0: - watcher.appointments, watcher.locator_uuid_map = Builder.build_appointments( - watcher_appointments_data - ) - - # Build Responder with backed up data if found - if len(responder_trackers_data) != 0: - watcher.responder.trackers, watcher.responder.tx_tracker_map = Builder.build_trackers( - responder_trackers_data - ) - # If only one of the instances needs to be updated, it can be done separately. if len(missed_blocks_watcher) == 0 and len(missed_blocks_responder) != 0: Builder.populate_block_queue(watcher.responder.block_queue, missed_blocks_responder) - watcher.responder.awake() watcher.responder.block_queue.join() elif len(missed_blocks_responder) == 0 and len(missed_blocks_watcher) != 0: Builder.populate_block_queue(watcher.block_queue, missed_blocks_watcher) - watcher.awake() watcher.block_queue.join() - # Otherwise the need to be updated at the same time, block by block + # Otherwise they need to be updated at the same time, block by block elif len(missed_blocks_responder) != 0 and len(missed_blocks_watcher) != 0: Builder.update_states(watcher, missed_blocks_watcher, missed_blocks_responder) - # Awake the Watcher/Responder if they ended up with pending work - if watcher.appointments and watcher.asleep: - watcher.awake() - if watcher.responder.trackers and watcher.responder.asleep: - watcher.responder.awake() - - # Fire the API + # Fire the API and the ChainMonitor + # FIXME: 92-block-data-during-bootstrap-db + chain_monitor.monitor_chain() API(watcher, config=config).start() except Exception as e: logger.error("An error occurred: {}. 
Shutting down".format(e)) From 6913d1cd18102c1f930eae353c1ce8733185baaa Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 10 Feb 2020 16:21:31 +0100 Subject: [PATCH 82/93] Update tests to remove the asleep flags --- test/pisa/unit/conftest.py | 13 +--- test/pisa/unit/test_api.py | 11 ++- test/pisa/unit/test_builder.py | 109 +++------------------------ test/pisa/unit/test_chain_monitor.py | 98 ++++-------------------- test/pisa/unit/test_responder.py | 68 +++++++---------- test/pisa/unit/test_watcher.py | 36 +++++---- 6 files changed, 85 insertions(+), 250 deletions(-) diff --git a/test/pisa/unit/conftest.py b/test/pisa/unit/conftest.py index 0995539..2ffcb85 100644 --- a/test/pisa/unit/conftest.py +++ b/test/pisa/unit/conftest.py @@ -15,7 +15,6 @@ from apps.cli.blob import Blob from pisa.responder import TransactionTracker from pisa.tools import bitcoin_cli from pisa.db_manager import DBManager -from pisa.chain_monitor import ChainMonitor from common.appointment import Appointment from common.tools import compute_locator @@ -46,23 +45,13 @@ def prng_seed(): def db_manager(): manager = DBManager("test_db") # Add last know block for the Responder in the db + yield manager manager.db.close() rmtree("test_db") -@pytest.fixture(scope="module") -def chain_monitor(): - chain_monitor = ChainMonitor() - chain_monitor.monitor_chain() - - yield chain_monitor - - chain_monitor.terminate = True - generate_block() - - def generate_keypair(): client_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) client_pk = client_sk.public_key() diff --git a/test/pisa/unit/test_api.py b/test/pisa/unit/test_api.py index f94ab40..6561569 100644 --- a/test/pisa/unit/test_api.py +++ b/test/pisa/unit/test_api.py @@ -7,8 +7,10 @@ from cryptography.hazmat.primitives import serialization from pisa.api import API from pisa.watcher import Watcher +from pisa.responder import Responder from pisa.tools import bitcoin_cli from pisa import HOST, PORT +from pisa.chain_monitor import ChainMonitor from test.pisa.unit.conftest import ( generate_block, @@ -32,7 +34,7 @@ config = get_config() @pytest.fixture(scope="module") -def run_api(db_manager, chain_monitor): +def run_api(db_manager): sk, pk = generate_keypair() sk_der = sk.private_bytes( encoding=serialization.Encoding.DER, @@ -40,9 +42,10 @@ def run_api(db_manager, chain_monitor): encryption_algorithm=serialization.NoEncryption(), ) - watcher = Watcher(db_manager, chain_monitor, sk_der, get_config()) - chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) - chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep) + watcher = Watcher(db_manager, Responder(db_manager), sk_der, get_config()) + chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue) + watcher.awake() + chain_monitor.monitor_chain() api_thread = Thread(target=API(watcher, config).start) api_thread.daemon = True diff --git a/test/pisa/unit/test_builder.py b/test/pisa/unit/test_builder.py index 8fe09af..c0a2ace 100644 --- a/test/pisa/unit/test_builder.py +++ b/test/pisa/unit/test_builder.py @@ -4,6 +4,7 @@ from queue import Queue from pisa.builder import Builder from pisa.watcher import Watcher +from pisa.responder import Responder from test.pisa.unit.conftest import ( get_random_value_hex, generate_dummy_appointment, @@ -89,7 +90,7 @@ def test_populate_block_queue(): def test_update_states_empty_list(db_manager): - w = Watcher(db_manager=db_manager, chain_monitor=None, sk_der=None, config=None) + w = 
Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=None) missed_blocks_watcher = [] missed_blocks_responder = [get_random_value_hex(32)] @@ -102,121 +103,35 @@ def test_update_states_empty_list(db_manager): Builder.update_states(w, missed_blocks_responder, missed_blocks_watcher) -def test_update_states_different_sizes(run_bitcoind, db_manager, chain_monitor): - w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - chain_monitor.attach_watcher(w.responder, True) - chain_monitor.attach_responder(w.responder, True) - - # For the states to be updated data needs to be present in the actors (either appointments or trackers). - # Let's start from the Watcher. We add one appointment and mine some blocks that both are gonna miss. - w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200} +def test_update_states_responder_misses_more(run_bitcoind, db_manager): + w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config()) blocks = [] for _ in range(5): generate_block() blocks.append(bitcoin_cli().getbestblockhash()) - # Updating the states should bring both to the same last known block. The Watcher's is stored in the db since it has - # gone over do_watch, whereas the Responders in only updated by update state. + # Updating the states should bring both to the same last known block. + w.awake() + w.responder.awake() Builder.update_states(w, blocks, blocks[1:]) assert db_manager.load_last_block_hash_watcher() == blocks[-1] assert w.responder.last_known_block == blocks[-1] - # If both have work, both last known blocks are updated - w.sleep() - w.responder.sleep() - w.responder.trackers[uuid4().hex] = { - "penalty_txid": get_random_value_hex(32), - "locator": get_random_value_hex(16), - "appointment_end": 200, - } +def test_update_states_watcher_misses_more(run_bitcoind, db_manager): + # Same as before, but data is now in the Responder + w = Watcher(db_manager=db_manager, responder=Responder(db_manager), sk_der=None, config=get_config()) blocks = [] for _ in range(5): generate_block() blocks.append(bitcoin_cli().getbestblockhash()) + w.awake() + w.responder.awake() Builder.update_states(w, blocks[1:], blocks) - assert db_manager.load_last_block_hash_watcher() == blocks[-1] - assert db_manager.load_last_block_hash_responder() == blocks[-1] - - # Let's try the opposite of the first test (Responder with data, Watcher without) - w.sleep() - w.responder.sleep() - - w.appointments = {} - last_block_prev = blocks[-1] - - blocks = [] - for _ in range(5): - generate_block() - blocks.append(bitcoin_cli().getbestblockhash()) - - # The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hash't - # change. The Watcher does not keep track of reorgs, so if he has no work to do he does not even update the last - # known block. - Builder.update_states(w, blocks[1:], blocks) - assert db_manager.load_last_block_hash_watcher() == last_block_prev - assert db_manager.load_last_block_hash_responder() == blocks[-1] - - -def test_update_states_same_sizes(db_manager, chain_monitor): - # The exact same behaviour of the last test is expected here, since different sizes are even using - # populate_block_queue and then run with the same list size. 
- w = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - chain_monitor.attach_watcher(w.responder, True) - chain_monitor.attach_responder(w.responder, True) - - # For the states to be updated data needs to be present in the actors (either appointments or trackers). - # Let's start from the Watcher. We add one appointment and mine some blocks that both are gonna miss. - w.appointments[uuid4().hex] = {"locator": get_random_value_hex(16), "end_time": 200} - - blocks = [] - for _ in range(5): - generate_block() - blocks.append(bitcoin_cli().getbestblockhash()) - - Builder.update_states(w, blocks, blocks) - assert db_manager.load_last_block_hash_watcher() == blocks[-1] - assert w.responder.last_known_block == blocks[-1] - - # If both have work, both last known blocks are updated - w.sleep() - w.responder.sleep() - - w.responder.trackers[uuid4().hex] = { - "penalty_txid": get_random_value_hex(32), - "locator": get_random_value_hex(16), - "appointment_end": 200, - } - - blocks = [] - for _ in range(5): - generate_block() - blocks.append(bitcoin_cli().getbestblockhash()) - - Builder.update_states(w, blocks, blocks) assert db_manager.load_last_block_hash_watcher() == blocks[-1] assert db_manager.load_last_block_hash_responder() == blocks[-1] - - # Let's try the opposite of the first test (Responder with data, Watcher without) - w.sleep() - w.responder.sleep() - - w.appointments = {} - last_block_prev = blocks[-1] - - blocks = [] - for _ in range(5): - generate_block() - blocks.append(bitcoin_cli().getbestblockhash()) - - # The Responder should have been brought up to date via do_watch, whereas the Watcher's last known block hash't - # change. The Watcher does not keep track of reorgs, so if he has no work to do he does not even update the last - # known block. 
- Builder.update_states(w, blocks, blocks) - assert db_manager.load_last_block_hash_watcher() == last_block_prev - assert db_manager.load_last_block_hash_responder() == blocks[-1] diff --git a/test/pisa/unit/test_chain_monitor.py b/test/pisa/unit/test_chain_monitor.py index 56c2b31..7098478 100644 --- a/test/pisa/unit/test_chain_monitor.py +++ b/test/pisa/unit/test_chain_monitor.py @@ -1,20 +1,19 @@ import zmq import time +from queue import Queue from threading import Thread, Event, Condition -from pisa.watcher import Watcher -from pisa.responder import Responder from pisa.block_processor import BlockProcessor from pisa.chain_monitor import ChainMonitor -from test.pisa.unit.conftest import get_random_value_hex, generate_block, get_config +from test.pisa.unit.conftest import get_random_value_hex, generate_block def test_init(run_bitcoind): # run_bitcoind is started here instead of later on to avoid race conditions while it initializes # Not much to test here, just sanity checks to make sure nothing goes south in the future - chain_monitor = ChainMonitor() + chain_monitor = ChainMonitor(Queue(), Queue()) assert chain_monitor.best_tip is None assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0 @@ -24,41 +23,12 @@ def test_init(run_bitcoind): assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket) # The Queues and asleep flags are initialized when attaching the corresponding subscriber - assert chain_monitor.watcher_queue is None - assert chain_monitor.responder_queue is None - assert chain_monitor.watcher_asleep and chain_monitor.responder_asleep + assert isinstance(chain_monitor.watcher_queue, Queue) + assert isinstance(chain_monitor.responder_queue, Queue) -def test_attach_watcher(chain_monitor, db_manager): - watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) - - # booleans are not passed as reference in Python, so the flags need to be set separately - assert watcher.asleep == chain_monitor.watcher_asleep - watcher.asleep = False - assert chain_monitor.watcher_asleep != watcher.asleep - - # Test that the Queue work - r_hash = get_random_value_hex(32) - chain_monitor.watcher_queue.put(r_hash) - assert watcher.block_queue.get() == r_hash - - -def test_attach_responder(chain_monitor, db_manager): - responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) - chain_monitor.attach_responder(responder.block_queue, responder.asleep) - - # Same kind of testing as with the attach watcher - assert responder.asleep == chain_monitor.watcher_asleep - responder.asleep = False - assert chain_monitor.watcher_asleep != responder.asleep - - r_hash = get_random_value_hex(32) - chain_monitor.responder_queue.put(r_hash) - assert responder.block_queue.get() == r_hash - - -def test_notify_subscribers(chain_monitor): +def test_notify_subscribers(): + chain_monitor = ChainMonitor(Queue(), Queue()) # Subscribers are only notified as long as they are awake new_block = get_random_value_hex(32) @@ -66,27 +36,17 @@ def test_notify_subscribers(chain_monitor): assert chain_monitor.watcher_queue.empty() assert chain_monitor.responder_queue.empty() - chain_monitor.watcher_asleep = True - chain_monitor.responder_asleep = True - chain_monitor.notify_subscribers(new_block) - - # And remain empty afterwards since both subscribers were asleep - assert chain_monitor.watcher_queue.empty() - assert chain_monitor.responder_queue.empty() - - # Let's flag them as 
awake and try again - chain_monitor.watcher_asleep = False - chain_monitor.responder_asleep = False chain_monitor.notify_subscribers(new_block) assert chain_monitor.watcher_queue.get() == new_block assert chain_monitor.responder_queue.get() == new_block -def test_update_state(chain_monitor): +def test_update_state(): # The state is updated after receiving a new block (and only if the block is not already known). # Let's start by setting a best_tip and a couple of old tips new_block_hash = get_random_value_hex(32) + chain_monitor = ChainMonitor(Queue(), Queue()) chain_monitor.best_tip = new_block_hash chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)] @@ -105,12 +65,10 @@ def test_update_state(chain_monitor): def test_monitor_chain_polling(db_manager): # Try polling with the Watcher - chain_monitor = ChainMonitor() + wq = Queue() + chain_monitor = ChainMonitor(wq, Queue()) chain_monitor.best_tip = BlockProcessor.get_best_block_hash() - watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - chain_monitor.attach_watcher(watcher.block_queue, asleep=False) - # monitor_chain_polling runs until terminate if set polling_thread = Thread(target=chain_monitor.monitor_chain_polling, kwargs={"polling_delta": 0.1}, daemon=True) polling_thread.start() @@ -131,13 +89,10 @@ def test_monitor_chain_polling(db_manager): def test_monitor_chain_zmq(db_manager): - # Try zmq with the Responder - chain_monitor = ChainMonitor() + rq = Queue() + chain_monitor = ChainMonitor(Queue(), rq) chain_monitor.best_tip = BlockProcessor.get_best_block_hash() - responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) - chain_monitor.attach_responder(responder.block_queue, asleep=False) - zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True) zmq_thread.start() @@ -150,28 +105,10 @@ def test_monitor_chain_zmq(db_manager): chain_monitor.responder_queue.get() assert chain_monitor.responder_queue.empty() - # If we flag it to sleep no notification is sent - chain_monitor.responder_asleep = True - - for _ in range(3): - generate_block() - assert chain_monitor.responder_queue.empty() - - chain_monitor.terminate = True - # The zmq thread needs a block generation to release from the recv method. 
- generate_block() - - zmq_thread.join() - def test_monitor_chain(db_manager): # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate - chain_monitor = ChainMonitor() - - watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) - chain_monitor.attach_responder(responder.block_queue, asleep=False) - chain_monitor.attach_watcher(watcher.block_queue, asleep=False) + chain_monitor = ChainMonitor(Queue(), Queue()) chain_monitor.best_tip = None chain_monitor.monitor_chain() @@ -196,12 +133,7 @@ def test_monitor_chain(db_manager): def test_monitor_chain_single_update(db_manager): # This test tests that if both threads try to add the same block to the queue, only the first one will make it - chain_monitor = ChainMonitor() - - watcher = Watcher(db_manager=db_manager, chain_monitor=chain_monitor, sk_der=None, config=get_config()) - responder = Responder(db_manager=db_manager, chain_monitor=chain_monitor) - chain_monitor.attach_responder(responder.block_queue, asleep=False) - chain_monitor.attach_watcher(watcher.block_queue, asleep=False) + chain_monitor = ChainMonitor(Queue(), Queue()) chain_monitor.best_tip = None diff --git a/test/pisa/unit/test_responder.py b/test/pisa/unit/test_responder.py index a3a9e75..41af14f 100644 --- a/test/pisa/unit/test_responder.py +++ b/test/pisa/unit/test_responder.py @@ -1,6 +1,7 @@ import json import pytest import random +from queue import Queue from uuid import uuid4 from shutil import rmtree from copy import deepcopy @@ -18,17 +19,19 @@ from test.pisa.unit.conftest import generate_block, generate_blocks, get_random_ @pytest.fixture(scope="module") -def responder(db_manager, chain_monitor): - responder = Responder(db_manager, chain_monitor) - chain_monitor.attach_responder(responder.block_queue, responder.asleep) +def responder(db_manager): + responder = Responder(db_manager) + chain_monitor = ChainMonitor(Queue(), responder.block_queue) + chain_monitor.monitor_chain() return responder -@pytest.fixture() +@pytest.fixture(scope="session") def temp_db_manager(): db_name = get_random_value_hex(8) db_manager = DBManager(db_name) + yield db_manager db_manager.db.close() @@ -144,19 +147,17 @@ def test_tracker_from_dict_invalid_data(): assert True -def test_init_responder(responder): +def test_init_responder(temp_db_manager): + responder = Responder(temp_db_manager) assert isinstance(responder.trackers, dict) and len(responder.trackers) == 0 assert isinstance(responder.tx_tracker_map, dict) and len(responder.tx_tracker_map) == 0 assert isinstance(responder.unconfirmed_txs, list) and len(responder.unconfirmed_txs) == 0 assert isinstance(responder.missed_confirmations, dict) and len(responder.missed_confirmations) == 0 - assert isinstance(responder.chain_monitor, ChainMonitor) assert responder.block_queue.empty() - assert responder.asleep is True -def test_handle_breach(db_manager, chain_monitor): - responder = Responder(db_manager, chain_monitor) - chain_monitor.attach_responder(responder.block_queue, responder.asleep) +def test_handle_breach(db_manager): + responder = Responder(db_manager) uuid = uuid4().hex tracker = create_dummy_tracker() @@ -174,20 +175,11 @@ def test_handle_breach(db_manager, chain_monitor): assert receipt.delivered is True - # The responder automatically fires add_tracker on adding a tracker if it is asleep. We need to stop the processes - # now. 
To do so we delete all the trackers, and generate a new block. - responder.trackers = dict() - generate_block() - -def test_add_bad_response(responder): +def test_handle_breach_bad_response(responder): uuid = uuid4().hex tracker = create_dummy_tracker() - # Now that the asleep / awake functionality has been tested we can avoid manually killing the responder by setting - # to awake. That will prevent the chain_monitor thread to be launched again. - responder.asleep = False - # A txid instead of a rawtx should be enough for unit tests using the bitcoind mock, better tests are needed though. tracker.penalty_rawtx = tracker.penalty_txid @@ -206,8 +198,6 @@ def test_add_bad_response(responder): def test_add_tracker(responder): - # Responder is asleep - for _ in range(20): uuid = uuid4().hex confirmations = 0 @@ -236,8 +226,6 @@ def test_add_tracker(responder): def test_add_tracker_same_penalty_txid(responder): - # Responder is asleep - confirmations = 0 locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(random_txid=True) uuid_1 = uuid4().hex @@ -262,8 +250,6 @@ def test_add_tracker_same_penalty_txid(responder): def test_add_tracker_already_confirmed(responder): - # Responder is asleep - for i in range(20): uuid = uuid4().hex confirmations = i + 1 @@ -276,10 +262,11 @@ def test_add_tracker_already_confirmed(responder): assert penalty_txid not in responder.unconfirmed_txs -def test_do_watch(temp_db_manager, chain_monitor): +def test_do_watch(temp_db_manager): # Create a fresh responder to simplify the test - responder = Responder(temp_db_manager, chain_monitor) - chain_monitor.attach_responder(responder.block_queue, False) + responder = Responder(temp_db_manager) + chain_monitor = ChainMonitor(Queue(), responder.block_queue) + chain_monitor.monitor_chain() trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)] @@ -332,12 +319,12 @@ def test_do_watch(temp_db_manager, chain_monitor): generate_blocks(6) assert len(responder.tx_tracker_map) == 0 - assert responder.asleep is True -def test_check_confirmations(temp_db_manager, chain_monitor): - responder = Responder(temp_db_manager, chain_monitor) - chain_monitor.attach_responder(responder.block_queue, responder.asleep) +def test_check_confirmations(db_manager): + responder = Responder(db_manager) + chain_monitor = ChainMonitor(Queue(), responder.block_queue) + chain_monitor.monitor_chain() # check_confirmations checks, given a list of transaction for a block, what of the known penalty transaction have # been confirmed. 
To test this we need to create a list of transactions and the state of the responder @@ -391,11 +378,12 @@ def test_get_txs_to_rebroadcast(responder): assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys()) -def test_get_completed_trackers(db_manager, chain_monitor): +def test_get_completed_trackers(db_manager): initial_height = bitcoin_cli().getblockcount() - responder = Responder(db_manager, chain_monitor) - chain_monitor.attach_responder(responder.block_queue, responder.asleep) + responder = Responder(db_manager) + chain_monitor = ChainMonitor(Queue(), responder.block_queue) + chain_monitor.monitor_chain() # A complete tracker is a tracker that has reached the appointment end with enough confs (> MIN_CONFIRMATIONS) # We'll create three type of transactions: end reached + enough conf, end reached + no enough conf, end not reached @@ -450,10 +438,10 @@ def test_get_completed_trackers(db_manager, chain_monitor): assert set(completed_trackers_ids) == set(ended_trackers_keys) -def test_rebroadcast(db_manager, chain_monitor): - responder = Responder(db_manager, chain_monitor) - responder.asleep = False - chain_monitor.attach_responder(responder.block_queue, responder.asleep) +def test_rebroadcast(db_manager): + responder = Responder(db_manager) + chain_monitor = ChainMonitor(Queue(), responder.block_queue) + chain_monitor.monitor_chain() txs_to_rebroadcast = [] diff --git a/test/pisa/unit/test_watcher.py b/test/pisa/unit/test_watcher.py index 03c6f45..5a85bec 100644 --- a/test/pisa/unit/test_watcher.py +++ b/test/pisa/unit/test_watcher.py @@ -1,5 +1,6 @@ import pytest from uuid import uuid4 +from shutil import rmtree from threading import Thread from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives import serialization @@ -8,6 +9,7 @@ from pisa.watcher import Watcher from pisa.responder import Responder from pisa.tools import bitcoin_cli from pisa.chain_monitor import ChainMonitor +from pisa.db_manager import DBManager from test.pisa.unit.conftest import ( generate_blocks, @@ -36,11 +38,22 @@ sk_der = signing_key.private_bytes( ) +@pytest.fixture(scope="session") +def temp_db_manager(): + db_name = get_random_value_hex(8) + db_manager = DBManager(db_name) + + yield db_manager + + db_manager.db.close() + rmtree(db_name) + + @pytest.fixture(scope="module") -def watcher(db_manager, chain_monitor): - watcher = Watcher(db_manager, chain_monitor, sk_der, get_config()) - chain_monitor.attach_watcher(watcher.block_queue, watcher.asleep) - chain_monitor.attach_responder(watcher.responder.block_queue, watcher.responder.asleep) +def watcher(db_manager): + watcher = Watcher(db_manager, Responder(db_manager), sk_der, get_config()) + chain_monitor = ChainMonitor(watcher.block_queue, watcher.responder.block_queue) + chain_monitor.monitor_chain() return watcher @@ -76,19 +89,13 @@ def create_appointments(n): def test_init(run_bitcoind, watcher): assert isinstance(watcher.appointments, dict) and len(watcher.appointments) == 0 assert isinstance(watcher.locator_uuid_map, dict) and len(watcher.locator_uuid_map) == 0 - assert watcher.asleep is True assert watcher.block_queue.empty() - assert isinstance(watcher.chain_monitor, ChainMonitor) assert isinstance(watcher.config, dict) assert isinstance(watcher.signing_key, ec.EllipticCurvePrivateKey) assert isinstance(watcher.responder, Responder) def test_add_appointment(watcher): - # The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state) - # Avoid this by 
setting the state to awake. - watcher.asleep = False - # We should be able to add appointments up to the limit for _ in range(10): appointment, dispute_tx = generate_dummy_appointment( @@ -128,10 +135,11 @@ def test_add_too_many_appointments(watcher): assert sig is None -def test_do_watch(watcher): +def test_do_watch(watcher, temp_db_manager): + watcher.db_manager = temp_db_manager + # We will wipe all the previous data and add 5 appointments appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS) - watcher.chain_monitor.watcher_asleep = False # Set the data into the Watcher and in the db watcher.locator_uuid_map = locator_uuid_map @@ -142,7 +150,8 @@ def test_do_watch(watcher): watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json()) watcher.db_manager.create_append_locator_map(appointment.locator, uuid) - Thread(target=watcher.do_watch, daemon=True).start() + do_watch_thread = Thread(target=watcher.do_watch, daemon=True) + do_watch_thread.start() # Broadcast the first two for dispute_tx in dispute_txs[:2]: @@ -158,7 +167,6 @@ def test_do_watch(watcher): generate_blocks(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET) assert len(watcher.appointments) == 0 - assert watcher.asleep is True def test_get_breaches(watcher, txids, locator_uuid_map): From ab21cbfc8fe8e1d35071d85c15b40487a944b1cd Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Sat, 1 Feb 2020 12:26:02 +0100 Subject: [PATCH 83/93] Moves load_key_file to Cryptographer and updates pisad to use it --- common/cryptographer.py | 33 +++++++++++++++++++++++++- pisa/pisad.py | 6 +++-- test/common/unit/test_cryptographer.py | 26 ++++++++++++++++++++ 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/common/cryptographer.py b/common/cryptographer.py index 67ff6e0..fee5001 100644 --- a/common/cryptographer.py +++ b/common/cryptographer.py @@ -146,6 +146,35 @@ class Cryptographer: return blob + @staticmethod + def load_key_file(file_path): + """ + Loads a key from a key file. + + Args: + file_path (:obj:`str`): the path to the key file to be loaded. + + Returns: + :obj:`bytes` or :obj:`None`: the key file data if the file can be found and read. ``None`` otherwise. + """ + + if not isinstance(file_path, str): + logger.error("Key file path was expected, {} received".format(type(file_path))) + return None + + try: + with open(file_path, "rb") as key_file: + key = key_file.read() + return key + + except FileNotFoundError: + logger.error("Key file not found. 
Please check your settings") + return None + + except IOError as e: + logger.error("I/O error({}): {}".format(e.errno, e.strerror)) + return None + @staticmethod def load_public_key_der(pk_der): """ @@ -199,7 +228,7 @@ class Cryptographer: return sk except UnsupportedAlgorithm: - raise ValueError("Could not deserialize the private key (unsupported algorithm).") + logger.error("Could not deserialize the private key (unsupported algorithm)") except ValueError: logger.error("The provided data cannot be deserialized (wrong size or format)") @@ -207,6 +236,8 @@ class Cryptographer: except TypeError: logger.error("The provided data cannot be deserialized (wrong type)") + return None + @staticmethod def sign(data, sk, rtype="str"): """ diff --git a/pisa/pisad.py b/pisa/pisad.py index e7d771e..89602b5 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -3,6 +3,7 @@ from sys import argv, exit from signal import signal, SIGINT, SIGQUIT, SIGTERM from common.logger import Logger +from common.cryptographer import Cryptographer from pisa import config, LOG_PREFIX from pisa.api import API @@ -44,8 +45,9 @@ def main(): else: try: - with open(config.get("PISA_SECRET_KEY"), "rb") as key_file: - secret_key_der = key_file.read() + secret_key_der = Cryptographer.load_key_file(config.get("PISA_SECRET_KEY")) + if not secret_key_der: + raise IOError("PISA private key can't be loaded") watcher = Watcher(db_manager, Responder(db_manager), secret_key_der, config) diff --git a/test/common/unit/test_cryptographer.py b/test/common/unit/test_cryptographer.py index 44a1b77..875cea4 100644 --- a/test/common/unit/test_cryptographer.py +++ b/test/common/unit/test_cryptographer.py @@ -1,3 +1,5 @@ +import os +import pytest import binascii from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec @@ -181,6 +183,30 @@ def test_decrypt_wrong_return(): assert True +def test_load_key_file(): + dummy_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) + dummy_sk_der = dummy_sk.private_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + + # If file exists and has data in it, function should work. + with open("key_test_file", "wb") as f: + f.write(dummy_sk_der) + + appt_data = Cryptographer.load_key_file("key_test_file") + assert appt_data + + os.remove("key_test_file") + + # If file doesn't exist, function should return None + assert Cryptographer.load_key_file("nonexistent_file") is None + + # If something that's not a file_path is passed as parameter the method should also return None + assert Cryptographer.load_key_file(0) is None and Cryptographer.load_key_file(None) is None + + def test_load_public_key_der(): # load_public_key_der expects a byte encoded data. 
Any other should fail and return None
    for wtype in WRONG_TYPES:

From 5a49a93710116119ea242886455505a705810980 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Sat, 1 Feb 2020 12:26:30 +0100
Subject: [PATCH 84/93] Improves cli

- Improves modularity
- Adds missing exceptions
- Adds docstrings
- Simplifies some method names
---
 apps/cli/pisa_cli.py | 318 +++++++++++++++++++++++++------------------
 1 file changed, 183 insertions(+), 135 deletions(-)

diff --git a/apps/cli/pisa_cli.py b/apps/cli/pisa_cli.py
index 41fce7f..680b45c 100644
--- a/apps/cli/pisa_cli.py
+++ b/apps/cli/pisa_cli.py
@@ -13,13 +13,12 @@ from apps.cli import config, LOG_PREFIX
 from apps.cli.help import help_add_appointment, help_get_appointment
 from apps.cli.blob import Blob
 
+from common import constants
 from common.logger import Logger
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
 from common.tools import check_sha256_hex_format, check_locator_format, compute_locator
-
-HTTP_OK = 200
 
 logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX)
 
@@ -47,39 +46,81 @@ def generate_dummy_appointment():
     logger.info("\nData stored in dummy_appointment_data.json")
 
 
-# Loads and returns Pisa keys from disk
-def load_key_file_data(file_name):
-    try:
-        with open(file_name, "rb") as key_file:
-            key = key_file.read()
-            return key
+def load_keys(pisa_pk_path, cli_sk_path, cli_pk_path):
+    """
+    Loads all the keys required to sign, send, and verify the appointment.
 
-    except FileNotFoundError as e:
-        logger.error("Client's key file not found. Please check your settings")
-        raise e
+    Args:
+        pisa_pk_path (:obj:`str`): path to the PISA public key file.
+        cli_sk_path (:obj:`str`): path to the client private key file.
+        cli_pk_path (:obj:`str`): path to the client public key file.
 
-    except IOError as e:
-        logger.error("I/O error({}): {}".format(e.errno, e.strerror))
-        raise e
+    Returns:
+        :obj:`tuple` or ``None``: a three-item tuple containing the pisa_pk object, the cli_sk object and the
+            der-encoded cli_pk if all keys can be loaded. ``None`` otherwise.
+    """
 
+    pisa_pk_der = Cryptographer.load_key_file(pisa_pk_path)
+    pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der)
 
-# Makes sure that the folder APPOINTMENTS_FOLDER_NAME exists, then saves the appointment and signature in it.
-def save_signed_appointment(appointment, signature):
-    # Create the appointments directory if it doesn't already exist
-    os.makedirs(config.get("APPOINTMENTS_FOLDER_NAME"), exist_ok=True)
+    if pisa_pk is None:
+        logger.error("PISA's public key file not found. Please check your settings")
+        return None
 
-    timestamp = int(time.time())
-    locator = appointment["locator"]
-    uuid = uuid4().hex  # prevent filename collisions
+    cli_sk_der = Cryptographer.load_key_file(cli_sk_path)
+    cli_sk = Cryptographer.load_private_key_der(cli_sk_der)
 
-    filename = "{}/appointment-{}-{}-{}.json".format(config.get("APPOINTMENTS_FOLDER_NAME"), timestamp, locator, uuid)
-    data = {"appointment": appointment, "signature": signature}
+    if cli_sk is None:
+        logger.error("Client's private key file not found. Please check your settings")
+        return None
 
-    with open(filename, "w") as f:
-        json.dump(data, f)
+    cli_pk_der = Cryptographer.load_key_file(cli_pk_path)
+
+    if cli_pk_der is None:
+        logger.error("Client's public key file not found. Please check your settings")
Please check your settings") + return None + + return pisa_pk, cli_sk, cli_pk_der def add_appointment(args): + """ + Manages the add_appointment command, from argument parsing, trough sending the appointment to the tower, until + saving the appointment receipt. + + The life cycle of the function is as follows: + - Load the add_appointment arguments + - Check that the given commitment_txid is correct (proper format and not missing) + - Check that the transaction is correct (not missing) + - Create the appointment locator and encrypted blob from the commitment_txid and the penalty_tx + - Load the client private key and sign the appointment + - Send the appointment to the tower + - Wait for the response + - Check the tower's response and signature + - Store the receipt (appointment + signature) on disk + + If any of the above-mentioned steps fails, the method returns false, otherwise it returns true. + + Args: + args (:obj:`list`): a list of arguments to pass to ``parse_add_appointment_args``. Must contain a json encoded + appointment, or the file option and the path to a file containing a json encoded appointment. + + Returns: + :obj:`bool`: True if the appointment is accepted by the tower and the receipt is properly stored, false if any + error occurs during the process. + """ + + pisa_pk, cli_sk, cli_pk_der = load_keys( + config.get("PISA_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"), config.get("CLI_PUBLIC_KEY") + ) + + try: + hex_pk_der = binascii.hexlify(cli_pk_der) + + except binascii.Error as e: + logger.error("Could not successfully encode public key as hex", error=str(e)) + return False + # Get appointment data from user. appointment_data = parse_add_appointment_args(args) @@ -105,17 +146,16 @@ def add_appointment(args): return False appointment = Appointment.from_dict(appointment_data) + signature = Cryptographer.sign(appointment.serialize(), cli_sk) - signature = get_appointment_signature(appointment) - hex_pk_der = get_pk() - - if not (appointment and signature and hex_pk_der): + if not (appointment and signature): return False data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")} # Send appointment to the server. - response_json = post_data_to_add_appointment_endpoint(data) + server_response = post_appointment(data) + response_json = process_post_appointment_response(server_response) if response_json is None: return False @@ -126,27 +166,29 @@ def add_appointment(args): logger.error("The response does not contain the signature of the appointment") return False - valid = check_signature(signature, appointment) - - if not valid: + if not Cryptographer.verify(appointment.serialize(), signature, pisa_pk): logger.error("The returned appointment's signature is invalid") return False - logger.info("Appointment accepted and signed by Pisa") - # all good, store appointment and signature - try: - save_signed_appointment(appointment.to_dict(), signature) + logger.info("Appointment accepted and signed by PISA") - except OSError as e: - logger.error("There was an error while saving the appointment", error=e) - return False - - return True + # All good, store appointment and signature + return save_appointment_receipt(appointment.to_dict(), signature) -# Parse arguments passed to add_appointment and handle them accordingly. -# Returns appointment data. def parse_add_appointment_args(args): + """ + Parses the arguments of the add_appointment command. 
+
+    Args:
+        args (:obj:`list`): a list of arguments to pass to ``parse_add_appointment_args``. Must contain a json encoded
+            appointment, or the file option and the path to a file containing a json encoded appointment.
+
+    Returns:
+        :obj:`dict` or :obj:`None`: A dictionary containing the appointment data if it can be loaded. ``None``
+            otherwise.
+    """
+
     use_help = "Use 'help add_appointment' for help of how to use the command"
 
     if not args:
@@ -182,80 +224,118 @@ def parse_add_appointment_args(args):
     return appointment_data
 
 
-# Sends appointment data to add_appointment endpoint to be processed by the server.
-def post_data_to_add_appointment_endpoint(data):
+def post_appointment(data):
+    """
+    Sends appointment data to the add_appointment endpoint to be processed by the tower.
+
+    Args:
+        data (:obj:`dict`): a dictionary containing three fields: an appointment, the client-side signature, and the
+            der-encoded client public key.
+
+    Returns:
+        :obj:`requests.models.Response` or ``None``: the raw response returned by the tower if the data can be posted.
+            ``None`` otherwise.
+    """
+
     logger.info("Sending appointment to PISA")
 
     try:
         add_appointment_endpoint = "http://{}:{}".format(pisa_api_server, pisa_api_port)
-        r = requests.post(url=add_appointment_endpoint, json=json.dumps(data), timeout=5)
+        return requests.post(url=add_appointment_endpoint, json=json.dumps(data), timeout=5)
 
-        response_json = r.json()
+    except ConnectTimeout:
+        logger.error("Can't connect to PISA API. Connection timeout")
+        return None
+
+    except ConnectionError:
+        logger.error("Can't connect to PISA API. Server cannot be reached")
+        return None
+
+
+def process_post_appointment_response(response):
+    """
+    Processes the server response to an add_appointment request.
+
+    Args:
+        response (:obj:`requests.models.Response`): a ``Response`` object obtained from the sent request.
+
+    Returns:
+        :obj:`dict` or :obj:`None`: a dictionary containing the tower's response data if it can be properly parsed and
+            the response type is ``HTTP_OK``. ``None`` otherwise.
+    """
+
+    try:
+        response_json = response.json()
 
     except json.JSONDecodeError:
         logger.error("The response was not valid JSON")
         return None
 
-    except ConnectTimeout:
-        logger.error("Can't connect to pisa API. Connection timeout")
-        return None
-
-    except ConnectionError:
-        logger.error("Can't connect to pisa API. Server cannot be reached")
-        return None
-
-    if r.status_code != HTTP_OK:
+    if response.status_code != constants.HTTP_OK:
         if "error" not in response_json:
-            logger.error("The server returned an error status code but no error description", status_code=r.status_code)
+            logger.error(
+                "The server returned an error status code but no error description", status_code=response.status_code
+            )
 
         else:
             error = response_json["error"]
             logger.error(
                 "The server returned an error status code with an error description",
-                status_code=r.status_code,
+                status_code=response.status_code,
                 description=error,
             )
 
         return None
 
-    if "signature" not in response_json:
-        logger.error("The response does not contain the signature of the appointment")
-        return None
-
     return response_json
 
 
-# Verify that the signature returned from the watchtower is valid.
-def check_signature(signature, appointment):
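+# A receipt, as stored on disk by save_appointment_receipt below, is a json document of the form:
+#   {"appointment": {...}, "signature": "<tower signature over the serialized appointment>"}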
+def save_appointment_receipt(appointment, signature):
+    """
+    Saves an appointment receipt to disk. A receipt consists of an appointment and a signature from the tower.
+
+    Args:
+        appointment (:obj:`Appointment `): the appointment to be saved on disk.
+        signature (:obj:`str`): the signature of the appointment performed by the tower.
+
+    Returns:
+        :obj:`bool`: ``True`` if the appointment is properly saved, ``False`` otherwise (e.g. if an I/O error occurs
+            whilst writing the file on disk).
+    """
+
+    # Create the appointments directory if it doesn't already exist
+    os.makedirs(config.get("APPOINTMENTS_FOLDER_NAME"), exist_ok=True)
+
+    timestamp = int(time.time())
+    locator = appointment["locator"]
+    uuid = uuid4().hex  # prevent filename collisions
+
+    filename = "{}/appointment-{}-{}-{}.json".format(config.get("APPOINTMENTS_FOLDER_NAME"), timestamp, locator, uuid)
+    data = {"appointment": appointment, "signature": signature}
+
     try:
-        pisa_pk_der = load_key_file_data(config.get("PISA_PUBLIC_KEY"))
-        pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der)
-
-        if pisa_pk is None:
-            logger.error("Failed to deserialize the public key. It might be in an unsupported format")
-            return False
-
-        return Cryptographer.verify(appointment.serialize(), signature, pisa_pk)
-
-    except FileNotFoundError:
-        logger.error("Pisa's public key file not found. Please check your settings")
-        return False
+        with open(filename, "w") as f:
+            json.dump(data, f)
+        return True
 
     except IOError as e:
-        logger.error("I/O error", errno=e.errno, error=e.strerror)
+        logger.error("There was an error while saving the appointment", error=e)
         return False
 
 
-def get_appointment(args):
-    if not args:
-        logger.error("No arguments were given")
-        return None
+def get_appointment(locator):
+    """
+    Gets information about an appointment from the tower.
 
-    arg_opt = args.pop(0)
+    Args:
+        locator (:obj:`str`): the appointment locator used to identify it.
 
-    if arg_opt in ["-h", "--help"]:
-        sys.exit(help_get_appointment())
-    else:
-        locator = arg_opt
-        valid_locator = check_locator_format(locator)
+    Returns:
+        :obj:`dict` or :obj:`None`: a dictionary containing the appointment data if the locator is valid and the tower
+            responds. ``None`` otherwise.
+    """
+
+    valid_locator = check_locator_format(locator)
 
     if not valid_locator:
         logger.error("The provided locator is not valid", locator=locator)
@@ -266,60 +346,17 @@ def get_appointment(args):
         return None
 
     get_appointment_endpoint = "http://{}:{}/get_appointment".format(pisa_api_server, pisa_api_port)
     parameters = "?locator={}".format(locator)
 
     try:
         r = requests.get(url=get_appointment_endpoint + parameters, timeout=5)
-        logger.info("Appointment response returned from server: {}".format(r.json()))
 
         return r.json()
 
     except ConnectTimeout:
-        logger.error("Can't connect to pisa API. Connection timeout")
+        logger.error("Can't connect to PISA API. Connection timeout")
        return None
 
     except ConnectionError:
-        logger.error("Can't connect to pisa API. Server cannot be reached")
+        logger.error("Can't connect to PISA API. Server cannot be reached")
        return None
 
 
-def get_appointment_signature(appointment):
-    try:
-        sk_der = load_key_file_data(config.get("CLI_PRIVATE_KEY"))
-        cli_sk = Cryptographer.load_private_key_der(sk_der)
-
-        signature = Cryptographer.sign(appointment.serialize(), cli_sk)
-
-        return signature
-
-    except ValueError:
-        logger.error("Failed to deserialize the public key. It might be in an unsupported format")
-        return False
-
-    except FileNotFoundError:
-        logger.error("Client's private key file not found. Please check your settings")
-        return False
-
-    except IOError as e:
-        logger.error("I/O error", errno=e.errno, error=e.strerror)
-        return False
-
-
-def get_pk():
-    try:
-        cli_pk_der = load_key_file_data(config.get("CLI_PUBLIC_KEY"))
-        hex_pk_der = binascii.hexlify(cli_pk_der)
-
-        return hex_pk_der
-
-    except FileNotFoundError:
-        logger.error("Client's public key file not found. 
Please check your settings") - return False - - except IOError as e: - logger.error("I/O error", errno=e.errno, error=e.strerror) - return False - - except binascii.Error as e: - logger.error("Could not successfully encode public key as hex: ", e) - return False - - def show_usage(): return ( "USAGE: " @@ -332,7 +369,7 @@ def show_usage(): "\n\t-s, --server \tAPI server where to send the requests. Defaults to btc.pisa.watch (modifiable in " "__init__.py)" "\n\t-p, --port \tAPI port where to send the requests. Defaults to 9814 (modifiable in __init__.py)" - "\n\t-d, --debug \tshows debug information and stores it in pisa.log" + "\n\t-d, --debug \tshows debug information and stores it in pisa_cli.log" "\n\t-h --help \tshows this message." ) @@ -366,7 +403,18 @@ if __name__ == "__main__": add_appointment(args) elif command == "get_appointment": - get_appointment(args) + if not args: + logger.error("No arguments were given") + + else: + arg_opt = args.pop(0) + + if arg_opt in ["-h", "--help"]: + sys.exit(help_get_appointment()) + + appointment_data = get_appointment(arg_opt) + if appointment_data: + print(appointment_data) elif command == "help": if args: From ee4269d0471394954b01f85a8223e6fa59e67470 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Sat, 1 Feb 2020 12:28:44 +0100 Subject: [PATCH 85/93] Updates cli tests and adds some missing ones --- test/apps/cli/unit/test_pisa_cli.py | 205 ++++++++++++---------------- 1 file changed, 88 insertions(+), 117 deletions(-) diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_pisa_cli.py index 0ce40c6..d972118 100644 --- a/test/apps/cli/unit/test_pisa_cli.py +++ b/test/apps/cli/unit/test_pisa_cli.py @@ -9,36 +9,31 @@ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import ec +from common.tools import compute_locator from common.appointment import Appointment from common.cryptographer import Cryptographer +from apps.cli.blob import Blob import apps.cli.pisa_cli as pisa_cli from test.apps.cli.unit.conftest import get_random_value_hex # dummy keys for the tests -pisa_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) -pisa_pk = pisa_sk.public_key() +dummy_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) +dummy_pk = dummy_sk.public_key() +another_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) -other_sk = ec.generate_private_key(ec.SECP256K1, default_backend()) - -pisa_sk_der = pisa_sk.private_bytes( +dummy_sk_der = dummy_sk.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) -pisa_pk_der = pisa_pk.public_bytes( +dummy_pk_der = dummy_pk.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) -other_sk_der = other_sk.private_bytes( - encoding=serialization.Encoding.DER, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption(), -) - # Replace the key in the module with a key we control for the tests -pisa_cli.pisa_public_key = pisa_pk +pisa_cli.pisa_public_key = dummy_pk # Replace endpoint with dummy one pisa_cli.pisa_api_server = "dummy.com" pisa_cli.pisa_api_port = 12345 @@ -54,22 +49,24 @@ dummy_appointment_request = { # This is the format appointment turns into once it hits "add_appointment" dummy_appointment_full = { - "locator": get_random_value_hex(16), - 
"start_time": 1500, - "end_time": 50000, - "to_self_delay": 200, - "encrypted_blob": get_random_value_hex(120), + "locator": compute_locator(dummy_appointment_request.get("tx_id")), + "start_time": dummy_appointment_request.get("start_time"), + "end_time": dummy_appointment_request.get("end_time"), + "to_self_delay": dummy_appointment_request.get("to_self_delay"), + "encrypted_blob": Cryptographer.encrypt( + Blob(dummy_appointment_request.get("tx")), dummy_appointment_request.get("tx_id") + ), } dummy_appointment = Appointment.from_dict(dummy_appointment_full) -def get_dummy_pisa_sk_der(*args): - return pisa_sk_der +def load_dummy_keys(*args): + return dummy_pk, dummy_sk, dummy_pk_der def get_dummy_pisa_pk_der(*args): - return pisa_pk_der + return dummy_pk_der def get_dummy_hex_pk_der(*args): @@ -77,42 +74,52 @@ def get_dummy_hex_pk_der(*args): def get_dummy_signature(*args): - sk = Cryptographer.load_private_key_der(pisa_sk_der) - return Cryptographer.sign(dummy_appointment.serialize(), sk) + return Cryptographer.sign(dummy_appointment.serialize(), dummy_sk) def get_bad_signature(*args): - sk = Cryptographer.load_private_key_der(other_sk_der) - return Cryptographer.sign(dummy_appointment.serialize(), sk) + return Cryptographer.sign(dummy_appointment.serialize(), another_sk) -def valid_sig(*args): - return True - - -def invalid_sig(*args): - return False +def test_load_keys(): + # Let's first create a private key and public key files + private_key_file_path = "sk_test_file" + public_key_file_path = "pk_test_file" + with open(private_key_file_path, "wb") as f: + f.write(dummy_sk_der) + with open(public_key_file_path, "wb") as f: + f.write(dummy_pk_der) + + # Now we can test the function passing the using this files (we'll use the same pk for both) + r = pisa_cli.load_keys(public_key_file_path, private_key_file_path, public_key_file_path) + assert isinstance(r, tuple) + assert len(r) == 3 + + # If any param does not match we should get None as result + assert pisa_cli.load_keys(None, private_key_file_path, public_key_file_path) is None + assert pisa_cli.load_keys(public_key_file_path, None, public_key_file_path) is None + assert pisa_cli.load_keys(public_key_file_path, private_key_file_path, None) is None + + # The same should happen if we pass a public key where a private should be, for instance + assert pisa_cli.load_keys(private_key_file_path, public_key_file_path, private_key_file_path) is None + + os.remove(private_key_file_path) + os.remove(public_key_file_path) +# TODO: 90-add-more-add-appointment-tests @responses.activate def test_add_appointment(monkeypatch): # Simulate a request to add_appointment for dummy_appointment, make sure that the right endpoint is requested # and the return value is True + monkeypatch.setattr(pisa_cli, "load_keys", load_dummy_keys) - # Make sure the test uses the dummy signature - monkeypatch.setattr(pisa_cli, "get_appointment_signature", get_dummy_signature) - monkeypatch.setattr(pisa_cli, "get_pk", get_dummy_hex_pk_der) - monkeypatch.setattr(pisa_cli, "check_signature", valid_sig) - - response = {"locator": dummy_appointment.to_dict()["locator"], "signature": get_dummy_signature()} - + response = {"locator": dummy_appointment.locator, "signature": get_dummy_signature()} responses.add(responses.POST, pisa_endpoint, json=response, status=200) - result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)]) assert len(responses.calls) == 1 assert responses.calls[0].request.url == pisa_endpoint - assert result @@ -122,9 +129,7 @@ def 
test_add_appointment_with_invalid_signature(monkeypatch): # make sure that the right endpoint is requested, but the return value is False # Make sure the test uses the bad dummy signature - monkeypatch.setattr(pisa_cli, "get_appointment_signature", get_bad_signature) - monkeypatch.setattr(pisa_cli, "get_pk", get_dummy_hex_pk_der) - monkeypatch.setattr(pisa_cli, "check_signature", invalid_sig) + monkeypatch.setattr(pisa_cli, "load_keys", load_dummy_keys) response = { "locator": dummy_appointment.to_dict()["locator"], @@ -132,50 +137,11 @@ def test_add_appointment_with_invalid_signature(monkeypatch): } responses.add(responses.POST, pisa_endpoint, json=response, status=200) - result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)]) assert result is False -def test_load_key_file_data(): - # If file exists and has data in it, function should work. - with open("key_test_file", "w+b") as f: - f.write(pisa_sk_der) - - appt_data = pisa_cli.load_key_file_data("key_test_file") - assert appt_data - - os.remove("key_test_file") - - # If file doesn't exist, function should fail. - with pytest.raises(FileNotFoundError): - assert pisa_cli.load_key_file_data("nonexistent_file") - - -def test_save_signed_appointment(monkeypatch): - appointments_folder = "test_appointments_receipts" - pisa_cli.config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder - - pisa_cli.save_signed_appointment(dummy_appointment.to_dict(), get_dummy_signature()) - - # In folder "Appointments," grab all files and print them. - files = os.listdir(appointments_folder) - - found = False - for f in files: - if dummy_appointment.to_dict().get("locator") in f: - found = True - - assert found - - # If "appointments" directory doesn't exist, function should create it. - assert os.path.exists(appointments_folder) - - # Delete test directory once we're done. - shutil.rmtree(appointments_folder) - - def test_parse_add_appointment_args(): # If no args are passed, function should fail. 
appt_data = pisa_cli.parse_add_appointment_args(None)
@@ -200,33 +166,58 @@ def test_parse_add_appointment_args():
 
 
 @responses.activate
-def test_post_data_to_add_appointment_endpoint():
+def test_post_appointment():
     response = {
         "locator": dummy_appointment.to_dict()["locator"],
-        "signature": Cryptographer.sign(dummy_appointment.serialize(), pisa_sk),
+        "signature": Cryptographer.sign(dummy_appointment.serialize(), dummy_sk),
     }
 
     responses.add(responses.POST, pisa_endpoint, json=response, status=200)
-
-    response = pisa_cli.post_data_to_add_appointment_endpoint(json.dumps(dummy_appointment_request))
+    response = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
 
     assert len(responses.calls) == 1
     assert responses.calls[0].request.url == pisa_endpoint
-
     assert response
 
 
-def test_check_signature(monkeypatch):
-    # Make sure the test uses the right dummy key instead of loading it from disk
-    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_pk_der)
+@responses.activate
+def test_process_post_appointment_response():
+    # Let's first create a response
+    response = {
+        "locator": dummy_appointment.to_dict()["locator"],
+        "signature": Cryptographer.sign(dummy_appointment.serialize(), dummy_sk),
+    }
 
-    valid = pisa_cli.check_signature(get_dummy_signature(), dummy_appointment)
+    # A 200 OK with a correct json response should return the json of the response
+    responses.add(responses.POST, pisa_endpoint, json=response, status=200)
+    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert pisa_cli.process_post_appointment_response(r) == r.json()
 
-    assert valid
+    # If we modify the response code to a rejection (let's say 404) we should get None
+    responses.replace(responses.POST, pisa_endpoint, json=response, status=404)
+    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert pisa_cli.process_post_appointment_response(r) is None
 
-    valid = pisa_cli.check_signature(get_bad_signature(), dummy_appointment)
+    # The same should happen if the response is not in json
+    responses.replace(responses.POST, pisa_endpoint, status=404)
+    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert pisa_cli.process_post_appointment_response(r) is None
 
-    assert not valid
+
+def test_save_appointment_receipt(monkeypatch):
+    appointments_folder = "test_appointments_receipts"
+    pisa_cli.config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder
+
+    # The function creates a new directory if it does not exist
+    assert not os.path.exists(appointments_folder)
+    pisa_cli.save_appointment_receipt(dummy_appointment.to_dict(), get_dummy_signature())
+    assert os.path.exists(appointments_folder)
+
+    # Check that the receipt has been saved by checking the file names
+    files = os.listdir(appointments_folder)
+    assert any([dummy_appointment.locator in f for f in files])
+
+    shutil.rmtree(appointments_folder)
 
 
 @responses.activate
@@ -237,39 +228,19 @@ def test_get_appointment():
     request_url = "{}get_appointment?locator={}".format(pisa_endpoint, response.get("locator"))
     responses.add(responses.GET, request_url, json=response, status=200)
-
-    result = pisa_cli.get_appointment([response.get("locator")])
+    result = pisa_cli.get_appointment(response.get("locator"))
 
     assert len(responses.calls) == 1
     assert responses.calls[0].request.url == request_url
-
     assert result.get("locator") == response.get("locator")
 
 
 @responses.activate
 def test_get_appointment_err():
-    locator = get_random_value_hex(32)
+    locator = get_random_value_hex(16)
 
     # Test that get_appointment handles a connection error appropriately.
    request_url = "{}get_appointment?locator={}".format(pisa_endpoint, locator)
    responses.add(responses.GET, request_url, body=ConnectionError())
 
-    assert not pisa_cli.get_appointment([locator])
-
-
-def test_get_appointment_signature(monkeypatch):
-    # Make sure the test uses the right dummy key instead of loading it from disk
-    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_sk_der)
-
-    signature = pisa_cli.get_appointment_signature(dummy_appointment)
-
-    assert isinstance(signature, str)
-
-
-def test_get_pk(monkeypatch):
-    # Make sure the test uses the right dummy key instead of loading it from disk
-    monkeypatch.setattr(pisa_cli, "load_key_file_data", get_dummy_pisa_pk_der)
-
-    pk = pisa_cli.get_pk()
-
-    assert isinstance(pk, bytes)
+    assert not pisa_cli.get_appointment(locator)

From dd232c678aae07589897cb4f9af840acfa592e5b Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Sat, 1 Feb 2020 12:28:56 +0100
Subject: [PATCH 86/93] Updates default cli.log name

---
 apps/cli/sample_conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/apps/cli/sample_conf.py b/apps/cli/sample_conf.py
index d9f2b90..6749dd2 100644
--- a/apps/cli/sample_conf.py
+++ b/apps/cli/sample_conf.py
@@ -5,7 +5,7 @@ DEFAULT_PISA_API_PORT = 9814
 
 # PISA-CLI
 DATA_FOLDER = "~/.pisa_btc/"
-CLIENT_LOG_FILE = "pisa-cli.log"
+CLIENT_LOG_FILE = "cli.log"
 
 APPOINTMENTS_FOLDER_NAME = "appointment_receipts"
 
 CLI_PUBLIC_KEY = "cli_pk.der"

From f492fe7cbbb20b3734b56cbe32c83578f610d25a Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Sat, 1 Feb 2020 13:00:03 +0100
Subject: [PATCH 87/93] Updates e2e tests to use new cli methods

---
 test/pisa/e2e/test_basic_e2e.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index dee74b2..a468099 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -1,10 +1,12 @@
 import json
+import binascii
 from time import sleep
 from riemann.tx import Tx
 
 from pisa import HOST, PORT
 from apps.cli import pisa_cli
 from apps.cli.blob import Blob
+from apps.cli import config as cli_conf
 from common.tools import compute_locator
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
@@ -35,7 +37,7 @@ def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
 def get_appointment_info(locator):
     # Check that the justice has been triggered (the appointment has moved from Watcher to Responder)
     sleep(1)  # Let's add a bit of delay so the state can be updated
-    return pisa_cli.get_appointment([locator])
+    return pisa_cli.get_appointment(locator)
 
 
 def test_appointment_life_cycle(bitcoin_cli, create_txs):
@@ -119,18 +121,22 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), appointment_data.get("tx_id"))
 
     appointment = Appointment.from_dict(appointment_data)
-    signature = pisa_cli.get_appointment_signature(appointment)
-    hex_pk_der = pisa_cli.get_pk()
+    pisa_pk, cli_sk, cli_pk_der = pisa_cli.load_keys(
+        cli_conf.get("PISA_PUBLIC_KEY"), cli_conf.get("CLI_PRIVATE_KEY"), cli_conf.get("CLI_PUBLIC_KEY")
+    )
+    hex_pk_der = binascii.hexlify(cli_pk_der)
 
+    signature = Cryptographer.sign(appointment.serialize(), cli_sk)
     data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
 
     # Send appointment to the server. 
- response_json = pisa_cli.post_data_to_add_appointment_endpoint(data) + response = pisa_cli.post_appointment(data) + response_json = pisa_cli.process_post_appointment_response(response) # Check that the server has accepted the appointment signature = response_json.get("signature") assert signature is not None - assert pisa_cli.check_signature(signature, appointment) is True + assert Cryptographer.verify(appointment.serialize(), signature, pisa_pk) is True assert response_json.get("locator") == appointment.locator # Trigger the appointment From 22ded55990d632e307b062c7ff0b41033fb7c859 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 11 Feb 2020 18:35:06 +0100 Subject: [PATCH 88/93] Renames pisa_cli to wt_cli --- apps/cli/__init__.py | 6 +- apps/cli/sample_conf.py | 4 - apps/cli/{pisa_cli.py => wt_cli.py} | 141 ++++++++---------- pisa/inspector.py | 19 ++- .../unit/{test_pisa_cli.py => test_wt_cli.py} | 99 ++++++------ test/pisa/e2e/test_basic_e2e.py | 40 ++--- 6 files changed, 152 insertions(+), 157 deletions(-) rename apps/cli/{pisa_cli.py => wt_cli.py} (81%) rename test/apps/cli/unit/{test_pisa_cli.py => test_wt_cli.py} (70%) diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py index 0861ee2..ec94bd0 100644 --- a/apps/cli/__init__.py +++ b/apps/cli/__init__.py @@ -11,9 +11,9 @@ conf_fields = { "DATA_FOLDER": {"value": conf.DATA_FOLDER, "type": str}, "CLIENT_LOG_FILE": {"value": conf.CLIENT_LOG_FILE, "type": str, "path": True}, "APPOINTMENTS_FOLDER_NAME": {"value": conf.APPOINTMENTS_FOLDER_NAME, "type": str, "path": True}, - "CLI_PUBLIC_KEY": {"value": conf.CLI_PUBLIC_KEY, "type": str, "path": True}, - "CLI_PRIVATE_KEY": {"value": conf.CLI_PRIVATE_KEY, "type": str, "path": True}, - "PISA_PUBLIC_KEY": {"value": conf.PISA_PUBLIC_KEY, "type": str, "path": True}, + # "CLI_PUBLIC_KEY": {"value": conf.CLI_PUBLIC_KEY, "type": str, "path": True}, + # "CLI_PRIVATE_KEY": {"value": conf.CLI_PRIVATE_KEY, "type": str, "path": True}, + # "PISA_PUBLIC_KEY": {"value": conf.PISA_PUBLIC_KEY, "type": str, "path": True}, } # Expand user (~) if found and check fields are correct diff --git a/apps/cli/sample_conf.py b/apps/cli/sample_conf.py index 6749dd2..ebe03ed 100644 --- a/apps/cli/sample_conf.py +++ b/apps/cli/sample_conf.py @@ -7,7 +7,3 @@ DATA_FOLDER = "~/.pisa_btc/" CLIENT_LOG_FILE = "cli.log" APPOINTMENTS_FOLDER_NAME = "appointment_receipts" - -CLI_PUBLIC_KEY = "cli_pk.der" -CLI_PRIVATE_KEY = "cli_sk.der" -PISA_PUBLIC_KEY = "pisa_pk.der" diff --git a/apps/cli/pisa_cli.py b/apps/cli/wt_cli.py similarity index 81% rename from apps/cli/pisa_cli.py rename to apps/cli/wt_cli.py index 680b45c..275ae25 100644 --- a/apps/cli/pisa_cli.py +++ b/apps/cli/wt_cli.py @@ -22,65 +22,49 @@ from common.tools import check_sha256_hex_format, check_locator_format, compute_ logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX) -# FIXME: TESTING ENDPOINT, WON'T BE THERE IN PRODUCTION -def generate_dummy_appointment(): - get_block_count_end_point = "http://{}:{}/get_block_count".format(pisa_api_server, pisa_api_port) - r = requests.get(url=get_block_count_end_point, timeout=5) - - current_height = r.json().get("block_count") - - dummy_appointment_data = { - "tx": os.urandom(192).hex(), - "tx_id": os.urandom(32).hex(), - "start_time": current_height + 5, - "end_time": current_height + 10, - "to_self_delay": 20, - } - - logger.info( - "Generating dummy appointment data:" "\n\n" + json.dumps(dummy_appointment_data, indent=4, sort_keys=True) - ) - - json.dump(dummy_appointment_data, 
open("dummy_appointment_data.json", "w")) - - logger.info("\nData stored in dummy_appointment_data.json") +# FIXME: creating a simpler load_keys for the alpha. Client keys will not be necessary. PISA key is hardcoded. +# def load_keys(pisa_pk_path, cli_sk_path, cli_pk_path): +# """ +# Loads all the keys required so sign, send, and verify the appointment. +# +# Args: +# pisa_pk_path (:obj:`str`): path to the PISA public key file. +# cli_sk_path (:obj:`str`): path to the client private key file. +# cli_pk_path (:obj:`str`): path to the client public key file. +# +# Returns: +# :obj:`tuple` or ``None``: a three item tuple containing a pisa_pk object, cli_sk object and the cli_sk_der +# encoded key if all keys can be loaded. ``None`` otherwise. +# """ +# +# pisa_pk_der = Cryptographer.load_key_file(pisa_pk_path) +# pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der) +# +# if pisa_pk is None: +# logger.error("PISA's public key file not found. Please check your settings") +# return None +# +# cli_sk_der = Cryptographer.load_key_file(cli_sk_path) +# cli_sk = Cryptographer.load_private_key_der(cli_sk_der) +# +# if cli_sk is None: +# logger.error("Client's private key file not found. Please check your settings") +# return None +# +# cli_pk_der = Cryptographer.load_key_file(cli_pk_path) +# +# if cli_pk_der is None: +# logger.error("Client's public key file not found. Please check your settings") +# return None +# +# return pisa_pk, cli_sk, cli_pk_der -def load_keys(pisa_pk_path, cli_sk_path, cli_pk_path): - """ - Loads all the keys required so sign, send, and verify the appointment. +def load_keys(): + PISA_PUBLIC_KEY = "3056301006072a8648ce3d020106052b8104000a0342000430053e39c53b8bcb43354a4ed886b8082af1d1e8fc14956e60ad0592bfdfab511b7e309f6ac83b7495462196692e145bf7b1a321e96ec8fc4d678719c77342da" + pisa_pk = Cryptographer.load_public_key_der(binascii.unhexlify(PISA_PUBLIC_KEY)) - Args: - pisa_pk_path (:obj:`str`): path to the PISA public key file. - cli_sk_path (:obj:`str`): path to the client private key file. - cli_pk_path (:obj:`str`): path to the client public key file. - - Returns: - :obj:`tuple` or ``None``: a three item tuple containing a pisa_pk object, cli_sk object and the cli_sk_der - encoded key if all keys can be loaded. ``None`` otherwise. - """ - - pisa_pk_der = Cryptographer.load_key_file(pisa_pk_path) - pisa_pk = Cryptographer.load_public_key_der(pisa_pk_der) - - if pisa_pk is None: - logger.error("PISA's public key file not found. Please check your settings") - return None - - cli_sk_der = Cryptographer.load_key_file(cli_sk_path) - cli_sk = Cryptographer.load_private_key_der(cli_sk_der) - - if cli_sk is None: - logger.error("Client's private key file not found. Please check your settings") - return None - - cli_pk_der = Cryptographer.load_key_file(cli_pk_path) - - if cli_pk_der is None: - logger.error("Client's public key file not found. Please check your settings") - return None - - return pisa_pk, cli_sk, cli_pk_der + return pisa_pk def add_appointment(args): @@ -109,16 +93,20 @@ def add_appointment(args): :obj:`bool`: True if the appointment is accepted by the tower and the receipt is properly stored, false if any error occurs during the process. """ + # FIXME: creating a simpler load_keys for the alpha. Client keys will not be necessary. PISA key is hardcoded. 
+    # pisa_pk, cli_sk, cli_pk_der = load_keys(
+    #     config.get("PISA_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"), config.get("CLI_PUBLIC_KEY")
+    # )
+    #
+    # try:
+    #     hex_pk_der = binascii.hexlify(cli_pk_der)
+    #
+    # except binascii.Error as e:
+    #     logger.error("Could not successfully encode public key as hex", error=str(e))
+    #     return False
+    pisa_pk = load_keys()
 
-    pisa_pk, cli_sk, cli_pk_der = load_keys(
-        config.get("PISA_PUBLIC_KEY"), config.get("CLI_PRIVATE_KEY"), config.get("CLI_PUBLIC_KEY")
-    )
-
-    try:
-        hex_pk_der = binascii.hexlify(cli_pk_der)
-
-    except binascii.Error as e:
-        logger.error("Could not successfully encode public key as hex", error=str(e))
+    if pisa_pk is None:
         return False
 
     # Get appointment data from user.
@@ -146,15 +134,21 @@ def add_appointment(args):
         return False
 
     appointment = Appointment.from_dict(appointment_data)
-    signature = Cryptographer.sign(appointment.serialize(), cli_sk)
 
-    if not (appointment and signature):
-        return False
-
-    data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
+    # FIXME: getting rid of the client-side signature for the alpha. A proper authentication mechanism is required.
+    # signature = Cryptographer.sign(appointment.serialize(), cli_sk)
+    #
+    # if not (appointment and signature):
+    #     return False
+    #
+    # data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
+    data = {"appointment": appointment.to_dict()}
 
     # Send appointment to the server.
     server_response = post_appointment(data)
 
+    if server_response is None:
+        return False
+
     response_json = process_post_appointment_response(server_response)
 
     if response_json is None:
@@ -316,6 +310,7 @@ def save_appointment_receipt(appointment, signature):
     try:
         with open(filename, "w") as f:
             json.dump(data, f)
+            logger.info("Appointment saved at {}".format(filename))
 
         return True
 
     except IOError as e:
@@ -378,7 +373,6 @@ if __name__ == "__main__":
     pisa_api_server = config.get("DEFAULT_PISA_API_SERVER")
     pisa_api_port = config.get("DEFAULT_PISA_API_PORT")
     commands = ["add_appointment", "get_appointment", "help"]
-    testing_commands = ["generate_dummy_appointment"]
 
     try:
         opts, args = getopt(argv[1:], "s:p:h", ["server", "port", "help"])
@@ -432,11 +426,6 @@ if __name__ == "__main__":
         else:
             sys.exit(show_usage())
 
-    # FIXME: testing command, not for production
-    elif command in testing_commands:
-        if command == "generate_dummy_appointment":
-            generate_dummy_appointment()
-
     else:
         logger.error("Unknown command. 
Use help to check the list of available commands")
 
 
diff --git a/pisa/inspector.py b/pisa/inspector.py
index dfdb0a8..b78b7cb 100644
--- a/pisa/inspector.py
+++ b/pisa/inspector.py
@@ -58,8 +58,8 @@ class Inspector:
                     rcode, message = self.check_to_self_delay(appointment_data.get("to_self_delay"))
                 if rcode == 0:
                     rcode, message = self.check_blob(appointment_data.get("encrypted_blob"))
-                if rcode == 0:
-                    rcode, message = self.check_appointment_signature(appointment_data, signature, public_key)
+                # if rcode == 0:
+                #     rcode, message = self.check_appointment_signature(appointment_data, signature, public_key)
 
             if rcode == 0:
                 r = Appointment.from_dict(appointment_data)
@@ -336,11 +336,16 @@ class Inspector:
             rcode = errors.APPOINTMENT_EMPTY_FIELD
             message = "empty signature received"
 
-        pk = Cryptographer.load_public_key_der(unhexlify(pk_der))
-        valid_sig = Cryptographer.verify(Appointment.from_dict(appointment_data).serialize(), signature, pk)
+        elif pk_der is None:
+            rcode = errors.APPOINTMENT_EMPTY_FIELD
+            message = "empty public key received"
 
-        if not valid_sig:
-            rcode = errors.APPOINTMENT_INVALID_SIGNATURE
-            message = "invalid signature"
+        else:
+            pk = Cryptographer.load_public_key_der(unhexlify(pk_der))
+            valid_sig = Cryptographer.verify(Appointment.from_dict(appointment_data).serialize(), signature, pk)
+
+            if not valid_sig:
+                rcode = errors.APPOINTMENT_INVALID_SIGNATURE
+                message = "invalid signature"
 
         return rcode, message
 
diff --git a/test/apps/cli/unit/test_pisa_cli.py b/test/apps/cli/unit/test_wt_cli.py
similarity index 70%
rename from test/apps/cli/unit/test_pisa_cli.py
rename to test/apps/cli/unit/test_wt_cli.py
index d972118..67aeb8b 100644
--- a/test/apps/cli/unit/test_pisa_cli.py
+++ b/test/apps/cli/unit/test_wt_cli.py
@@ -14,7 +14,7 @@ from common.appointment import Appointment
 from common.cryptographer import Cryptographer
 
 from apps.cli.blob import Blob
-import apps.cli.pisa_cli as pisa_cli
+import apps.cli.wt_cli as wt_cli
 from test.apps.cli.unit.conftest import get_random_value_hex
 
 # dummy keys for the tests
@@ -33,11 +33,11 @@ dummy_pk_der = dummy_pk.public_bytes(
 
 
 # Replace the key in the module with a key we control for the tests
-pisa_cli.pisa_public_key = dummy_pk
+wt_cli.pisa_public_key = dummy_pk
 # Replace endpoint with dummy one
-pisa_cli.pisa_api_server = "dummy.com"
-pisa_cli.pisa_api_port = 12345
-pisa_endpoint = "http://{}:{}/".format(pisa_cli.pisa_api_server, pisa_cli.pisa_api_port)
+wt_cli.pisa_api_server = "dummy.com"
+wt_cli.pisa_api_port = 12345
+pisa_endpoint = "http://{}:{}/".format(wt_cli.pisa_api_server, wt_cli.pisa_api_port)
 
 dummy_appointment_request = {
     "tx": get_random_value_hex(192),
@@ -62,7 +62,8 @@ dummy_appointment = Appointment.from_dict(dummy_appointment_full)
 
 
 def load_dummy_keys(*args):
-    return dummy_pk, dummy_sk, dummy_pk_der
+    # return dummy_pk, dummy_sk, dummy_pk_der
+    return dummy_pk
 
 
 def get_dummy_pisa_pk_der(*args):
@@ -81,30 +82,30 @@ def get_bad_signature(*args):
     return Cryptographer.sign(dummy_appointment.serialize(), another_sk)
 
 
-def test_load_keys():
-    # Let's first create private and public key files
-    private_key_file_path = "sk_test_file"
-    public_key_file_path = "pk_test_file"
-    with open(private_key_file_path, "wb") as f:
-        f.write(dummy_sk_der)
-    with open(public_key_file_path, "wb") as f:
-        f.write(dummy_pk_der)
-
-    # Now we can test the function using these files (we'll use the same pk for both)
-    r = pisa_cli.load_keys(public_key_file_path, private_key_file_path, public_key_file_path)
-    assert isinstance(r, tuple)
-    
assert len(r) == 3
-
-    # If any param is invalid we should get None as a result
-    assert pisa_cli.load_keys(None, private_key_file_path, public_key_file_path) is None
-    assert pisa_cli.load_keys(public_key_file_path, None, public_key_file_path) is None
-    assert pisa_cli.load_keys(public_key_file_path, private_key_file_path, None) is None
-
-    # The same should happen if we pass a public key where a private one should be, for instance
-    assert pisa_cli.load_keys(private_key_file_path, public_key_file_path, private_key_file_path) is None
-
-    os.remove(private_key_file_path)
-    os.remove(public_key_file_path)
+# def test_load_keys():
+#     # Let's first create private and public key files
+#     private_key_file_path = "sk_test_file"
+#     public_key_file_path = "pk_test_file"
+#     with open(private_key_file_path, "wb") as f:
+#         f.write(dummy_sk_der)
+#     with open(public_key_file_path, "wb") as f:
+#         f.write(dummy_pk_der)
+#
+#     # Now we can test the function using these files (we'll use the same pk for both)
+#     r = wt_cli.load_keys(public_key_file_path, private_key_file_path, public_key_file_path)
+#     assert isinstance(r, tuple)
+#     assert len(r) == 3
+#
+#     # If any param is invalid we should get None as a result
+#     assert wt_cli.load_keys(None, private_key_file_path, public_key_file_path) is None
+#     assert wt_cli.load_keys(public_key_file_path, None, public_key_file_path) is None
+#     assert wt_cli.load_keys(public_key_file_path, private_key_file_path, None) is None
+#
+#     # The same should happen if we pass a public key where a private one should be, for instance
+#     assert wt_cli.load_keys(private_key_file_path, public_key_file_path, private_key_file_path) is None
+#
+#     os.remove(private_key_file_path)
+#     os.remove(public_key_file_path)
 
 
 # TODO: 90-add-more-add-appointment-tests
 @responses.activate
 def test_add_appointment(monkeypatch):
     # Simulate a request to add_appointment for dummy_appointment, make sure that the right endpoint is requested
     # and the return value is True
-    monkeypatch.setattr(pisa_cli, "load_keys", load_dummy_keys)
+    monkeypatch.setattr(wt_cli, "load_keys", load_dummy_keys)
 
     response = {"locator": dummy_appointment.locator, "signature": get_dummy_signature()}
     responses.add(responses.POST, pisa_endpoint, json=response, status=200)
-    result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)])
+    result = wt_cli.add_appointment([json.dumps(dummy_appointment_request)])
 
     assert len(responses.calls) == 1
     assert responses.calls[0].request.url == pisa_endpoint
@@ -129,7 +130,7 @@ def test_add_appointment_with_invalid_signature(monkeypatch):
     # make sure that the right endpoint is requested, but the return value is False
 
     # Make sure the test uses the bad dummy signature
-    monkeypatch.setattr(pisa_cli, "load_keys", load_dummy_keys)
+    monkeypatch.setattr(wt_cli, "load_keys", load_dummy_keys)
 
     response = {
         "locator": dummy_appointment.to_dict()["locator"],
@@ -137,31 +138,31 @@ def test_add_appointment_with_invalid_signature(monkeypatch):
     }
 
     responses.add(responses.POST, pisa_endpoint, json=response, status=200)
-    result = pisa_cli.add_appointment([json.dumps(dummy_appointment_request)])
+    result = wt_cli.add_appointment([json.dumps(dummy_appointment_request)])
 
     assert result is False
 
 
 def test_parse_add_appointment_args():
     # If no args are passed, function should fail.
-    appt_data = pisa_cli.parse_add_appointment_args(None)
+    appt_data = wt_cli.parse_add_appointment_args(None)
     assert not appt_data
 
     # If file doesn't exist, function should fail. 
-    appt_data = pisa_cli.parse_add_appointment_args(["-f", "nonexistent_file"])
+    appt_data = wt_cli.parse_add_appointment_args(["-f", "nonexistent_file"])
     assert not appt_data
 
     # If file exists and has data in it, function should work.
     with open("appt_test_file", "w") as f:
         json.dump(dummy_appointment_request, f)
 
-    appt_data = pisa_cli.parse_add_appointment_args(["-f", "appt_test_file"])
+    appt_data = wt_cli.parse_add_appointment_args(["-f", "appt_test_file"])
     assert appt_data
 
     os.remove("appt_test_file")
 
     # If appointment json is passed in, function should work.
-    appt_data = pisa_cli.parse_add_appointment_args([json.dumps(dummy_appointment_request)])
+    appt_data = wt_cli.parse_add_appointment_args([json.dumps(dummy_appointment_request)])
     assert appt_data
 
 
@@ -173,7 +174,7 @@ def test_post_appointment():
     }
 
     responses.add(responses.POST, pisa_endpoint, json=response, status=200)
-    response = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
+    response = wt_cli.post_appointment(json.dumps(dummy_appointment_request))
 
     assert len(responses.calls) == 1
     assert responses.calls[0].request.url == pisa_endpoint
@@ -190,27 +191,27 @@ def test_process_post_appointment_response():
 
     # A 200 OK with a correct json response should return the json of the response
     responses.add(responses.POST, pisa_endpoint, json=response, status=200)
-    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
-    assert pisa_cli.process_post_appointment_response(r) == r.json()
+    r = wt_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert wt_cli.process_post_appointment_response(r) == r.json()
 
     # If we modify the response code to a rejection (let's say 404) we should get None
     responses.replace(responses.POST, pisa_endpoint, json=response, status=404)
-    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
-    assert pisa_cli.process_post_appointment_response(r) is None
+    r = wt_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert wt_cli.process_post_appointment_response(r) is None
 
     # The same should happen if the response is not in json
     responses.replace(responses.POST, pisa_endpoint, status=404)
-    r = pisa_cli.post_appointment(json.dumps(dummy_appointment_request))
-    assert pisa_cli.process_post_appointment_response(r) is None
+    r = wt_cli.post_appointment(json.dumps(dummy_appointment_request))
+    assert wt_cli.process_post_appointment_response(r) is None
 
 
 def test_save_appointment_receipt(monkeypatch):
     appointments_folder = "test_appointments_receipts"
-    pisa_cli.config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder
+    wt_cli.config["APPOINTMENTS_FOLDER_NAME"] = appointments_folder
 
     # The function creates a new directory if it does not exist
     assert not os.path.exists(appointments_folder)
-    pisa_cli.save_appointment_receipt(dummy_appointment.to_dict(), get_dummy_signature())
+    wt_cli.save_appointment_receipt(dummy_appointment.to_dict(), get_dummy_signature())
     assert os.path.exists(appointments_folder)
 
     # Check that the receipt has been saved by checking the file names
@@ -228,7 +229,7 @@ def test_get_appointment():
     request_url = "{}get_appointment?locator={}".format(pisa_endpoint, response.get("locator"))
     responses.add(responses.GET, request_url, json=response, status=200)
-    result = pisa_cli.get_appointment(response.get("locator"))
+    result = wt_cli.get_appointment(response.get("locator"))
 
     assert len(responses.calls) == 1
     assert responses.calls[0].request.url == request_url
@@ -243,4 +244,4 @@ def test_get_appointment_err():
 
     # Test that get_appointment handles a connection error appropriately.
     request_url = "{}get_appointment?locator={}".format(pisa_endpoint, locator)
     responses.add(responses.GET, request_url, body=ConnectionError())
 
-    assert not pisa_cli.get_appointment(locator)
+    assert not wt_cli.get_appointment(locator)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index a468099..8a6df9f 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -4,7 +4,7 @@ from time import sleep
 from riemann.tx import Tx
 
 from pisa import HOST, PORT
-from apps.cli import pisa_cli
+from apps.cli import wt_cli
 from apps.cli.blob import Blob
 from apps.cli import config as cli_conf
 from common.tools import compute_locator
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
@@ -19,10 +19,10 @@ from test.pisa.e2e.conftest import (
     run_pisad,
 )
 
-# We'll use pisa_cli to add appointments. The expected input format is a list of arguments with a json-encoded
+# We'll use wt_cli to add appointments. The expected input format is a list of arguments with a json-encoded
 # appointment
-pisa_cli.pisa_api_server = HOST
-pisa_cli.pisa_api_port = PORT
+wt_cli.pisa_api_server = HOST
+wt_cli.pisa_api_port = PORT
 
 # Run pisad
 pisad_process = run_pisad()
@@ -37,7 +37,7 @@ def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
 def get_appointment_info(locator):
     # Check that the justice has been triggered (the appointment has moved from Watcher to Responder)
     sleep(1)  # Let's add a bit of delay so the state can be updated
-    return pisa_cli.get_appointment(locator)
+    return wt_cli.get_appointment(locator)
 
 
 def test_appointment_life_cycle(bitcoin_cli, create_txs):
@@ -46,13 +46,17 @@ def test_appointment_life_cycle(bitcoin_cli, create_txs):
     appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
     locator = compute_locator(commitment_tx_id)
 
-    assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
+    assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True
+
+    appointment_info = get_appointment_info(locator)
+    assert appointment_info is not None
+    assert len(appointment_info) == 1
+    assert appointment_info[0].get("status") == "being_watched"
 
     new_addr = bitcoin_cli.getnewaddress()
     broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, new_addr)
 
     appointment_info = get_appointment_info(locator)
-    assert appointment_info is not None
     assert len(appointment_info) == 1
     assert appointment_info[0].get("status") == "dispute_responded"
@@ -92,7 +96,7 @@ def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
     appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, mod_penalty_tx.hex())
     locator = compute_locator(commitment_tx_id)
 
-    assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True
+    assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True
 
     # Broadcast the commitment transaction and mine a block
     new_addr = bitcoin_cli.getnewaddress()
@@ -115,13 +119,13 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     # The appointment data is built using a random 32-byte value.
     appointment_data = build_appointment_data(bitcoin_cli, get_random_value_hex(32), penalty_tx)
 
-    # We can't use pisa_cli.add_appointment here since it computes the locator internally, so let's do it manually.
+    # We can't use wt_cli.add_appointment here since it computes the locator internally, so let's do it manually.
     # We will encrypt the blob using the random value and derive the locator from the commitment tx. 
appointment_data["locator"] = compute_locator(bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")) appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), appointment_data.get("tx_id")) appointment = Appointment.from_dict(appointment_data) - pisa_pk, cli_sk, cli_pk_der = pisa_cli.load_keys( + pisa_pk, cli_sk, cli_pk_der = wt_cli.load_keys( cli_conf.get("PISA_PUBLIC_KEY"), cli_conf.get("CLI_PRIVATE_KEY"), cli_conf.get("CLI_PUBLIC_KEY") ) hex_pk_der = binascii.hexlify(cli_pk_der) @@ -130,8 +134,8 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs): data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")} # Send appointment to the server. - response = pisa_cli.post_appointment(data) - response_json = pisa_cli.process_post_appointment_response(response) + response = wt_cli.post_appointment(data) + response_json = wt_cli.process_post_appointment_response(response) # Check that the server has accepted the appointment signature = response_json.get("signature") @@ -165,8 +169,8 @@ def test_two_identical_appointments(bitcoin_cli, create_txs): locator = compute_locator(commitment_tx_id) # Send the appointment twice - assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True - assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True # Broadcast the commitment transaction and mine a block new_addr = bitcoin_cli.getnewaddress() @@ -199,8 +203,8 @@ def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs) appointment2_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx2) locator = compute_locator(commitment_tx_id) - assert pisa_cli.add_appointment([json.dumps(appointment1_data)]) is True - assert pisa_cli.add_appointment([json.dumps(appointment2_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment1_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment2_data)]) is True # Broadcast the commitment transaction and mine a block new_addr = bitcoin_cli.getnewaddress() @@ -227,7 +231,7 @@ def test_appointment_shutdown_pisa_trigger_back_online(create_txs, bitcoin_cli): appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) locator = compute_locator(commitment_tx_id) - assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True # Restart pisa pisad_process.terminate() @@ -265,7 +269,7 @@ def test_appointment_shutdown_pisa_trigger_while_offline(create_txs, bitcoin_cli appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx) locator = compute_locator(commitment_tx_id) - assert pisa_cli.add_appointment([json.dumps(appointment_data)]) is True + assert wt_cli.add_appointment([json.dumps(appointment_data)]) is True # Check that the appointment is still in the Watcher appointment_info = get_appointment_info(locator) From e2794eff2a08414dcdee901bf6f77511d3dfd26e Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 11 Feb 2020 19:27:33 +0100 Subject: [PATCH 89/93] Modifies e2e tests to match alpha cli.load_keys --- test/pisa/e2e/test_basic_e2e.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index 8a6df9f..a8f83e7 100644 --- 
a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -125,13 +125,15 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), appointment_data.get("tx_id"))
 
     appointment = Appointment.from_dict(appointment_data)
-    pisa_pk, cli_sk, cli_pk_der = wt_cli.load_keys(
-        cli_conf.get("PISA_PUBLIC_KEY"), cli_conf.get("CLI_PRIVATE_KEY"), cli_conf.get("CLI_PUBLIC_KEY")
-    )
-    hex_pk_der = binascii.hexlify(cli_pk_der)
-
-    signature = Cryptographer.sign(appointment.serialize(), cli_sk)
-    data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
+    # pisa_pk, cli_sk, cli_pk_der = wt_cli.load_keys(
+    #     cli_conf.get("PISA_PUBLIC_KEY"), cli_conf.get("CLI_PRIVATE_KEY"), cli_conf.get("CLI_PUBLIC_KEY")
+    # )
+    # hex_pk_der = binascii.hexlify(cli_pk_der)
+    #
+    # signature = Cryptographer.sign(appointment.serialize(), cli_sk)
+    # data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
+    pisa_pk = wt_cli.load_keys()
+    data = {"appointment": appointment.to_dict()}
 
     # Send appointment to the server.
     response = wt_cli.post_appointment(data)

From d447debe54c1c6cc2d92481971275200ae622be4 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 11 Feb 2020 20:22:51 +0100
Subject: [PATCH 90/93] Temporary fix for Cryptographer logs. Close #91

---
 apps/cli/wt_cli.py                     | 3 ++-
 common/cryptographer.py                | 7 ++-----
 pisa/inspector.py                      | 2 ++
 pisa/pisad.py                          | 2 ++
 pisa/watcher.py                        | 2 ++
 test/apps/cli/unit/test_wt_cli.py      | 5 ++++-
 test/common/unit/test_cryptographer.py | 5 ++++-
 test/pisa/e2e/test_basic_e2e.py        | 5 +++++
 test/pisa/unit/conftest.py             | 5 +++++
 test/pisa/unit/test_inspector.py       | 6 ++++++
 test/pisa/unit/test_watcher.py         | 5 +++++
 11 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/apps/cli/wt_cli.py b/apps/cli/wt_cli.py
index 275ae25..c9b41b9 100644
--- a/apps/cli/wt_cli.py
+++ b/apps/cli/wt_cli.py
@@ -13,6 +13,7 @@ from apps.cli import config, LOG_PREFIX
 from apps.cli.help import help_add_appointment, help_get_appointment
 from apps.cli.blob import Blob
 
+import common.cryptographer
 from common import constants
 from common.logger import Logger
 from common.appointment import Appointment
@@ -20,7 +21,7 @@ from common.cryptographer import Cryptographer
 from common.tools import check_sha256_hex_format, check_locator_format, compute_locator
 
 logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX)
-
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
 
 # FIXME: creating a simpler load_keys for the alpha. Client keys will not be necessary. PISA key is hardcoded.
 # def load_keys(pisa_pk_path, cli_sk_path, cli_pk_path):
diff --git a/common/cryptographer.py b/common/cryptographer.py
index fee5001..6519620 100644
--- a/common/cryptographer.py
+++ b/common/cryptographer.py
@@ -10,9 +10,8 @@ from cryptography.hazmat.primitives.serialization import load_der_public_key, lo
 from cryptography.exceptions import InvalidSignature
 
 from common.tools import check_sha256_hex_format
-from common.logger import Logger
-
-logger = Logger("Cryptographer")
+# FIXME: Common has no log file, so it needs to log in the same log as the caller. This is a temporary fix. 
+logger = None
 
 
 class Cryptographer:
@@ -39,12 +38,10 @@ class Cryptographer:
 
         if len(data) % 2:
             error = "Incorrect (Odd-length) value"
-            logger.error(error, data=data)
             raise ValueError(error)
 
         if not check_sha256_hex_format(secret):
             error = "Secret must be a 32-byte hex value (64 hex chars)"
-            logger.error(error, secret=secret)
             raise ValueError(error)
 
         return True
diff --git a/pisa/inspector.py b/pisa/inspector.py
index b78b7cb..ee5bd10 100644
--- a/pisa/inspector.py
+++ b/pisa/inspector.py
@@ -1,6 +1,7 @@
 import re
 from binascii import unhexlify
 
+import common.cryptographer
 from common.constants import LOCATOR_LEN_HEX
 from common.cryptographer import Cryptographer
 
@@ -10,6 +11,7 @@ from common.appointment import Appointment
 from pisa.block_processor import BlockProcessor
 
 logger = Logger(actor="Inspector", log_name_prefix=LOG_PREFIX)
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
 
 # FIXME: The inspector logs the wrong messages sent from the users. A possible attack surface would be to send a really
 # long field that, even if not accepted by PISA, would be stored in the logs. This is a possible DoS surface
diff --git a/pisa/pisad.py b/pisa/pisad.py
index 89602b5..ad0c183 100644
--- a/pisa/pisad.py
+++ b/pisa/pisad.py
@@ -2,6 +2,7 @@ from getopt import getopt
 from sys import argv, exit
 from signal import signal, SIGINT, SIGQUIT, SIGTERM
 
+import common.cryptographer
 from common.logger import Logger
 from common.cryptographer import Cryptographer
 
@@ -16,6 +17,7 @@ from pisa.block_processor import BlockProcessor
 from pisa.tools import can_connect_to_bitcoind, in_correct_network
 
 logger = Logger(actor="Daemon", log_name_prefix=LOG_PREFIX)
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
 
 
 def handle_signals(signal_received, frame):
diff --git a/pisa/watcher.py b/pisa/watcher.py
index 5b1860c..281de92 100644
--- a/pisa/watcher.py
+++ b/pisa/watcher.py
@@ -2,6 +2,7 @@ from uuid import uuid4
 from queue import Queue
 from threading import Thread
 
+import common.cryptographer
 from common.cryptographer import Cryptographer
 from common.appointment import Appointment
 from common.tools import compute_locator
@@ -13,6 +14,7 @@ from pisa.cleaner import Cleaner
 from pisa.block_processor import BlockProcessor
 
 logger = Logger(actor="Watcher", log_name_prefix=LOG_PREFIX)
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
 
 
 class Watcher:
diff --git a/test/apps/cli/unit/test_wt_cli.py b/test/apps/cli/unit/test_wt_cli.py
index 67aeb8b..b05164e 100644
--- a/test/apps/cli/unit/test_wt_cli.py
+++ b/test/apps/cli/unit/test_wt_cli.py
@@ -1,4 +1,3 @@
-import pytest
 import responses
 import json
 import os
@@ -9,6 +8,8 @@ from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import serialization
 from cryptography.hazmat.primitives.asymmetric import ec
 
+import common.cryptographer
+from common.logger import Logger
 from common.tools import compute_locator
 from common.appointment import Appointment
 from common.cryptographer import Cryptographer
@@ -17,6 +18,8 @@ from apps.cli.blob import Blob
 import apps.cli.wt_cli as wt_cli
 from test.apps.cli.unit.conftest import get_random_value_hex
 
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=wt_cli.LOG_PREFIX)
+
 # dummy keys for the tests
 dummy_sk = ec.generate_private_key(ec.SECP256K1, default_backend())
 dummy_pk = dummy_sk.public_key()
diff --git a/test/common/unit/test_cryptographer.py 
b/test/common/unit/test_cryptographer.py index 875cea4..728e1fd 100644 --- a/test/common/unit/test_cryptographer.py +++ b/test/common/unit/test_cryptographer.py @@ -1,15 +1,18 @@ import os -import pytest import binascii from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives import serialization +import common.cryptographer from apps.cli.blob import Blob +from common.logger import Logger from common.cryptographer import Cryptographer from pisa.encrypted_blob import EncryptedBlob from test.common.unit.conftest import get_random_value_hex +common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix="") + data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777" key = "b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659" encrypted_data = "8f31028097a8bf12a92e088caab5cf3fcddf0d35ed2b72c24b12269373efcdea04f9d2a820adafe830c20ff132d89810" diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py index a8f83e7..ceec093 100644 --- a/test/pisa/e2e/test_basic_e2e.py +++ b/test/pisa/e2e/test_basic_e2e.py @@ -7,6 +7,9 @@ from pisa import HOST, PORT from apps.cli import wt_cli from apps.cli.blob import Blob from apps.cli import config as cli_conf + +import common.cryptographer +from common.logger import Logger from common.tools import compute_locator from common.appointment import Appointment from common.cryptographer import Cryptographer @@ -19,6 +22,8 @@ from test.pisa.e2e.conftest import ( run_pisad, ) +common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix="") + # We'll use wt_cli to add appointments. The expected input format is a list of arguments with a json-encoded # appointment wt_cli.pisa_api_server = HOST diff --git a/test/pisa/unit/conftest.py b/test/pisa/unit/conftest.py index 2ffcb85..6766faa 100644 --- a/test/pisa/unit/conftest.py +++ b/test/pisa/unit/conftest.py @@ -22,9 +22,14 @@ from bitcoind_mock.transaction import create_dummy_transaction from bitcoind_mock.bitcoind import BitcoindMock from bitcoind_mock.conf import BTC_RPC_HOST, BTC_RPC_PORT +from pisa import LOG_PREFIX +import common.cryptographer +from common.logger import Logger from common.constants import LOCATOR_LEN_HEX from common.cryptographer import Cryptographer +common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX) + @pytest.fixture(scope="session") def run_bitcoind(): diff --git a/test/pisa/unit/test_inspector.py b/test/pisa/unit/test_inspector.py index 4dbafce..2cf7c54 100644 --- a/test/pisa/unit/test_inspector.py +++ b/test/pisa/unit/test_inspector.py @@ -14,6 +14,12 @@ from test.pisa.unit.conftest import get_random_value_hex, generate_dummy_appoint from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX from common.cryptographer import Cryptographer +from common.logger import Logger + +from pisa import LOG_PREFIX +import common.cryptographer + +common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX) inspector = Inspector(get_config()) diff --git a/test/pisa/unit/test_watcher.py b/test/pisa/unit/test_watcher.py index 5a85bec..447c85c 100644 --- a/test/pisa/unit/test_watcher.py +++ b/test/pisa/unit/test_watcher.py @@ -20,9 +20,14 @@ from test.pisa.unit.conftest import ( ) from pisa.conf import EXPIRY_DELTA, MAX_APPOINTMENTS +import common.cryptographer +from pisa import LOG_PREFIX +from common.logger import Logger from common.tools import compute_locator from 
common.cryptographer import Cryptographer
 
+common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)
+
 APPOINTMENTS = 5
 START_TIME_OFFSET = 1
 
From afcada03bff2d68b054bca2a6411864279008c3f Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 11 Feb 2020 20:24:03 +0100
Subject: [PATCH 91/93] Improve docs

---
 apps/cli/PISA-API.md | 66 +++++++++++++++++++-------------------------
 apps/cli/README.md   | 62 ++++++++++++++++++++---------------------
 2 files changed, 59 insertions(+), 69 deletions(-)

diff --git a/apps/cli/PISA-API.md b/apps/cli/PISA-API.md
index 5369de5..60038b1 100644
--- a/apps/cli/PISA-API.md
+++ b/apps/cli/PISA-API.md
@@ -7,81 +7,73 @@ The PISA REST API consists, currently, of two endpoints: `/` and `/check_appoint
 
 `/` is the default endpoint, and is where the appointments should be sent to. `/` accepts `HTTP POST` requests only, with json request body, where data must match the following format:
 
     {"locator": l, "start_time": s, "end_time": e,
-     "dispute_delta": d, "encrypted_blob": eb, "cipher":
-     c, "hash_function": h}
+     "to_self_delay": d, "encrypted_blob": eb}
 
 We'll discuss the parameters one by one in the following:
 
-The locator, `l`, is the `sha256` hex representation of the **dispute transaction id** (i.e. the sha256 of the byte representation of the dispute transaction id encoded in hex). `type(l) = hex encoded str`
+The locator, `l`, is the first half of the **dispute transaction id** (i.e. the 16 most significant bytes of the dispute txid, hex encoded). `type(l) = hex encoded str`
 
-The start\_time, `s`, is the time when the PISA server will start watching your transaction, and will normally match with whenever you will be offline. `s` is measured in block height, and must be **higher than the current block height** and not too close to it. `type(s) = int`
+The start\_time, `s`, is the time when the PISA server will start watching your transaction, and will normally match the time at which you expect to go offline. `s` is measured in block height, and must be **higher than the current block height**. `type(s) = int`
 
-The end\_time, `e`, is the time where the PISA server will stop watching your transaction, and will normally match which whenever you should be back online. `e` is also measured in block height, and must be **higher than** `s`. `type(e) = int`
+The end\_time, `e`, is the time when the PISA server will stop watching your transaction, and will normally match the time at which you expect to be back online. `e` is also measured in block height, and must be **higher than** `s`. `type(e) = int`
 
-The dispute\_delta, `d`, is the time PISA would have to respond with the **justice transaction** once the **dispute transaction** is seen in the blockchain. `d` must match with the `OP_CSV` specified in the dispute transaction. If the dispute_delta does not match the `OP_CSV `, PISA would try to respond with the justice transaction anyway, but success is not guaranteed. `d` is measured in blocks and should be, at least, `20`. `type(d) = int`
+The to\_self\_delay, `d`, is the time PISA would have to respond with the **penalty transaction** once the **dispute transaction** is seen in the blockchain. `d` must match the `OP_CSV` specified in the dispute transaction. If the to\_self\_delay does not match the `OP_CSV`, PISA will try to respond with the penalty transaction anyway, but success is not guaranteed. `d` is measured in blocks and should be, at least, `20`. 
`type(d) = int`
 
-The encrypted\_blob, `eb`, is a data blob containing the `raw justice transaction` and it is encrypted using `AES-GCM-128`. The `encryption key` and `nonce` used by the cipher are **derived from the justice transaction id** as follows:
+The encrypted\_blob, `eb`, is a data blob containing the `raw penalty transaction` and it is encrypted using `CHACHA20-POLY1305`. The `encryption key` used by the cipher is the sha256 of the **dispute transaction id**, and the `nonce` is a 12-byte long zero byte array:
 
-    master_key = SHA256(tx_id|tx_id)
-    sk = master_key[:16]
-    nonce = master_key[16:]
+    sk = sha256(unhexlify(secret)).digest()
+    nonce = bytearray(12)  # b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
 
-where `| `represents concatenation, `[:16]` represent the first half (16 bytes), and `[16:]` represents the second half of the master key. Finally, the encrypted blob must be hex encoded. `type(eb) = hex encoded str`
+Finally, the encrypted blob must be hex encoded. `type(eb) = hex encoded str`
 
-The cipher, `c`, represents the cipher used to encrypt `eb`. The only cipher supported, for now, is `AES-GCM-128`. `type(c) = str`
-
-The hash\_function, `h`, represents the hash function used to derive the encryption key and the nonce used to create `eb`. The only hash function supported, for now, is `SHA256`. `type(h) = str`
-
-The API will return a `text/plain` HTTP response code `200/OK` if the appointment is accepted, with the locator encoded in the response text, or a `400/Bad Request` if the appointment is rejected, with the rejection reason encoded in the response text.
+The API will return an `application/json` HTTP response code `200/OK` if the appointment is accepted, with the locator encoded in the response text, or a `400/Bad Request` if the appointment is rejected, with the rejection reason encoded in the response text.
 
 #### Appointment example
 
     {"locator": "3c3375883f01027e5ca14f9760a8b853",
    "start_time": 1568118,
    "end_time": 1568120,
-    "dispute_delta": 20,
-    "encrypted_blob": "6c7687a97e874363e1c2b9a08386125e09ea000a9b4330feb33a5c698265f3565c267554e6fdd7b0544ced026aaab73c255bcc97c18eb9fa704d9cc5f1c83adaf921de7ba62b2b6ddb1bda7775288019ec3708642e738eddc22882abf5b3f4e34ef2d4077ed23e135f7fe22caaec845982918e7df4a3f949cadd2d3e7c541b1dbf77daf64e7ed61531aaa487b468581b5aa7b1da81e2617e351c9d5cf445e3391c3fea4497aaa7ad286552759791b9caa5e4c055d1b38adfceddb1ef2b99e3b467dd0b0b13ce863c1bf6b6f24543c30d",
-    "cipher": "AES-GCM-128",
-    "hash_function": "SHA256"}
+    "to_self_delay": 20,
+    "encrypted_blob": "6c7687a97e874363e1c2b9a08386125e09ea000a9b4330feb33a5c698265f3565c267554e6fdd7b0544ced026aaab73c255bcc97c18eb9fa704d9cc5f1c83adaf921de7ba62b2b6ddb1bda7775288019ec3708642e738eddc22882abf5b3f4e34ef2d4077ed23e135f7fe22caaec845982918e7df4a3f949cadd2d3e7c541b1dbf77daf64e7ed61531aaa487b468581b5aa7b1da81e2617e351c9d5cf445e3391c3fea4497aaa7ad286552759791b9caa5e4c055d1b38adfceddb1ef2b99e3b467dd0b0b13ce863c1bf6b6f24543c30d"}

# Check appointment

-`/check_appointment` is a testing endpoint provided to check the status of the appointments sent to PISA. The endpoint is accessible without any type of authentication for now. `/check_appointment` accepts `HTTP GET` requests only, where the data to be provided must be the locator of an appointment. The query must match the following format:
+`/check_appointment` is an endpoint provided to check the status of the appointments sent to PISA. The endpoint is accessible without any type of authentication for now. 
`/check_appointment` accepts `HTTP GET` requests only, where the data to be provided must be the locator of an appointment. The query must match the following format:
 
     `http://pisa_server:pisa_port/check_appointment?locator=appointment_locator`
 
 ### Appointment can be in three states
 
-- `not_found`: meaning the locator is not recognised by the API. This could either mean the locator is wrong, or the appointment has already been fulfilled (the PISA server does not have any kind of data persistency for now).
-- `being_watched`: the appointment has been accepted by the PISA server and it's being watched at the moment. This stage means that the dispute transaction has now been seen yet, and therefore no justice transaction has been published.
+- `not_found`: meaning the locator is not recognised by the API. This could either mean the locator is wrong, or the appointment has already been fulfilled.
+- `being_watched`: the appointment has been accepted by the PISA server and it's being watched at the moment. This stage means that the dispute transaction has not been seen yet, and therefore no penalty transaction has been published.
 - `dispute_responded`: the dispute was found by the watcher and the corresponding justice transaction has been broadcast by the node. In this stage PISA is actively monitoring until the justice transaction reaches enough confirmations and making sure no fork occurs in the meantime.
 
 ### Check appointment response formats
 
 `/check_appointment` will always reply with `json` containing the information about the requested appointment. The structure is as follows:
 
-#### not_found
+**not_found**
 
     [{"locator": appointment_locator,
      "status":"not_found"}]
 
-#### being_watched
-    [{"cipher": "AES-GCM-128",
-     "dispute_delta": d,
-     "encrypted_blob": eb,
+**being_watched**
+
+    [{"encrypted_blob": eb,
      "end_time": e,
-     "hash_function": "SHA256",
      "locator": appointment_locator,
      "start_time": s,
-     "status": "being_watched"}]
+     "status": "being_watched",
+     "to_self_delay": d}]
 
-#### dispute_responded
+**dispute_responded**
 
-    [{"locator": appointment_locator,
-     "justice_rawtx": j,
-     "appointment_end": e,
-     "status": "dispute_responded"
-     "confirmations": c}]
+    [{"appointment_end": e,
+     "dispute_txid": dispute_txid,
+     "locator": appointment_locator,
+     "penalty_rawtx": penalty_rawtx,
+     "penalty_txid": penalty_txid,
+     "status": "dispute_responded"}]
 
 Notice that the response json always contains a list. Why? It is possible for both parties to send the “same locator” to our service:
@@ -92,6 +84,6 @@ In the above scenario, Bob can hire our service with a bad encrypted blob for th
 
 ### Data persistence
 
-As mentioned earlier, our service has no data persistence. this means that fulfilled appointments cannot be queried from `/check_appointment`. On top of that, if our service is restarted, all jobs are lost. This is only temporary and we are currently working on it. Do not use this service for production-ready software yet and please consider it as an early-stage demo to better understand how our API will work.
+PISA keeps track of appointments while they are being monitored, but data is wiped once an appointment has been completed with enough confirmations. Notice that during the alpha there will be no authentication, so data may be wiped periodically. 
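For reference, the whole client-side scheme PISA-API.md now documents fits in a few lines of Python. The following is an illustrative sketch only, assuming the third-party `cryptography` package; the helper names are not taken from the codebase (though `compute_locator` mirrors the helper of the same name in `common/tools.py`):

    # Illustrative sketch of the scheme described in PISA-API.md, not code
    # from the repository. Requires the `cryptography` package.
    from hashlib import sha256
    from binascii import hexlify, unhexlify

    from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305


    def encrypt_penalty_tx(penalty_rawtx, dispute_txid):
        # The encryption key is the sha256 of the raw dispute txid bytes.
        sk = sha256(unhexlify(dispute_txid)).digest()

        # The nonce is a 12-byte array of zeros.
        nonce = bytes(12)

        # CHACHA20-POLY1305 over the raw penalty transaction; the API expects
        # the resulting blob hex encoded.
        blob = ChaCha20Poly1305(sk).encrypt(nonce, unhexlify(penalty_rawtx), None)
        return hexlify(blob).decode()


    def compute_locator(dispute_txid):
        # The locator is the 16 most significant bytes (32 hex chars) of the txid.
        return dispute_txid[:32]

A fixed all-zero nonce is workable here only because each key is derived from a unique dispute txid and is used to encrypt a single message, so key/nonce pairs are never reused.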
diff --git a/apps/cli/README.md b/apps/cli/README.md
index 9002c93..ddea17a 100644
--- a/apps/cli/README.md
+++ b/apps/cli/README.md
@@ -1,6 +1,6 @@
-# pisa_cli
+# wt_cli
 
-`pisa_cli` is a command line interface to interact with the PISA server, written in Python3.
+`wt_cli` is a command line interface to interact with the PISA WatchTower server, written in Python3.
 
 ## Dependencies
 Refer to [DEPENDENCIES.md](DEPENDENCIES.md)
@@ -11,7 +11,7 @@ Refer to [INSTALL.md](INSTALL.md)
 
 ## Usage
 
-	python pisa_cli.py [global options] command [command options] [arguments]
+	python wt_cli.py [global options] command [command options] [arguments]
 
 #### Global options
 
@@ -39,25 +39,24 @@ This command is used to register appointments to the PISA server. Appointments
 *    "dispute_delta": d }
 
-`tx` **must** be the raw justice transaction that will be encrypted before sent to the PISA server. `type(tx) = hex encoded str`
+`tx` **must** be the raw penalty transaction that will be encrypted before being sent to the PISA server. `type(tx) = hex encoded str`
 
-`tx_id` **must** match the **commitment transaction id**, and will be used to encrypt the **justice transaction** and **generate the locator**. `type(tx_id) = hex encoded str`
+`tx_id` **must** match the **commitment transaction id**, and will be used to encrypt the **penalty transaction** and **generate the locator**. `type(tx_id) = hex encoded str`
 
-`s` is the time when the PISA server will start watching your transaction, and will normally match to whenever you will be offline. `s` is measured in block height, and must be **higher than the current block height** and not too close to it. `type(s) = int`
+`s` is the time when the PISA server will start watching your transaction, and will normally match with whenever you will be offline. `s` is measured in block height, and must be **higher than the current block height**. `type(s) = int`
 
-`e` is the time where the PISA server will stop watching your transaction, and will normally match which whenever you should be back online. `e` is also measured in block height, and must be **higher than** `s`. `type(e) = int`
+`e` is the time when the PISA server will stop watching your transaction, and will normally match with whenever you should be back online. `e` is also measured in block height, and must be **higher than** `s`. `type(e) = int`
 
-`d` is the time PISA would have to respond with the **justice transaction** once the **dispute transaction** is seen in the blockchain. `d` must match with the `OP_CSV` specified in the dispute transaction. If the dispute_delta does not match the `OP_CSV `, PISA would try to respond with the justice transaction anyway, but success is not guaranteed. `d` is measured in blocks and should be at least `20`. `type(d) = int`
+`d` is the time PISA would have to respond with the **penalty transaction** once the **dispute transaction** is seen in the blockchain. `d` must match with the `OP_CSV` specified in the dispute transaction. If the to\_self\_delay does not match the `OP_CSV`, PISA will try to respond with the penalty transaction anyway, but success is not guaranteed. `d` is measured in blocks and should be at least `20`. `type(d) = int`
 
-The API will return a `text/plain` HTTP response code `200/OK` if the appointment is accepted, with the locator encoded in the response text, or a `400/Bad Request` if the appointment is rejected, with the rejection reason encoded in the response text. 
+The API will return an `application/json` HTTP response code `200/OK` if the appointment is accepted, with the locator encoded in the response text, or a `400/Bad Request` if the appointment is rejected, with the rejection reason encoded in the response text.
 
 #### Usage
 
-	python pisa_cli add_appointment [command options] /
+	python wt_cli.py add_appointment [command options] <appointment_json>/<path_to_json_file>
 
-if `-f, --file` **is** specified, then the command expects a path to a json file instead of a json encoded
- string as parameter.
+if `-f, --file` **is** specified, then the command expects a path to a json file instead of a json encoded string as parameter.
 
 #### Options
 - `-f, --file path_to_json_file` loads the appointment data from the specified json file instead of command line.
 
@@ -68,9 +67,9 @@ if `-f, --file` **is** specified, then the command expects a path to a json file
 
 **Appointment can be in three states**
 
-- `not_found`: meaning the locator is not recognised by the API. This could either mean the locator is wrong, or the appointment has already been fulfilled (the PISA server does not have any kind of data persistency for now).
-- `being_watched`: the appointment has been accepted by the PISA server and it's being watched at the moment. This stage means that the dispute transaction has now been seen yet, and therefore no justice transaction has been published.
-- `dispute_responded`: the dispute was found by the watcher and the corresponding justice transaction has been broadcast by the node. In this stage PISA is actively monitoring until the justice transaction reaches enough confirmations and making sure no fork occurs in the meantime.
+- `not_found`: meaning the locator is not recognised by the tower. This can either mean the locator is wrong, or the appointment has already been fulfilled (the PISA server does not keep track of completed appointments for now).
+- `being_watched`: the appointment has been accepted by the PISA server and it's being watched at the moment. This stage means that the dispute transaction has not been seen yet, and therefore no penalty transaction has been broadcast.
+- `dispute_responded`: the dispute was found by the watcher and the corresponding penalty transaction has been broadcast by the node. In this stage PISA is actively monitoring until the penalty transaction reaches enough confirmations and making sure no fork occurs in the meantime.
 
 **Response formats**
 
@@ -81,26 +80,25 @@ if `-f, --file` **is** specified, then the command expects a path to a json file
 
 **being_watched**
 
-    [{"cipher": "AES-GCM-128",
-     "dispute_delta": d,
-     "encrypted_blob": eb,
+    [{"encrypted_blob": eb,
      "end_time": e,
-     "hash_function": "SHA256",
      "locator": appointment_locator,
      "start_time": s,
-     "status": "being_watched"}]
+     "status": "being_watched",
+     "to_self_delay": d}]
 
 **dispute_responded**
 
-    [{"locator": appointment_locator,
-     "justice_rawtx": j,
-     "appointment_end": e,
-     "status": "dispute_responded"
-     "confirmations": c}]
+    [{"appointment_end": e,
+     "dispute_txid": dispute_txid,
+     "locator": appointment_locator,
+     "penalty_rawtx": penalty_rawtx,
+     "penalty_txid": penalty_txid,
+     "status": "dispute_responded"}]
 
 #### Usage
 
-	python pisa_cli get_appointment
+	python wt_cli.py get_appointment <locator>
 
 
 
@@ -109,26 +107,26 @@ if `-f, --file` **is** specified, then the command expects a path to a json file
 
 Shows the list of commands or help about how to run a specific command. 
#### Usage
 
-	python pisa_cli help
+	python wt_cli.py help
 
 or
 
-	python pisa_cli help command
+	python wt_cli.py help command
 
 ## Example
 
 1. Generate a new dummy appointment. **Note:** this appointment will never be fulfilled (it will eventually expire) since it does not correspond to a valid transaction. However, it can be used to interact with the PISA API.
 
 ```
-	python pisa_cli.py generate_dummy_appointment
+echo '{"tx": "4615a58815475ab8145b6bb90b1268a0dbb02e344ddd483f45052bec1f15b1951c1ee7f070a0993da395a5ee92ea3a1c184b5ffdb2507164bf1f8c1364155d48bdbc882eee0868ca69864a807f213f538990ad16f56d7dfb28a18e69e3f31ae9adad229e3244073b7d643b4597ec88bf247b9f73f301b0f25ae8207b02b7709c271da98af19f1db276ac48ba64f099644af1ae2c90edb7def5e8589a1bb17cc72ac42ecf07dd29cff91823938fd0d772c2c92b7ab050f8837efd46197c9b2b3f", "tx_id": "0b9510d92a50c1d67c6f7fc5d47908d96b3eccdea093d89bcbaf05bcfebdd951", "start_time": 0, "end_time": 0, "to_self_delay": 20}' > dummy_appointment_data.json
 ```
 
-	That will create a json file that follows the appointment data structure filled with dummy data and store it in `dummy_appointment_data.json`.
+	That will create a json file that follows the appointment data structure filled with dummy data and store it in `dummy_appointment_data.json`. **Note**: You'll need to update the `start_time` and `end_time` to match valid block heights.
 
 2. Send the appointment to the PISA API, which will then start monitoring for matching transactions.
 
 ```
-	python pisa_cli.py add_appointment -f dummy_appointment_data.json
+	python wt_cli.py add_appointment -f dummy_appointment_data.json
 ```
 
 	This returns an appointment locator that can be used to get updates about this appointment from PISA.
@@ -136,7 +134,7 @@ or
 
 3. Test that PISA is still watching the appointment by passing the appointment locator received to the following command:
 
 ```
-	python pisa_cli.py get_appointment
+	python wt_cli.py get_appointment <locator>
 ```
 
 ## PISA API

From ea4d6cf34510f2bababe456d0e0428099edf9021 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 11 Feb 2020 21:55:02 +0100
Subject: [PATCH 92/93] Updates e2e tests to derive pk from sk instead of using load_keys

Since we're creating a new set of keys in the e2e tests, the hardcoded public
key of the cli won't work
---
 test/pisa/e2e/test_basic_e2e.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index ceec093..5e3ef73 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -3,6 +3,7 @@ import binascii
 from time import sleep
 
 from riemann.tx import Tx
+from pisa import config
 from pisa import HOST, PORT
 from apps.cli import wt_cli
 from apps.cli.blob import Blob
@@ -137,7 +138,10 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     #
     # signature = Cryptographer.sign(appointment.serialize(), cli_sk)
     # data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
-    pisa_pk = wt_cli.load_keys()
+    # FIXME: Since the pk is now hardcoded for the alpha in the cli we cannot use load_keys here. We need to derive
+    # the pk from the sk on disk.
+    pisa_sk = Cryptographer.load_private_key_der(Cryptographer.load_key_file(config.get("PISA_SECRET_KEY")))
+    pisa_pk = pisa_sk.public_key()
 
     data = {"appointment": appointment.to_dict()}
 
     # Send appointment to the server. 
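The FIXME in patch 92 reduces to a standard pattern: read the DER-encoded secret key from disk, deserialize it, and take its public half. Below is a minimal sketch of that pattern written directly against the `cryptography` package; `Cryptographer.load_private_key_der` and `Cryptographer.load_key_file` are the project's own wrappers, and the function name and path handling here are illustrative, not code from the repository:

    # Illustrative sketch: derive a public key from a DER-encoded secret key
    # on disk, the approach the FIXME above adopts. Assumes the `cryptography`
    # package; names are assumptions, not the project's API.
    from cryptography.hazmat.primitives.serialization import load_der_private_key


    def derive_public_key(sk_file_path):
        # Read the raw DER bytes of the secret key.
        with open(sk_file_path, "rb") as key_file:
            sk_der = key_file.read()

        # Deserialize the secret key and return its public counterpart.
        sk = load_der_private_key(sk_der, password=None)
        return sk.public_key()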
From 036f879302cef67acc493592069cecb99d1b5279 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 11 Feb 2020 22:23:08 +0100
Subject: [PATCH 93/93] Monkeypatches load_keys in e2e tests since add_appointment calls it internally

---
 test/pisa/e2e/test_basic_e2e.py | 36 ++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)

diff --git a/test/pisa/e2e/test_basic_e2e.py b/test/pisa/e2e/test_basic_e2e.py
index 5e3ef73..3124e7c 100644
--- a/test/pisa/e2e/test_basic_e2e.py
+++ b/test/pisa/e2e/test_basic_e2e.py
@@ -34,6 +34,13 @@ wt_cli.pisa_api_port = PORT
 pisad_process = run_pisad()
 
 
+def get_pisa_pk():
+    pisa_sk = Cryptographer.load_private_key_der(Cryptographer.load_key_file(config.get("PISA_SECRET_KEY")))
+    pisa_pk = pisa_sk.public_key()
+
+    return pisa_pk
+
+
 def broadcast_transaction_and_mine_block(bitcoin_cli, commitment_tx, addr):
     # Broadcast the commitment transaction and mine a block
     bitcoin_cli.sendrawtransaction(commitment_tx)
@@ -46,7 +53,9 @@ def get_appointment_info(locator):
     return wt_cli.get_appointment(locator)
 
 
-def test_appointment_life_cycle(bitcoin_cli, create_txs):
+def test_appointment_life_cycle(monkeypatch, bitcoin_cli, create_txs):
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     commitment_tx, penalty_tx = create_txs
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
     appointment_data = build_appointment_data(bitcoin_cli, commitment_tx_id, penalty_tx)
@@ -89,7 +98,9 @@ def test_appointment_life_cycle(bitcoin_cli, create_txs):
     assert appointment_info[0].get("status") == "not_found"
 
 
-def test_appointment_malformed_penalty(bitcoin_cli, create_txs):
+def test_appointment_malformed_penalty(monkeypatch, bitcoin_cli, create_txs):
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     # Let's start by creating two valid transactions
     commitment_tx, penalty_tx = create_txs
 
@@ -128,7 +139,7 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     # We can't use wt_cli.add_appointment here since it computes the locator internally, so let's do it manually.
     # We will encrypt the blob using the random value and derive the locator from the commitment tx.
     appointment_data["locator"] = compute_locator(bitcoin_cli.decoderawtransaction(commitment_tx).get("txid"))
-    appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), appointment_data.get("tx_id"))
+    appointment_data["encrypted_blob"] = Cryptographer.encrypt(Blob(penalty_tx), get_random_value_hex(32))
 
     appointment = Appointment.from_dict(appointment_data)
 
     # pisa_pk, cli_sk, cli_pk_der = wt_cli.load_keys(
@@ -140,8 +151,7 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     #
     # signature = Cryptographer.sign(appointment.serialize(), cli_sk)
     # data = {"appointment": appointment.to_dict(), "signature": signature, "public_key": hex_pk_der.decode("utf-8")}
-    pisa_sk = Cryptographer.load_private_key_der(Cryptographer.load_key_file(config.get("PISA_SECRET_KEY")))
-    pisa_pk = pisa_sk.public_key()
+    pisa_pk = get_pisa_pk()
 
     data = {"appointment": appointment.to_dict()}
 
     # Send appointment to the server. 
@@ -167,7 +177,9 @@ def test_appointment_wrong_key(bitcoin_cli, create_txs):
     assert appointment_info[0].get("status") == "not_found"
 
 
-def test_two_identical_appointments(bitcoin_cli, create_txs):
+def test_two_identical_appointments(monkeypatch, bitcoin_cli, create_txs):
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     # Tests sending two identical appointments to the tower.
     # At the moment there are no checks for identical appointments, so both will be accepted, decrypted and kept until
     # the end.
@@ -200,7 +212,9 @@ def test_two_identical_appointments(bitcoin_cli, create_txs):
         assert info.get("penalty_rawtx") == penalty_tx
 
 
-def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs):
+def test_two_appointment_same_locator_different_penalty(monkeypatch, bitcoin_cli, create_txs):
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     # This tests sending two appointments with valid transactions and the same locator.
     commitment_tx, penalty_tx1 = create_txs
     commitment_tx_id = bitcoin_cli.decoderawtransaction(commitment_tx).get("txid")
@@ -232,9 +246,11 @@ def test_two_appointment_same_locator_different_penalty(bitcoin_cli, create_txs)
     assert appointment_info[0].get("penalty_rawtx") == penalty_tx1
 
 
-def test_appointment_shutdown_pisa_trigger_back_online(create_txs, bitcoin_cli):
+def test_appointment_shutdown_pisa_trigger_back_online(monkeypatch, create_txs, bitcoin_cli):
     global pisad_process
 
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     pisa_pid = pisad_process.pid
 
     commitment_tx, penalty_tx = create_txs
@@ -270,9 +286,11 @@ def test_appointment_shutdown_pisa_trigger_back_online(create_txs, bitcoin_cli):
     assert appointment_info[0].get("status") == "dispute_responded"
 
 
-def test_appointment_shutdown_pisa_trigger_while_offline(create_txs, bitcoin_cli):
+def test_appointment_shutdown_pisa_trigger_while_offline(monkeypatch, create_txs, bitcoin_cli):
     global pisad_process
 
+    monkeypatch.setattr(wt_cli, "load_keys", get_pisa_pk)
+
     pisa_pid = pisad_process.pid
 
     commitment_tx, penalty_tx = create_txs
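The monkeypatching applied throughout patch 93 is pytest's standard fixture-based attribute patching: each test swaps the module-level `load_keys` for `get_pisa_pk` before the body runs, and pytest restores the original on teardown. A minimal self-contained sketch of the same pattern follows; the `keys` namespace and return values below are stand-ins for illustration, not the real `wt_cli`:

    # Self-contained illustration of the pytest monkeypatch pattern used in
    # patch 93. `keys` plays the role of wt_cli; run with `pytest <file>`.
    import types

    keys = types.SimpleNamespace(load_keys=lambda: "hardcoded_alpha_pk")


    def get_test_pk():
        # Plays the role of get_pisa_pk: return a key derived for this run.
        return "pk_derived_from_disk"


    def test_uses_patched_keys(monkeypatch):
        # Replace the module-level function; pytest undoes this on teardown.
        monkeypatch.setattr(keys, "load_keys", get_test_pk)

        # Anything calling keys.load_keys() now sees the patched result.
        assert keys.load_keys() == "pk_derived_from_disk"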